diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 74c86ba..f14f1b3 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -85,6 +85,7 @@ jobs: INTEGRATION_TEST_APIKEY: ${{ secrets.INTEGRATION_TEST_APIKEY }} INTEGRATION_TEST_CLOUDSYNC_ADDRESS: ${{ secrets.INTEGRATION_TEST_CLOUDSYNC_ADDRESS }} INTEGRATION_TEST_OFFLINE_DATABASE_ID: ${{ secrets.INTEGRATION_TEST_OFFLINE_DATABASE_ID }} + INTEGRATION_TEST_FAILURE_DATABASE_ID: ${{ secrets.INTEGRATION_TEST_FAILURE_DATABASE_ID }} steps: @@ -132,6 +133,7 @@ jobs: -e INTEGRATION_TEST_APIKEY="${{ env.INTEGRATION_TEST_APIKEY }}" \ -e INTEGRATION_TEST_CLOUDSYNC_ADDRESS="${{ env.INTEGRATION_TEST_CLOUDSYNC_ADDRESS }}" \ -e INTEGRATION_TEST_OFFLINE_DATABASE_ID="${{ env.INTEGRATION_TEST_OFFLINE_DATABASE_ID }}" \ + -e INTEGRATION_TEST_FAILURE_DATABASE_ID="${{ env.INTEGRATION_TEST_FAILURE_DATABASE_ID }}" \ alpine:latest \ tail -f /dev/null docker exec alpine sh -c "apk update && apk add --no-cache gcc make curl sqlite openssl-dev musl-dev linux-headers" @@ -206,6 +208,7 @@ jobs: export INTEGRATION_TEST_APIKEY="$INTEGRATION_TEST_APIKEY" export INTEGRATION_TEST_CLOUDSYNC_ADDRESS="$INTEGRATION_TEST_CLOUDSYNC_ADDRESS" export INTEGRATION_TEST_OFFLINE_DATABASE_ID="$INTEGRATION_TEST_OFFLINE_DATABASE_ID" + export INTEGRATION_TEST_FAILURE_DATABASE_ID="$INTEGRATION_TEST_FAILURE_DATABASE_ID" $(make test PLATFORM=$PLATFORM ARCH=$ARCH -n) EOF echo "::endgroup::" diff --git a/API.md b/API.md index 031732d..8e6e825 100644 --- a/API.md +++ b/API.md @@ -490,6 +490,7 @@ The sync functions follow a consistent error-handling contract: | **Endpoint/network errors** (server unreachable, auth failure, bad URL) | SQL error — the function could not execute. | | **Apply errors** (`cloudsync_payload_apply` failures — unknown schema hash, invalid checksum, decompression error) | Structured JSON — a `receive.error` string field is included in the response. 
| | **Server-reported apply job failures** (the server processed the request but its own apply job failed) | Structured JSON — a `send.lastFailure` object is included in the response. | +| **Server-reported check job failures** (the server failed to encode a changeset for the client) | Structured JSON — a `receive.lastFailure` object is included in the response. | This means: if you get JSON back, the server was reachable and the network protocol ran. If you get a SQL error, connectivity or configuration is broken. @@ -510,7 +511,7 @@ This means: if you get JSON back, the server was reachable and the network proto - `send.status`: The current sync state — `"synced"` (all changes confirmed), `"syncing"` (changes sent but not yet confirmed), `"out-of-sync"` (local changes pending or gaps detected), or `"error"`. - `send.localVersion`: The latest local database version. - `send.serverVersion`: The latest version confirmed by the server. -- `send.lastFailure` (optional): Present only when the server reports a failed apply job. The object is forwarded verbatim from the server and typically includes `jobId`, `code`, `message`, `retryable`, and `failedAt`. It is emitted regardless of `status` so callers can detect server-side failures during `"syncing"` or even after the state has nominally recovered. +- `send.lastFailure` (optional): Present only when the server reports a failed apply job. Forwarded verbatim from the server's `failures.apply` and typically includes `jobId`, `code`, `stage`, `message`, `retryable`, and `failedAt`. It is emitted regardless of `status` so callers can detect server-side failures during `"syncing"` or even after the state has nominally recovered. This function is **send/apply-scoped**: server-reported check-job failures (`failures.check`) are not surfaced here — see [`cloudsync_network_check_changes()`](#cloudsync_network_check_changes) and [`cloudsync_network_sync()`](#cloudsync_network_sync). 
**Example:** @@ -519,7 +520,7 @@ SELECT cloudsync_network_send_changes(); -- '{"send":{"status":"synced","localVersion":5,"serverVersion":5}}' -- With a server-reported failure (e.g. unknown schema hash on the server side): --- '{"send":{"status":"out-of-sync","localVersion":1,"serverVersion":0,"lastFailure":{"jobId":44961,"code":"internal_error","message":"cloudsync operation failed: Cannot apply the received payload because the schema hash is unknown 4288148391734624266.","retryable":true,"failedAt":"2026-04-15T22:21:09.018606Z"}}}' +-- '{"send":{"status":"out-of-sync","localVersion":1,"serverVersion":0,"lastFailure":{"jobId":44961,"code":"internal_error","stage":"apply_payload","message":"cloudsync operation failed: Cannot apply the received payload because the schema hash is unknown 4288148391734624266.","retryable":true,"failedAt":"2026-04-15T22:21:09.018606Z"}}}' ``` --- @@ -533,19 +534,20 @@ If a package of new changes is already available for the local site, the server This function is designed to be called periodically to keep the local database in sync. To force an update and wait for changes (with a timeout), use [`cloudsync_network_sync(wait_ms, max_retries)`]. -If the network is misconfigured or the remote server is unreachable, the function raises a SQL error. If the received payload cannot be applied locally (for example because of an unknown schema hash), the error is returned as a `receive.error` field in the JSON response. +If the network is misconfigured or the remote server is unreachable, the function raises a SQL error. If the received payload cannot be applied locally (for example because of an unknown schema hash), the error is returned as a `receive.error` field in the JSON response. If the server reports an unresolved failed check job (e.g. an `encode_changes` failure), that failure is forwarded as a `receive.lastFailure` object. **Parameters:** None. 
**Returns:** A JSON string with the receive result: ```json -{"receive": {"rows": N, "tables": ["table1", "table2"], "error": "..."}} +{"receive": {"rows": N, "tables": ["table1", "table2"], "error": "...", "lastFailure": {...}}} ``` - `receive.rows`: The number of rows received and applied to the local database. `0` when the receive phase failed. - `receive.tables`: An array of table names that received changes. Empty (`[]`) if no changes were applied or the receive phase failed. -- `receive.error` (optional): Present when `cloudsync_payload_apply` failed. Contains a human-readable error message describing why the received payload could not be applied. +- `receive.error` (optional, string): Present when client-side `cloudsync_payload_apply` failed. Contains a human-readable error message describing why the received payload could not be applied. +- `receive.lastFailure` (optional, object): Present only when the server reports a failed check job. Forwarded verbatim from the server's `failures.check` and typically includes `jobId`, `dbVersion`, `seq`, `code`, `stage`, `message`, `retryable`, and `failedAt`. Distinct from `receive.error`: `receive.error` describes a client-side apply failure (string), while `receive.lastFailure` describes a server-side check-job failure (object). Both can coexist in the same response. This function is **check-scoped**: server-reported apply-job failures (`failures.apply`) are not surfaced here — see [`cloudsync_network_send_changes()`](#cloudsync_network_send_changes) and [`cloudsync_network_sync()`](#cloudsync_network_sync). 
**Example:** @@ -553,8 +555,11 @@ If the network is misconfigured or the remote server is unreachable, the functio SELECT cloudsync_network_check_changes(); -- '{"receive":{"rows":3,"tables":["tasks"]}}' --- With an apply error: +-- With a client-side apply error: -- '{"receive":{"rows":0,"tables":[],"error":"Cannot apply the received payload because the schema hash is unknown 7218827471400075525."}}' + +-- With a server-reported check-job failure: +-- '{"receive":{"rows":0,"tables":[],"lastFailure":{"jobId":456,"dbVersion":15,"seq":1,"code":"tenant_unreachable","stage":"encode_changes","message":"tenant check failed","retryable":true,"failedAt":"2026-04-24T10:22:00Z"}}}' ``` --- @@ -576,17 +581,18 @@ SELECT cloudsync_network_check_changes(); ```json { "send": {"status": "synced|syncing|out-of-sync|error", "localVersion": N, "serverVersion": N, "lastFailure": {...}}, - "receive": {"rows": N, "tables": ["table1", "table2"], "error": "..."} + "receive": {"rows": N, "tables": ["table1", "table2"], "error": "...", "lastFailure": {...}} } ``` - `send.status`: The current sync state — `"synced"`, `"syncing"`, `"out-of-sync"`, or `"error"`. - `send.localVersion`: The latest local database version. - `send.serverVersion`: The latest version confirmed by the server. -- `send.lastFailure` (optional): Same semantics as in [`cloudsync_network_send_changes()`](#cloudsync_network_send_changes) — forwarded verbatim from the server whenever a failed apply job is reported, regardless of `status`. +- `send.lastFailure` (optional): Same semantics as in [`cloudsync_network_send_changes()`](#cloudsync_network_send_changes) — forwarded verbatim from the server's `failures.apply` whenever a failed apply job is reported, regardless of `status`. - `receive.rows`: The number of rows received and applied during the check phase. `0` when the receive phase failed. - `receive.tables`: An array of table names that received changes. 
Empty (`[]`) if no changes were applied or the receive phase failed. -- `receive.error` (optional): Present when `cloudsync_payload_apply` failed (for example `"Cannot apply the received payload because the schema hash is unknown 7218827471400075525."`). The send result is always preserved so the caller can tell that local changes reached the server even when applying incoming changes failed. The retry loop breaks immediately on apply errors, since failures like schema-hash mismatches do not heal across retries. Endpoint/network errors during the receive phase raise a SQL error instead. +- `receive.error` (optional, string): Present when client-side `cloudsync_payload_apply` failed (for example `"Cannot apply the received payload because the schema hash is unknown 7218827471400075525."`). The send result is always preserved so the caller can tell that local changes reached the server even when applying incoming changes failed. The retry loop breaks immediately on apply errors, since failures like schema-hash mismatches do not heal across retries. Endpoint/network errors during the receive phase raise a SQL error instead. +- `receive.lastFailure` (optional, object): Same semantics as in [`cloudsync_network_check_changes()`](#cloudsync_network_check_changes) — forwarded verbatim from the server's `failures.check` whenever a failed check job is reported. Distinct from `receive.error`. `cloudsync_network_sync()` reports both `send.lastFailure` and `receive.lastFailure` when present. **Example:** diff --git a/CHANGELOG.md b/CHANGELOG.md index b9fb7d9..ea76c25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,20 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). 
+## [1.0.18] - 2026-04-29 + +### Fixed + +- **`cloudsync_network_check_changes()`** no longer errors with `missing 'url' in check response` when the server has not yet prepared any incoming changes for this device. The function now returns the standard "no rows yet" response in that case, so polling loops keep working without spurious errors. + +### Added + +- **`receive.lastFailure`** JSON field on `cloudsync_network_check_changes()` and `cloudsync_network_sync()`, surfacing the most recent server-side failure of the receive pipeline (e.g. the server failed to prepare the next batch of incoming changes for this device). It complements the existing `send.lastFailure` (server-side apply failures) and `receive.error` (local apply failures on this device), so applications can distinguish "the server has trouble producing my changes" from "I had trouble applying them locally". Each function reports only the failures relevant to its own scope: `cloudsync_network_send_changes()` reports `send.lastFailure`; `cloudsync_network_check_changes()` reports `receive.lastFailure`; `cloudsync_network_sync()` reports both. + +### Changed + +- Updated the request headers sent to the cloudsync HTTP endpoints (version advertisement, per-endpoint capabilities; legacy `Accept` header removed). 
+ ## [1.0.17] - 2026-04-24 ### Fixed diff --git a/docs/internal/network.md b/docs/internal/network.md index 7e03bbe..9af6e15 100644 --- a/docs/internal/network.md +++ b/docs/internal/network.md @@ -42,11 +42,12 @@ This is useful when: ``` ```c - NETWORK_RESULT network_receive_buffer (network_data *data, const char *endpoint, const char *authentication, bool zero_terminated, bool is_post_request, char *json_payload, const char *custom_header); + NETWORK_RESULT network_receive_buffer (network_data *data, const char *endpoint, const char *authentication, bool zero_terminated, bool is_post_request, char *json_payload, const char **extra_headers, int nextra_headers); // Performs a network request (GET or POST depending on `is_post_request`) to the specified `endpoint`, using the given `authentication` token or header. // If `json_payload` is provided, it will be sent as the POST body (for `is_post_request == true`). // If `zero_terminated == true`, ensure that the returned buffer is null-terminated. + // `extra_headers` is an array of `nextra_headers` request-header lines (each formatted as `"Name: value"`) appended to the standard headers (`Authorization`, `X-CloudSync-Org`, `Content-Type`). Pass `NULL, 0` for none. Used by call sites to send `X-CloudSync-Version` on every cloudsync API call and `X-CloudSync-Capabilities: check-status-response` on calls to the `/check` endpoint. // Returns a `NETWORK_RESULT` enum value indicating success, error, or timeout. 
``` diff --git a/src/cloudsync.h b/src/cloudsync.h index d1641ed..acce26e 100644 --- a/src/cloudsync.h +++ b/src/cloudsync.h @@ -18,7 +18,7 @@ extern "C" { #endif -#define CLOUDSYNC_VERSION "1.0.17" +#define CLOUDSYNC_VERSION "1.0.18" #define CLOUDSYNC_MAX_TABLENAME_LEN 512 #define CLOUDSYNC_VALUE_NOTSET -1 diff --git a/src/network/network.c b/src/network/network.c index bd6591b..79315d0 100644 --- a/src/network/network.c +++ b/src/network/network.c @@ -72,6 +72,17 @@ typedef struct { size_t read_pos; } network_read_data; +static const char *cloudsync_default_headers[] = { + CLOUDSYNC_HEADER_VERSION_LINE, +}; + +static const char *cloudsync_check_headers[] = { + CLOUDSYNC_HEADER_VERSION_LINE, + CLOUDSYNC_HEADER_CHECK_CAPABILITIES, +}; + +#define ARRAY_LEN(a) ((int)(sizeof(a) / sizeof((a)[0]))) + // MARK: - void network_result_cleanup (NETWORK_RESULT *res) { @@ -193,7 +204,7 @@ static size_t network_receive_callback (void *ptr, size_t size, size_t nmemb, vo return (size * nmemb); } -NETWORK_RESULT network_receive_buffer (network_data *data, const char *endpoint, const char *authentication, bool zero_terminated, bool is_post_request, char *json_payload, const char *custom_header) { +NETWORK_RESULT network_receive_buffer (network_data *data, const char *endpoint, const char *authentication, bool zero_terminated, bool is_post_request, char *json_payload, const char **extra_headers, int nextra_headers) { char *buffer = NULL; size_t blen = 0; struct curl_slist* headers = NULL; @@ -219,8 +230,8 @@ NETWORK_RESULT network_receive_buffer (network_data *data, const char *endpoint, curl_easy_setopt(curl, CURLOPT_CAINFO_BLOB, &pem_blob); #endif - if (custom_header) { - struct curl_slist *tmp = curl_slist_append(headers, custom_header); + for (int i = 0; i < nextra_headers; i++) { + struct curl_slist *tmp = curl_slist_append(headers, extra_headers[i]); if (!tmp) {rc = CURLE_OUT_OF_MEMORY; goto cleanup;} headers = tmp; } @@ -430,7 +441,7 @@ int network_download_changes 
(sqlite3_context *context, const char *download_url return -1; } - NETWORK_RESULT result = network_receive_buffer(netdata, download_url, NULL, false, false, NULL, NULL); + NETWORK_RESULT result = network_receive_buffer(netdata, download_url, NULL, false, false, NULL, NULL, 0); int rc = SQLITE_OK; if (result.code == CLOUDSYNC_NETWORK_BUFFER) { @@ -881,20 +892,37 @@ static char *network_get_affected_tables(sqlite3 *db, int64_t since_db_version) // always raise a SQL error via sqlite3_result_error. // - cloudsync_payload_apply failures (unknown schema hash, invalid checksum, // decompression error) are returned as structured JSON via receive.error. -// - Server-reported apply job failures are forwarded as send.lastFailure. +// - Server-reported failures from the SyncStatusResponse failures object are +// forwarded as send.lastFailure (failures.apply) and receive.lastFailure +// (failures.check). Per-function scoping: send_changes emits send.lastFailure +// only; check_changes emits receive.lastFailure only; sync emits both. // // Callers that receive JSON can trust that the server was reachable. // A SQL error means connectivity or configuration is broken. 
 typedef struct {
-    int64_t server_version;     // lastOptimisticVersion
-    int64_t local_version;      // new_db_version (max local)
-    const char *status;         // computed status string
-    int rows_received;          // rows from check
-    char *tables_json;          // JSON array of affected table names, caller must cloudsync_memory_free
-    char *last_failure_json;    // raw JSON object for server-reported lastFailure, caller must cloudsync_memory_free
+    int64_t server_version;     // lastOptimisticVersion
+    int64_t local_version;      // new_db_version (max local)
+    const char *status;         // computed status string
+    int rows_received;          // rows from check
+    char *tables_json;          // JSON array of affected table names, caller must cloudsync_memory_free
+    char *apply_failure_json;   // raw JSON object for server-reported failures.apply, caller must cloudsync_memory_free
+    char *check_failure_json;   // raw JSON object for server-reported failures.check, caller must cloudsync_memory_free
 } sync_result;
+// Returns a malloc'd raw JSON copy of failures.<stage_key>, where stage_key is "apply" or "check",
+// or NULL when the field is missing or is JSON null. Caller frees with cloudsync_memory_free.
+static char *json_extract_failure_stage(const char *json, size_t json_len, const char *stage_key) { + if (!json || json_len == 0 || !stage_key) return NULL; + + char *failures = json_extract_object_raw(json, json_len, "failures"); + if (!failures) return NULL; + + char *stage = json_extract_object_raw(failures, strlen(failures), stage_key); + cloudsync_memory_free(failures); + return stage; +} + static const char *network_compute_status(int64_t last_optimistic, int64_t last_confirmed, int gaps_size, int64_t local_version) { if (last_optimistic < 0 || last_confirmed < 0) return "error"; @@ -926,8 +954,8 @@ void cloudsync_network_has_unsent_changes (sqlite3_context *context, int argc, s return; } - NETWORK_RESULT res = network_receive_buffer(netdata, netdata->status_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); - + NETWORK_RESULT res = network_receive_buffer(netdata, netdata->status_endpoint, netdata->authentication, true, false, NULL, cloudsync_default_headers, ARRAY_LEN(cloudsync_default_headers)); + int64_t last_optimistic_version = -1; if (res.code == CLOUDSYNC_NETWORK_BUFFER && res.buffer) { @@ -975,7 +1003,7 @@ int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, NETWORK_RESULT res; if (blob != NULL && blob_size > 0) { // there is data to send - res = network_receive_buffer(netdata, netdata->upload_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); + res = network_receive_buffer(netdata, netdata->upload_endpoint, netdata->authentication, true, false, NULL, cloudsync_default_headers, ARRAY_LEN(cloudsync_default_headers)); if (res.code != CLOUDSYNC_NETWORK_BUFFER) { cloudsync_memory_free(blob); network_result_to_sqlite_error(context, res, "cloudsync_network_send_changes unable to receive upload URL"); @@ -1010,24 +1038,26 @@ int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, network_result_cleanup(&res); // notify remote host that we 
succesfully uploaded changes - res = network_receive_buffer(netdata, netdata->apply_endpoint, netdata->authentication, true, true, json_payload, CLOUDSYNC_HEADER_SQLITECLOUD); + res = network_receive_buffer(netdata, netdata->apply_endpoint, netdata->authentication, true, true, json_payload, cloudsync_default_headers, ARRAY_LEN(cloudsync_default_headers)); } else { // there is no data to send, just check the status to update the db_version value in settings and to reply the status new_db_version = db_version; - res = network_receive_buffer(netdata, netdata->status_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); + res = network_receive_buffer(netdata, netdata->status_endpoint, netdata->authentication, true, false, NULL, cloudsync_default_headers, ARRAY_LEN(cloudsync_default_headers)); } int64_t last_optimistic_version = -1; int64_t last_confirmed_version = -1; int gaps_size = -1; - char *last_failure_json = NULL; + char *apply_failure_json = NULL; + char *check_failure_json = NULL; if (res.code == CLOUDSYNC_NETWORK_BUFFER && res.buffer) { last_optimistic_version = json_extract_int(res.buffer, res.blen, "lastOptimisticVersion", -1); last_confirmed_version = json_extract_int(res.buffer, res.blen, "lastConfirmedVersion", -1); gaps_size = json_extract_array_size(res.buffer, res.blen, "gaps"); if (gaps_size < 0) gaps_size = 0; - last_failure_json = json_extract_object_raw(res.buffer, res.blen, "lastFailure"); + apply_failure_json = json_extract_failure_stage(res.buffer, res.blen, "apply"); + check_failure_json = json_extract_failure_stage(res.buffer, res.blen, "check"); } else if (res.code != CLOUDSYNC_NETWORK_OK) { network_result_to_sqlite_error(context, res, "cloudsync_network_send_changes unable to notify BLOB upload to remote host."); network_result_cleanup(&res); @@ -1051,10 +1081,13 @@ int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, out->server_version = last_optimistic_version; out->local_version 
= new_db_version; out->status = network_compute_status(last_optimistic_version, last_confirmed_version, gaps_size, new_db_version); - out->last_failure_json = last_failure_json; - last_failure_json = NULL; + out->apply_failure_json = apply_failure_json; + out->check_failure_json = check_failure_json; + apply_failure_json = NULL; + check_failure_json = NULL; } - if (last_failure_json) cloudsync_memory_free(last_failure_json); + if (apply_failure_json) cloudsync_memory_free(apply_failure_json); + if (check_failure_json) cloudsync_memory_free(check_failure_json); network_result_cleanup(&res); return SQLITE_OK; @@ -1063,17 +1096,23 @@ int cloudsync_network_send_changes_internal (sqlite3_context *context, int argc, void cloudsync_network_send_changes (sqlite3_context *context, int argc, sqlite3_value **argv) { DEBUG_FUNCTION("cloudsync_network_send_changes"); - sync_result sr = {-1, 0, NULL, 0, NULL, NULL}; + // send-scoped: emits send.lastFailure (from failures.apply) only. + // failures.check arriving in the same response is parsed but discarded here. + sync_result sr = {.server_version = -1}; int rc = cloudsync_network_send_changes_internal(context, argc, argv, &sr); - if (rc != SQLITE_OK) { if (sr.last_failure_json) cloudsync_memory_free(sr.last_failure_json); return; } + if (rc != SQLITE_OK) { + if (sr.apply_failure_json) cloudsync_memory_free(sr.apply_failure_json); + if (sr.check_failure_json) cloudsync_memory_free(sr.check_failure_json); + return; + } char *buf; - if (sr.last_failure_json) { + if (sr.apply_failure_json) { buf = cloudsync_memory_mprintf( "{\"send\":{\"status\":\"%s\",\"localVersion\":%lld,\"serverVersion\":%lld,\"lastFailure\":%s}}", sr.status ? 
sr.status : "error", (long long)sr.local_version, (long long)sr.server_version, - sr.last_failure_json); + sr.apply_failure_json); } else { buf = cloudsync_memory_mprintf( "{\"send\":{\"status\":\"%s\",\"localVersion\":%lld,\"serverVersion\":%lld}}", @@ -1081,7 +1120,8 @@ void cloudsync_network_send_changes (sqlite3_context *context, int argc, sqlite3 (long long)sr.local_version, (long long)sr.server_version); } sqlite3_result_text(context, buf, -1, cloudsync_memory_free); - if (sr.last_failure_json) cloudsync_memory_free(sr.last_failure_json); + if (sr.apply_failure_json) cloudsync_memory_free(sr.apply_failure_json); + if (sr.check_failure_json) cloudsync_memory_free(sr.check_failure_json); } int cloudsync_network_check_internal(sqlite3_context *context, int *pnrows, sync_result *out, char **err_out) { @@ -1101,22 +1141,32 @@ int cloudsync_network_check_internal(sqlite3_context *context, int *pnrows, sync char json_payload[2024]; snprintf(json_payload, sizeof(json_payload), "{\"dbVersion\":%lld, \"seq\":%d}", (long long)db_version, seq); - NETWORK_RESULT result = network_receive_buffer(netdata, netdata->check_endpoint, netdata->authentication, true, true, json_payload, CLOUDSYNC_HEADER_SQLITECLOUD); + NETWORK_RESULT result = network_receive_buffer(netdata, netdata->check_endpoint, netdata->authentication, true, true, json_payload, cloudsync_check_headers, ARRAY_LEN(cloudsync_check_headers)); int rc = SQLITE_OK; if (result.code == CLOUDSYNC_NETWORK_BUFFER) { + // The /check endpoint returns one of two shapes: + // HTTP 200 → {"url": "..."} (artifact ready for download) + // HTTP 202 → SyncStatusResponse (no artifact yet — status snapshot, + // may include failures.check) + // Branch on the presence of "url" rather than HTTP status; both shapes arrive as BUFFER. 
char *download_url = json_extract_string(result.buffer, result.blen, "url"); - if (!download_url) { - sqlite3_result_error(context, "cloudsync_network_check_changes: missing 'url' in check response.", -1); - network_result_cleanup(&result); - return SQLITE_ERROR; + if (download_url) { + rc = network_download_changes(context, download_url, pnrows, err_out); + cloudsync_memory_free(download_url); + } + // failures.check may appear in either shape; extract opportunistically. + if (out) { + char *check_failure = json_extract_failure_stage(result.buffer, result.blen, "check"); + if (check_failure) { + if (out->check_failure_json) cloudsync_memory_free(out->check_failure_json); + out->check_failure_json = check_failure; + } } - rc = network_download_changes(context, download_url, pnrows, err_out); - cloudsync_memory_free(download_url); } else if (result.code == CLOUDSYNC_NETWORK_ERROR) { network_set_sqlite_result(context, &result); rc = -1; } else { - // CLOUDSYNC_NETWORK_OK — no changes ready yet, not an error + // CLOUDSYNC_NETWORK_OK — no body (older server) — not an error rc = 0; } @@ -1133,9 +1183,13 @@ int cloudsync_network_check_internal(sqlite3_context *context, int *pnrows, sync } void cloudsync_network_sync (sqlite3_context *context, int wait_ms, int max_retries) { - sync_result sr = {-1, 0, NULL, 0, NULL, NULL}; + sync_result sr = {.server_version = -1}; int rc = cloudsync_network_send_changes_internal(context, 0, NULL, &sr); - if (rc != SQLITE_OK) { if (sr.last_failure_json) cloudsync_memory_free(sr.last_failure_json); return; } + if (rc != SQLITE_OK) { + if (sr.apply_failure_json) cloudsync_memory_free(sr.apply_failure_json); + if (sr.check_failure_json) cloudsync_memory_free(sr.check_failure_json); + return; + } int ntries = 0; int nrows = 0; @@ -1163,41 +1217,48 @@ void cloudsync_network_sync (sqlite3_context *context, int wait_ms, int max_retr } const char *tables = sr.tables_json ? sr.tables_json : "[]"; + const char *status = sr.status ? 
sr.status : "error"; char *escaped_err = receive_err ? json_escape_string(receive_err) : NULL; - char *buf; - if (sr.last_failure_json && escaped_err) { - buf = cloudsync_memory_mprintf( - "{\"send\":{\"status\":\"%s\",\"localVersion\":%lld,\"serverVersion\":%lld,\"lastFailure\":%s}," - "\"receive\":{\"rows\":%d,\"tables\":%s,\"error\":\"%s\"}}", - sr.status ? sr.status : "error", - (long long)sr.local_version, (long long)sr.server_version, - sr.last_failure_json, nrows, tables, escaped_err); - } else if (sr.last_failure_json) { - buf = cloudsync_memory_mprintf( - "{\"send\":{\"status\":\"%s\",\"localVersion\":%lld,\"serverVersion\":%lld,\"lastFailure\":%s}," - "\"receive\":{\"rows\":%d,\"tables\":%s}}", - sr.status ? sr.status : "error", - (long long)sr.local_version, (long long)sr.server_version, - sr.last_failure_json, nrows, tables); + + // Build send and receive blocks separately to avoid combinatorial explosion + // across optional fields (send.lastFailure, receive.error, receive.lastFailure). + char *send_part = sr.apply_failure_json + ? cloudsync_memory_mprintf( + "\"send\":{\"status\":\"%s\",\"localVersion\":%lld,\"serverVersion\":%lld,\"lastFailure\":%s}", + status, (long long)sr.local_version, (long long)sr.server_version, sr.apply_failure_json) + : cloudsync_memory_mprintf( + "\"send\":{\"status\":\"%s\",\"localVersion\":%lld,\"serverVersion\":%lld}", + status, (long long)sr.local_version, (long long)sr.server_version); + + char *recv_part; + if (escaped_err && sr.check_failure_json) { + recv_part = cloudsync_memory_mprintf( + "\"receive\":{\"rows\":%d,\"tables\":%s,\"error\":\"%s\",\"lastFailure\":%s}", + nrows, tables, escaped_err, sr.check_failure_json); } else if (escaped_err) { - buf = cloudsync_memory_mprintf( - "{\"send\":{\"status\":\"%s\",\"localVersion\":%lld,\"serverVersion\":%lld}," - "\"receive\":{\"rows\":%d,\"tables\":%s,\"error\":\"%s\"}}", - sr.status ? 
sr.status : "error", - (long long)sr.local_version, (long long)sr.server_version, + recv_part = cloudsync_memory_mprintf( + "\"receive\":{\"rows\":%d,\"tables\":%s,\"error\":\"%s\"}", nrows, tables, escaped_err); + } else if (sr.check_failure_json) { + recv_part = cloudsync_memory_mprintf( + "\"receive\":{\"rows\":%d,\"tables\":%s,\"lastFailure\":%s}", + nrows, tables, sr.check_failure_json); } else { - buf = cloudsync_memory_mprintf( - "{\"send\":{\"status\":\"%s\",\"localVersion\":%lld,\"serverVersion\":%lld}," - "\"receive\":{\"rows\":%d,\"tables\":%s}}", - sr.status ? sr.status : "error", - (long long)sr.local_version, (long long)sr.server_version, nrows, tables); + recv_part = cloudsync_memory_mprintf( + "\"receive\":{\"rows\":%d,\"tables\":%s}", + nrows, tables); } + + char *buf = cloudsync_memory_mprintf("{%s,%s}", send_part, recv_part); + cloudsync_memory_free(send_part); + cloudsync_memory_free(recv_part); + sqlite3_result_text(context, buf, -1, cloudsync_memory_free); if (escaped_err) cloudsync_memory_free(escaped_err); if (receive_err) cloudsync_memory_free(receive_err); if (sr.tables_json) cloudsync_memory_free(sr.tables_json); - if (sr.last_failure_json) cloudsync_memory_free(sr.last_failure_json); + if (sr.apply_failure_json) cloudsync_memory_free(sr.apply_failure_json); + if (sr.check_failure_json) cloudsync_memory_free(sr.check_failure_json); } void cloudsync_network_sync0 (sqlite3_context *context, int argc, sqlite3_value **argv) { @@ -1220,13 +1281,19 @@ void cloudsync_network_sync2 (sqlite3_context *context, int argc, sqlite3_value void cloudsync_network_check_changes (sqlite3_context *context, int argc, sqlite3_value **argv) { DEBUG_FUNCTION("cloudsync_network_check_changes"); - sync_result sr = {-1, 0, NULL, 0, NULL, NULL}; + // check-scoped: emits receive.error (client-side apply) and/or + // receive.lastFailure (server-side failures.check) only — never a send block. 
+ sync_result sr = {.server_version = -1}; char *receive_err = NULL; int nrows = 0; int rc = cloudsync_network_check_internal(context, &nrows, &sr, &receive_err); // Endpoint/network errors already raised a SQL error on the context - if (rc != SQLITE_OK && !receive_err) { if (sr.tables_json) cloudsync_memory_free(sr.tables_json); return; } + if (rc != SQLITE_OK && !receive_err) { + if (sr.tables_json) cloudsync_memory_free(sr.tables_json); + if (sr.check_failure_json) cloudsync_memory_free(sr.check_failure_json); + return; + } // Apply errors → structured JSON with receive.error if (receive_err) { @@ -1235,17 +1302,25 @@ void cloudsync_network_check_changes (sqlite3_context *context, int argc, sqlite } const char *tables = sr.tables_json ? sr.tables_json : "[]"; + char *escaped = receive_err ? json_escape_string(receive_err) : NULL; char *buf; - if (receive_err) { - char *escaped = json_escape_string(receive_err); - buf = cloudsync_memory_mprintf("{\"receive\":{\"rows\":%d,\"tables\":%s,\"error\":\"%s\"}}", nrows, tables, escaped); - cloudsync_memory_free(escaped); + if (escaped && sr.check_failure_json) { + buf = cloudsync_memory_mprintf("{\"receive\":{\"rows\":%d,\"tables\":%s,\"error\":\"%s\",\"lastFailure\":%s}}", + nrows, tables, escaped, sr.check_failure_json); + } else if (escaped) { + buf = cloudsync_memory_mprintf("{\"receive\":{\"rows\":%d,\"tables\":%s,\"error\":\"%s\"}}", + nrows, tables, escaped); + } else if (sr.check_failure_json) { + buf = cloudsync_memory_mprintf("{\"receive\":{\"rows\":%d,\"tables\":%s,\"lastFailure\":%s}}", + nrows, tables, sr.check_failure_json); } else { buf = cloudsync_memory_mprintf("{\"receive\":{\"rows\":%d,\"tables\":%s}}", nrows, tables); } sqlite3_result_text(context, buf, -1, cloudsync_memory_free); + if (escaped) cloudsync_memory_free(escaped); if (receive_err) cloudsync_memory_free(receive_err); if (sr.tables_json) cloudsync_memory_free(sr.tables_json); + if (sr.check_failure_json) 
cloudsync_memory_free(sr.check_failure_json); } void cloudsync_network_reset_sync_version (sqlite3_context *context, int argc, sqlite3_value **argv) { @@ -1355,7 +1430,7 @@ void cloudsync_network_status (sqlite3_context *context, int argc, sqlite3_value return; } - NETWORK_RESULT res = network_receive_buffer(netdata, netdata->status_endpoint, netdata->authentication, true, false, NULL, CLOUDSYNC_HEADER_SQLITECLOUD); + NETWORK_RESULT res = network_receive_buffer(netdata, netdata->status_endpoint, netdata->authentication, true, false, NULL, cloudsync_default_headers, ARRAY_LEN(cloudsync_default_headers)); network_set_sqlite_result(context, &res); network_result_cleanup(&res); } diff --git a/src/network/network.m b/src/network/network.m index da2338c..0f13478 100644 --- a/src/network/network.m +++ b/src/network/network.m @@ -61,7 +61,7 @@ bool network_send_buffer(network_data *data, const char *endpoint, const char *a } -NETWORK_RESULT network_receive_buffer(network_data *data, const char *endpoint, const char *authentication, bool zero_terminated, bool is_post_request, char *json_payload, const char *custom_header) { +NETWORK_RESULT network_receive_buffer(network_data *data, const char *endpoint, const char *authentication, bool zero_terminated, bool is_post_request, char *json_payload, const char **extra_headers, int nextra_headers) { NSString *urlString = [NSString stringWithUTF8String:endpoint]; NSURL *url = [NSURL URLWithString:urlString]; @@ -78,11 +78,13 @@ NETWORK_RESULT network_receive_buffer(network_data *data, const char *endpoint, NSMutableURLRequest *request = [NSMutableURLRequest requestWithURL:url]; request.HTTPMethod = (json_payload || is_post_request) ? 
@"POST" : @"GET"; - if (custom_header) { - NSString *header = [NSString stringWithUTF8String:custom_header]; - NSArray *parts = [header componentsSeparatedByString:@": "]; - if (parts.count == 2) { - [request setValue:parts[1] forHTTPHeaderField:parts[0]]; + for (int i = 0; i < nextra_headers; i++) { + NSString *header = [NSString stringWithUTF8String:extra_headers[i]]; + NSRange sep = [header rangeOfString:@": "]; + if (sep.location != NSNotFound) { + NSString *name = [header substringToIndex:sep.location]; + NSString *value = [header substringFromIndex:sep.location + sep.length]; + [request setValue:value forHTTPHeaderField:name]; } } diff --git a/src/network/network_private.h b/src/network/network_private.h index b042959..c282e24 100644 --- a/src/network/network_private.h +++ b/src/network/network_private.h @@ -14,8 +14,11 @@ #define CLOUDSYNC_ENDPOINT_CHECK "check" #define CLOUDSYNC_ENDPOINT_APPLY "apply" #define CLOUDSYNC_ENDPOINT_STATUS "status" -#define CLOUDSYNC_HEADER_SQLITECLOUD "Accept: sqlc/plain" #define CLOUDSYNC_HEADER_ORG "X-CloudSync-Org" +#define CLOUDSYNC_HEADER_VERSION "X-CloudSync-Version" +// CLOUDSYNC_VERSION is defined in cloudsync.h — include it before this header at use sites. 
+#define CLOUDSYNC_HEADER_VERSION_LINE CLOUDSYNC_HEADER_VERSION ": " CLOUDSYNC_VERSION +#define CLOUDSYNC_HEADER_CHECK_CAPABILITIES "X-CloudSync-Capabilities: check-status-response" #define CLOUDSYNC_NETWORK_OK 1 #define CLOUDSYNC_NETWORK_ERROR 2 @@ -36,7 +39,7 @@ char *network_data_get_orgid (network_data *data); bool network_data_set_endpoints (network_data *data, char *auth, char *check, char *upload, char *apply, char *status); bool network_send_buffer(network_data *data, const char *endpoint, const char *authentication, const void *blob, int blob_size); -NETWORK_RESULT network_receive_buffer (network_data *data, const char *endpoint, const char *authentication, bool zero_terminated, bool is_post_request, char *json_payload, const char *custom_header); +NETWORK_RESULT network_receive_buffer (network_data *data, const char *endpoint, const char *authentication, bool zero_terminated, bool is_post_request, char *json_payload, const char **extra_headers, int nextra_headers); #endif diff --git a/test/integration.c b/test/integration.c index 62bb1de..0e5a2e0 100644 --- a/test/integration.c +++ b/test/integration.c @@ -488,6 +488,82 @@ int test_double_empty_network_init(const char *db_path) { ABORT_TEST } +// Failure-path integration test. +// +// Targets a cloudsync database (INTEGRATION_TEST_FAILURE_DATABASE_ID) +// configured server-side to fail apply and check jobs. Verifies that the +// new failures.{apply,check} response shape is correctly parsed and emitted as +// send.lastFailure (cloudsync_network_send_changes) and receive.lastFailure +// (cloudsync_network_check_changes), and that cloudsync_network_sync surfaces +// at least one of them. +// +// First invocation primes the server (sends data, queues a check) — server-side +// async jobs may not have failed yet. After a sleep, the second invocation must +// see lastFailure populated. 
+int test_failure_path (const char *db_path) { + int rc = SQLITE_OK; + sqlite3 *db = NULL; + + const char *test_db_id = getenv("INTEGRATION_TEST_FAILURE_DATABASE_ID"); + if (!test_db_id) { + printf("(INTEGRATION_TEST_FAILURE_DATABASE_ID not set, skipping) "); + return SQLITE_OK; + } + const char *custom_address = getenv("INTEGRATION_TEST_CLOUDSYNC_ADDRESS"); + if (!custom_address) { + printf("(INTEGRATION_TEST_CLOUDSYNC_ADDRESS not set, skipping) "); + return SQLITE_OK; + } + + rc = open_load_ext(db_path, &db); RCHECK + + rc = db_exec(db, "CREATE TABLE IF NOT EXISTS failure_users (id TEXT PRIMARY KEY NOT NULL, name TEXT NOT NULL DEFAULT '');"); RCHECK + rc = db_exec(db, "SELECT cloudsync_init('failure_users');"); RCHECK + + char network_init[1024]; + snprintf(network_init, sizeof(network_init), + "SELECT cloudsync_network_init_custom('%s', '%s');", custom_address, test_db_id); + rc = db_exec(db, network_init); RCHECK + + const char *apikey = getenv("INTEGRATION_TEST_APIKEY"); + if (apikey) { + char set_apikey[512]; + snprintf(set_apikey, sizeof(set_apikey), + "SELECT cloudsync_network_set_apikey('%s');", apikey); + rc = db_exec(db, set_apikey); RCHECK + } + + // Insert a row so cloudsync_network_send_changes has a payload to upload. + char value[UUID_STR_MAXLEN]; + cloudsync_uuid_v7_string(value, true); + char sql[256]; + snprintf(sql, sizeof(sql), "INSERT INTO failure_users (id, name) VALUES ('%s', '%s');", value, value); + rc = db_exec(db, sql); RCHECK + + // First invocation — primes the server. Failures may not yet be reported. + rc = db_exec(db, "SELECT cloudsync_network_send_changes();"); RCHECK + rc = db_exec(db, "SELECT cloudsync_network_check_changes();"); RCHECK + rc = db_exec(db, "SELECT cloudsync_network_sync(250, 1);"); RCHECK + + // Give the server time to process and fail the queued apply/check jobs. + sqlite3_sleep(5000); + + // Second invocation — failures must surface now. 
+ // jobId is always > 0 when failure object is present, so ->> + GT0 doubles as + // an existence check (NULL → atoi returns 0 → fails GT0). + rc = db_expect_gt0(db, + "SELECT cloudsync_network_send_changes() ->> '$.send.lastFailure.jobId';"); RCHECK + rc = db_expect_gt0(db, + "SELECT cloudsync_network_check_changes() ->> '$.receive.lastFailure.jobId';"); RCHECK + // sync must surface at least one of the two; instr() catches either path. + rc = db_expect_gt0(db, + "SELECT instr(cloudsync_network_sync(250, 1), '\"lastFailure\":');"); RCHECK + + rc = db_exec(db, "SELECT cloudsync_terminate();"); + +ABORT_TEST +} + int version(void){ sqlite3 *db = NULL; int rc = open_load_ext(":memory:", &db); @@ -557,6 +633,7 @@ int main (void) { rc += test_report("Enable Disable Test:", test_enable_disable(DB_PATH)); rc += test_report("Offline Error Test:", test_offline_error(":memory:")); rc += test_report("Double Empty Init Test:", test_double_empty_network_init(":memory:")); + rc += test_report("Failure Path Test:", test_failure_path(":memory:")); remove(DB_PATH); // remove the database file