diff --git a/internal/adapters/nylas/calendars_events.go b/internal/adapters/nylas/calendars_events.go index 681e7b4..fc737e4 100644 --- a/internal/adapters/nylas/calendars_events.go +++ b/internal/adapters/nylas/calendars_events.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" "net/url" + "strings" "github.com/nylas/cli/internal/domain" ) @@ -25,6 +26,12 @@ func (c *HTTPClient) GetEvents(ctx context.Context, grantID, calendarID string, // GetEventsWithCursor retrieves events with pagination cursor support. func (c *HTTPClient) GetEventsWithCursor(ctx context.Context, grantID, calendarID string, params *domain.EventQueryParams) (*domain.EventListResponse, error) { + if err := validateRequired("grant ID", grantID); err != nil { + return nil, err + } + if err := validateRequired("calendar ID", calendarID); err != nil { + return nil, err + } if params == nil { params = &domain.EventQueryParams{Limit: 10} } @@ -45,6 +52,7 @@ func (c *HTTPClient) GetEventsWithCursor(ctx context.Context, grantID, calendarI AddBool("expand_recurring", params.ExpandRecurring). AddBoolPtr("busy", params.Busy). Add("order_by", params.OrderBy). + Add("ical_uid", params.ICalUID). BuildURL(baseURL) var result struct { @@ -201,13 +209,46 @@ func (c *HTTPClient) DeleteEvent(ctx context.Context, grantID, calendarID, event return c.doDelete(ctx, queryURL) } +// rsvpCommentMaxBytes caps the free-form RSVP comment forwarded to Nylas. +// Defense-in-depth alongside the Air handler's own 1024-byte cap: any +// future caller (CLI, SDK consumer) gets the same protection without +// having to re-implement the rule. +const rsvpCommentMaxBytes = 1024 + // SendRSVP sends an RSVP response to an event invitation. +// +// Validation errors are wrapped in domain.ErrInvalidInput so callers can +// classify them with errors.Is — matches the rest of this client. 
func (c *HTTPClient) SendRSVP(ctx context.Context, grantID, calendarID, eventID string, req *domain.SendRSVPRequest) error { + if err := validateRequired("grant ID", grantID); err != nil { + return err + } + if err := validateRequired("calendar ID", calendarID); err != nil { + return err + } + if err := validateRequired("event ID", eventID); err != nil { + return err + } + if req == nil { + return fmt.Errorf("%w: RSVP request cannot be nil", domain.ErrInvalidInput) + } + // Normalise so case/whitespace differences from CLI/TUI/SDK callers + // don't bypass the allow-list. Nylas itself only accepts lowercase. + status := strings.ToLower(strings.TrimSpace(req.Status)) + switch status { + case "yes", "no", "maybe": + default: + return fmt.Errorf("%w: RSVP status must be one of: yes, no, maybe (got %q)", domain.ErrInvalidInput, req.Status) + } + if len(req.Comment) > rsvpCommentMaxBytes { + return fmt.Errorf("%w: RSVP comment must be %d bytes or fewer", domain.ErrInvalidInput, rsvpCommentMaxBytes) + } + baseURL := fmt.Sprintf("%s/v3/grants/%s/events/%s/send-rsvp", c.baseURL, url.PathEscape(grantID), url.PathEscape(eventID)) queryURL := NewQueryBuilder().Add("calendar_id", calendarID).BuildURL(baseURL) payload := map[string]any{ - "status": req.Status, + "status": status, } if req.Comment != "" { payload["comment"] = req.Comment diff --git a/internal/adapters/nylas/calendars_events_rsvp_test.go b/internal/adapters/nylas/calendars_events_rsvp_test.go index cf149dc..b3020ae 100644 --- a/internal/adapters/nylas/calendars_events_rsvp_test.go +++ b/internal/adapters/nylas/calendars_events_rsvp_test.go @@ -6,8 +6,11 @@ package nylas_test import ( "context" "encoding/json" + "errors" + "io" "net/http" "net/http/httptest" + "strings" "testing" "time" @@ -179,3 +182,299 @@ func TestHTTPClient_GetAvailability(t *testing.T) { assert.NotNil(t, result) }) } + +// TestHTTPClient_SendRSVP_RejectsEmptyArgs pins the input-validation +// invariant on SendRSVP. 
Every other Get/Update method on this client +// validates required fields up-front; without these checks an empty +// grantID would form `/v3/grants//events//send-rsvp` and 404 silently +// upstream — a confusing failure for any future caller (e.g. CLI) that +// doesn't validate at its own boundary. +func TestHTTPClient_SendRSVP_RejectsEmptyArgs(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + grantID string + calendarID string + eventID string + req *domain.SendRSVPRequest + wantSubstr string + }{ + { + name: "empty grant ID", + grantID: "", + calendarID: "cal-1", + eventID: "evt-1", + req: &domain.SendRSVPRequest{Status: "yes"}, + wantSubstr: "grant ID", + }, + { + name: "empty calendar ID", + grantID: "grant-1", + calendarID: "", + eventID: "evt-1", + req: &domain.SendRSVPRequest{Status: "yes"}, + wantSubstr: "calendar ID", + }, + { + name: "empty event ID", + grantID: "grant-1", + calendarID: "cal-1", + eventID: "", + req: &domain.SendRSVPRequest{Status: "yes"}, + wantSubstr: "event ID", + }, + { + name: "nil request", + grantID: "grant-1", + calendarID: "cal-1", + eventID: "evt-1", + req: nil, + wantSubstr: "nil", + }, + { + name: "invalid status", + grantID: "grant-1", + calendarID: "cal-1", + eventID: "evt-1", + req: &domain.SendRSVPRequest{Status: "bogus"}, + wantSubstr: "yes, no, maybe", + }, + { + name: "empty status", + grantID: "grant-1", + calendarID: "cal-1", + eventID: "evt-1", + req: &domain.SendRSVPRequest{Status: ""}, + wantSubstr: "yes, no, maybe", + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + called := false + server := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) { + called = true + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + err := client.SendRSVP(context.Background(), tc.grantID, tc.calendarID, tc.eventID, tc.req) + 
require.Error(t, err, "validation must fail closed before any HTTP request") + assert.Contains(t, strings.ToLower(err.Error()), strings.ToLower(tc.wantSubstr), + "error %q must mention %q so callers can diagnose", err.Error(), tc.wantSubstr) + assert.False(t, called, "SendRSVP must not issue any HTTP request when validation fails") + }) + } +} + +// TestHTTPClient_SendRSVP_OmitsEmptyComment pins the contract that an +// empty comment is NOT serialized into the JSON body. Nylas v3 treats a +// "comment":"" field as a literal empty comment-line, which Gmail then +// renders as a blank line under the attendee name in the organiser's +// notification email — visible UX rot from the user's perspective. +func TestHTTPClient_SendRSVP_OmitsEmptyComment(t *testing.T) { + t.Parallel() + + var rawBody string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // io.ReadAll is required because (a) r.Body is a stream — a + // single Read() is not guaranteed to fill the buffer, and (b) + // r.ContentLength can be -1 for chunked requests, which would + // panic make([]byte, -1). + body, _ := io.ReadAll(r.Body) + rawBody = string(body) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + err := client.SendRSVP(context.Background(), "grant-1", "cal-1", "evt-1", &domain.SendRSVPRequest{ + Status: "yes", + }) + require.NoError(t, err) + assert.NotContains(t, rawBody, `"comment"`, + "empty comment must be omitted from the request body to avoid rendering a blank attendee comment in the organiser's notification; raw=%s", rawBody) +} + +// TestHTTPClient_SendRSVP_PropagatesUpstreamError pins the 4xx/5xx +// surface area: a Nylas error during send-rsvp must surface as a real +// error (not nil). Air's handler relies on this to return 502 to the +// browser instead of a misleading 200. 
+func TestHTTPClient_SendRSVP_PropagatesUpstreamError(t *testing.T) { + t.Parallel() + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusForbidden) + _, _ = w.Write([]byte(`{"error":"calendar permission denied"}`)) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + err := client.SendRSVP(context.Background(), "grant-1", "cal-1", "evt-1", &domain.SendRSVPRequest{ + Status: "yes", + }) + require.Error(t, err, "adapter must surface non-2xx responses as errors") +} + +// TestHTTPClient_SendRSVP_ValidationErrorsWrapErrInvalidInput pins that +// every input-validation failure wraps domain.ErrInvalidInput so callers +// can classify the failure with errors.Is. Without the wrap, the rest of +// this client (which IS consistent) becomes impossible to align against — +// CLI / Air handlers can no longer return a uniform 4xx envelope for +// validation failures coming out of any adapter method. 
+func TestHTTPClient_SendRSVP_ValidationErrorsWrapErrInvalidInput(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + grantID string + calendarID string + eventID string + req *domain.SendRSVPRequest + }{ + {name: "empty grant", grantID: "", calendarID: "c", eventID: "e", req: &domain.SendRSVPRequest{Status: "yes"}}, + {name: "empty calendar", grantID: "g", calendarID: "", eventID: "e", req: &domain.SendRSVPRequest{Status: "yes"}}, + {name: "empty event", grantID: "g", calendarID: "c", eventID: "", req: &domain.SendRSVPRequest{Status: "yes"}}, + {name: "nil request", grantID: "g", calendarID: "c", eventID: "e", req: nil}, + {name: "bad status", grantID: "g", calendarID: "c", eventID: "e", req: &domain.SendRSVPRequest{Status: "definitely"}}, + {name: "oversized comment", grantID: "g", calendarID: "c", eventID: "e", req: &domain.SendRSVPRequest{ + Status: "yes", + Comment: strings.Repeat("x", 2000), + }}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL("http://invalid.example") // never reached + + err := client.SendRSVP(context.Background(), tc.grantID, tc.calendarID, tc.eventID, tc.req) + require.Error(t, err) + assert.ErrorIs(t, err, domain.ErrInvalidInput, + "validation error %q must wrap domain.ErrInvalidInput so callers can classify it", err) + }) + } +} + +// TestHTTPClient_SendRSVP_NormalizesStatusCase pins that adapter-level +// validation accepts "YES" / "Yes" / " maybe " — a CLI or future SDK +// caller that doesn't normalize at its own layer must not be rejected +// for a cosmetic difference. Nylas itself only accepts lowercase; the +// adapter must lowercase before forwarding. 
+func TestHTTPClient_SendRSVP_NormalizesStatusCase(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + input string + wantPayload string + }{ + {name: "uppercase", input: "YES", wantPayload: "yes"}, + {name: "titlecase", input: "Maybe", wantPayload: "maybe"}, + {name: "padded", input: " no ", wantPayload: "no"}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + var rawBody string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + rawBody = string(body) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + err := client.SendRSVP(context.Background(), "g", "c", "e", &domain.SendRSVPRequest{Status: tc.input}) + require.NoError(t, err) + assert.Contains(t, rawBody, `"status":"`+tc.wantPayload+`"`, + "expected normalized status %q in body, got %s", tc.wantPayload, rawBody) + }) + } +} + +// TestHTTPClient_SendRSVP_ForwardsCommentExactly pins that a non-empty +// comment is forwarded byte-for-byte (modulo JSON encoding) — quoting, +// special characters, multi-byte unicode all round-trip. Without this +// pin a future refactor that "cleans up" the comment (e.g. stripping +// quotes, ASCII-only) would silently corrupt user-typed messages on +// the way to the organiser's inbox. 
+func TestHTTPClient_SendRSVP_ForwardsCommentExactly(t *testing.T) { + t.Parallel() + tricky := `He said "hi" — and \\ then 🎉` + + var rawBody string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + rawBody = string(body) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + err := client.SendRSVP(context.Background(), "g", "c", "e", &domain.SendRSVPRequest{ + Status: "yes", + Comment: tricky, + }) + require.NoError(t, err) + + // Decode the wire body to compare against the input post-JSON. + var got struct { + Status string `json:"status"` + Comment string `json:"comment"` + } + require.NoError(t, json.Unmarshal([]byte(rawBody), &got), + "server-side body must be valid JSON, got %q", rawBody) + assert.Equal(t, tricky, got.Comment, + "adapter must forward comment exactly; quotes/backslash/emoji should round-trip") + assert.Equal(t, "yes", got.Status) +} + +// TestHTTPClient_SendRSVP_RejectsOversizedComment pins the +// defense-in-depth comment cap at the adapter boundary. The Air handler +// has a matching cap, but CLI / SDK consumers go straight through the +// adapter — without this the cap is single-layer. 
+func TestHTTPClient_SendRSVP_RejectsOversizedComment(t *testing.T) { + t.Parallel() + + called := false + server := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) { + called = true + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + err := client.SendRSVP(context.Background(), "g", "c", "e", &domain.SendRSVPRequest{ + Status: "yes", + Comment: strings.Repeat("x", 2000), + }) + require.Error(t, err) + assert.True(t, errors.Is(err, domain.ErrInvalidInput), + "oversized comment must surface as ErrInvalidInput") + assert.False(t, called, "adapter must short-circuit before issuing any HTTP request") +} diff --git a/internal/adapters/nylas/calendars_events_test.go b/internal/adapters/nylas/calendars_events_test.go index c438bbb..0253b3a 100644 --- a/internal/adapters/nylas/calendars_events_test.go +++ b/internal/adapters/nylas/calendars_events_test.go @@ -180,6 +180,17 @@ func TestHTTPClient_GetEventsWithCursor(t *testing.T) { }, wantQueryKeys: []string{"page_token"}, }, + { + // ical_uid is the bridge between an emailed invite and a Nylas + // event ID; the RSVP handler relies on the upstream filter so + // we don't have to scan the whole calendar. + name: "includes ical_uid filter", + params: &domain.EventQueryParams{ + Limit: 1, + ICalUID: "abc-123@example.com", + }, + wantQueryKeys: []string{"ical_uid"}, + }, } for _, tt := range tests { @@ -206,6 +217,29 @@ func TestHTTPClient_GetEventsWithCursor(t *testing.T) { }) } + t.Run("ical_uid value is forwarded verbatim", func(t *testing.T) { + // Real iCal UIDs frequently contain '@' and '.', and Outlook + // occasionally emits ones with hex-tagged suffixes. URL-escaping + // must not corrupt them before Nylas sees the filter. 
+ uid := "0123-DEF@calendar.example.com" + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, uid, r.URL.Query().Get("ical_uid")) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]any{"data": []any{}}) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + _, err := client.GetEventsWithCursor(context.Background(), "grant-123", "cal-123", &domain.EventQueryParams{ + Limit: 1, + ICalUID: uid, + }) + require.NoError(t, err) + }) + t.Run("returns pagination info", func(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { response := map[string]any{ diff --git a/internal/adapters/nylas/client_types_test.go b/internal/adapters/nylas/client_types_test.go index dac67d8..988c521 100644 --- a/internal/adapters/nylas/client_types_test.go +++ b/internal/adapters/nylas/client_types_test.go @@ -150,14 +150,16 @@ func TestCreateFolderRequest(t *testing.T) { func TestUpdateMessageRequest(t *testing.T) { unread := false starred := true + folders := []string{"folder-1", "folder-2"} req := &domain.UpdateMessageRequest{ Unread: &unread, Starred: &starred, - Folders: []string{"folder-1", "folder-2"}, + Folders: folders, } assert.False(t, *req.Unread) assert.True(t, *req.Starred) + require.NotNil(t, req.Folders) assert.Len(t, req.Folders, 2) } diff --git a/internal/adapters/nylas/messages.go b/internal/adapters/nylas/messages.go index 32948f7..e005b5e 100644 --- a/internal/adapters/nylas/messages.go +++ b/internal/adapters/nylas/messages.go @@ -173,7 +173,8 @@ func (c *HTTPClient) UpdateMessage(ctx context.Context, grantID, messageID strin if req.Starred != nil { payload["starred"] = *req.Starred } - if len(req.Folders) > 0 { + // nil = leave alone; non-nil (incl. empty) = set; empty archives in Gmail. 
+ if req.Folders != nil { payload["folders"] = req.Folders } diff --git a/internal/adapters/nylas/messages_update_test.go b/internal/adapters/nylas/messages_update_test.go index 258edb9..0f7b20f 100644 --- a/internal/adapters/nylas/messages_update_test.go +++ b/internal/adapters/nylas/messages_update_test.go @@ -42,20 +42,34 @@ func TestHTTPClient_UpdateMessage(t *testing.T) { }, { name: "moves to folders", - request: &domain.UpdateMessageRequest{ - Folders: []string{"Archive", "Important"}, - }, + request: func() *domain.UpdateMessageRequest { + folders := []string{"Archive", "Important"} + return &domain.UpdateMessageRequest{Folders: folders} + }(), wantFields: map[string]any{"folders": []string{"Archive", "Important"}}, }, + { + // Regression: Gmail archive (drop INBOX) sends folders:[]. The + // adapter must forward an explicit empty array — silently + // dropping it leaves the message in the inbox while the UI + // reports success. + name: "archives to empty folders (gmail)", + request: func() *domain.UpdateMessageRequest { + empty := []string{} + return &domain.UpdateMessageRequest{Folders: empty} + }(), + wantFields: map[string]any{"folders": []any{}}, + }, { name: "updates multiple fields", request: func() *domain.UpdateMessageRequest { unread := true starred := true + folders := []string{"INBOX"} return &domain.UpdateMessageRequest{ Unread: &unread, Starred: &starred, - Folders: []string{"INBOX"}, + Folders: folders, } }(), wantFields: map[string]any{ @@ -106,6 +120,106 @@ func TestHTTPClient_UpdateMessage(t *testing.T) { } } +// TestHTTPClient_UpdateMessage_ForwardsEmptyFolders pins the Gmail-archive +// contract: when the caller passes []string{} (drop all labels), the PUT +// body MUST contain "folders":[] verbatim. The previous len()>0 guard +// silently elided the array, so archive succeeded in the UI but never +// happened upstream — a particularly nasty class of bug because the +// browser optimistically removes the row before the server lies. 
Today +// the adapter uses `!= nil` so a non-nil empty slice forwards correctly +// while nil is skipped (leave-alone). +func TestHTTPClient_UpdateMessage_ForwardsEmptyFolders(t *testing.T) { + t.Parallel() + + var rawBody string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + raw, _ := io.ReadAll(r.Body) + rawBody = string(raw) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]any{ + "data": map[string]any{ + "id": "msg-456", + "grant_id": "grant-123", + "subject": "Archived", + "date": 1704067200, + }, + }) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + _, err := client.UpdateMessage(context.Background(), "grant-123", "msg-456", &domain.UpdateMessageRequest{ + Folders: []string{}, + }) + require.NoError(t, err) + + // Decode and assert the exact key shape — a nil Folders is skipped + // by the adapter so the key would be absent; only a non-nil empty + // slice produces "folders":[]. + var parsed map[string]any + require.NoError(t, json.Unmarshal([]byte(rawBody), &parsed)) + folders, present := parsed["folders"] + require.True(t, present, "folders key must be present so Gmail archive reaches the API; raw=%s", rawBody) + assert.Equal(t, []any{}, folders, "folders must serialize as an explicit empty array; got %#v", folders) +} + +// TestHTTPClient_UpdateMessage_EmptyFolders_PropagatesUpstreamError pins +// that a 4xx from Nylas on an archive PUT (e.g. Gmail rate limit, label +// permission denied) is surfaced as a real error rather than silently +// reported as success. The optimistic UI in Air relies on this — without +// error propagation the row stays visually archived while the server +// rejects the change, and the next refresh re-introduces the email. 
+func TestHTTPClient_UpdateMessage_EmptyFolders_PropagatesUpstreamError(t *testing.T) { + t.Parallel() + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Pin that we even saw the request (no early exit on empty + // folders) — a regression that elided the body would never + // reach this assertion. + raw, _ := io.ReadAll(r.Body) + assert.Contains(t, string(raw), `"folders":[]`, + "adapter must still send the empty-folders body even when the upstream errors; raw=%s", string(raw)) + w.WriteHeader(http.StatusForbidden) + _, _ = w.Write([]byte(`{"error":"label permission denied"}`)) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + _, err := client.UpdateMessage(context.Background(), "grant-123", "msg-456", &domain.UpdateMessageRequest{ + Folders: []string{}, + }) + require.Error(t, err, "4xx upstream must surface as a real error, not nil") +} + +// TestHTTPClient_UpdateMessage_EmptyFolders_PropagatesServerError pins +// the 5xx path: a transient Nylas outage during archive must not be +// silently swallowed. Air's offline queue keys off this error to retry +// the action when connectivity returns. 
+func TestHTTPClient_UpdateMessage_EmptyFolders_PropagatesServerError(t *testing.T) { + t.Parallel() + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusBadGateway) + _, _ = w.Write([]byte(`{"error":"upstream timeout"}`)) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + _, err := client.UpdateMessage(context.Background(), "grant-123", "msg-456", &domain.UpdateMessageRequest{ + Folders: []string{}, + }) + require.Error(t, err, "5xx upstream must surface as a real error so the offline queue can retry") +} + func TestHTTPClient_UpdateMessage_RetriesReplayBody(t *testing.T) { t.Parallel() diff --git a/internal/adapters/nylas/mock_client.go b/internal/adapters/nylas/mock_client.go index ce3f634..6020975 100644 --- a/internal/adapters/nylas/mock_client.go +++ b/internal/adapters/nylas/mock_client.go @@ -154,14 +154,16 @@ type MockClient struct { DeleteWorkflowFunc func(ctx context.Context, scope domain.RemoteScope, grantID, workflowID string) error // Calendar functions - GetCalendarsFunc func(ctx context.Context, grantID string) ([]domain.Calendar, error) - GetEventsFunc func(ctx context.Context, grantID, calendarID string, params *domain.EventQueryParams) ([]domain.Event, error) - GetEventFunc func(ctx context.Context, grantID, calendarID, eventID string) (*domain.Event, error) - CreateEventFunc func(ctx context.Context, grantID, calendarID string, req *domain.CreateEventRequest) (*domain.Event, error) - UpdateEventFunc func(ctx context.Context, grantID, calendarID, eventID string, req *domain.UpdateEventRequest) (*domain.Event, error) - DeleteEventFunc func(ctx context.Context, grantID, calendarID, eventID string) error - GetFreeBusyFunc func(ctx context.Context, grantID string, req *domain.FreeBusyRequest) (*domain.FreeBusyResponse, error) - GetAvailabilityFunc func(ctx context.Context, 
req *domain.AvailabilityRequest) (*domain.AvailabilityResponse, error) + GetCalendarsFunc func(ctx context.Context, grantID string) ([]domain.Calendar, error) + GetEventsFunc func(ctx context.Context, grantID, calendarID string, params *domain.EventQueryParams) ([]domain.Event, error) + GetEventsWithCursorFunc func(ctx context.Context, grantID, calendarID string, params *domain.EventQueryParams) (*domain.EventListResponse, error) + GetEventFunc func(ctx context.Context, grantID, calendarID, eventID string) (*domain.Event, error) + CreateEventFunc func(ctx context.Context, grantID, calendarID string, req *domain.CreateEventRequest) (*domain.Event, error) + UpdateEventFunc func(ctx context.Context, grantID, calendarID, eventID string, req *domain.UpdateEventRequest) (*domain.Event, error) + DeleteEventFunc func(ctx context.Context, grantID, calendarID, eventID string) error + SendRSVPFunc func(ctx context.Context, grantID, calendarID, eventID string, req *domain.SendRSVPRequest) error + GetFreeBusyFunc func(ctx context.Context, grantID string, req *domain.FreeBusyRequest) (*domain.FreeBusyResponse, error) + GetAvailabilityFunc func(ctx context.Context, req *domain.AvailabilityRequest) (*domain.AvailabilityResponse, error) // Contact functions GetContactsFunc func(ctx context.Context, grantID string, params *domain.ContactQueryParams) ([]domain.Contact, error) diff --git a/internal/adapters/nylas/mock_events.go b/internal/adapters/nylas/mock_events.go index 78c90a0..38ad740 100644 --- a/internal/adapters/nylas/mock_events.go +++ b/internal/adapters/nylas/mock_events.go @@ -15,6 +15,9 @@ func (m *MockClient) GetEvents(ctx context.Context, grantID, calendarID string, // GetEventsWithCursor retrieves events with pagination. 
func (m *MockClient) GetEventsWithCursor(ctx context.Context, grantID, calendarID string, params *domain.EventQueryParams) (*domain.EventListResponse, error) { + if m.GetEventsWithCursorFunc != nil { + return m.GetEventsWithCursorFunc(ctx, grantID, calendarID, params) + } return &domain.EventListResponse{Data: []domain.Event{}}, nil } @@ -67,6 +70,9 @@ func (m *MockClient) DeleteEvent(ctx context.Context, grantID, calendarID, event // SendRSVP sends an RSVP response to an event invitation. func (m *MockClient) SendRSVP(ctx context.Context, grantID, calendarID, eventID string, req *domain.SendRSVPRequest) error { + if m.SendRSVPFunc != nil { + return m.SendRSVPFunc(ctx, grantID, calendarID, eventID, req) + } return nil } diff --git a/internal/adapters/nylas/threads.go b/internal/adapters/nylas/threads.go index be4d878..0a0e9a2 100644 --- a/internal/adapters/nylas/threads.go +++ b/internal/adapters/nylas/threads.go @@ -89,7 +89,8 @@ func (c *HTTPClient) UpdateThread(ctx context.Context, grantID, threadID string, if req.Starred != nil { payload["starred"] = *req.Starred } - if len(req.Folders) > 0 { + // nil = leave alone; non-nil (incl. empty) = set; empty archives in Gmail. 
+ if req.Folders != nil { payload["folders"] = req.Folders } diff --git a/internal/adapters/nylas/threads_http_test.go b/internal/adapters/nylas/threads_http_test.go index 9e5f1ae..7342b11 100644 --- a/internal/adapters/nylas/threads_http_test.go +++ b/internal/adapters/nylas/threads_http_test.go @@ -6,6 +6,7 @@ package nylas_test import ( "context" "encoding/json" + "io" "net/http" "net/http/httptest" "testing" @@ -342,9 +343,10 @@ func TestHTTPClient_UpdateThread(t *testing.T) { }, { name: "moves thread to folders", - request: &domain.UpdateMessageRequest{ - Folders: []string{"Archive", "Work"}, - }, + request: func() *domain.UpdateMessageRequest { + folders := []string{"Archive", "Work"} + return &domain.UpdateMessageRequest{Folders: folders} + }(), wantFields: []string{"folders"}, }, { @@ -352,10 +354,11 @@ func TestHTTPClient_UpdateThread(t *testing.T) { request: func() *domain.UpdateMessageRequest { unread := true starred := true + folders := []string{"INBOX"} return &domain.UpdateMessageRequest{ Unread: &unread, Starred: &starred, - Folders: []string{"INBOX"}, + Folders: folders, } }(), wantFields: []string{"unread", "starred", "folders"}, @@ -404,6 +407,95 @@ func TestHTTPClient_UpdateThread(t *testing.T) { } } +// TestHTTPClient_UpdateThread_ForwardsEmptyFolders is the thread-level +// twin of the same test on UpdateMessage: archiving a Gmail thread (drop +// every label) sends &[]string{} to the adapter and the resulting PUT +// body MUST contain "folders":[] verbatim. UpdateThread shares the same +// "build payload then PUT" code shape as UpdateMessage, so a future +// refactor that re-introduces the `len(req.Folders) > 0` guard would +// silently regress thread archive on Gmail without this test. 
+func TestHTTPClient_UpdateThread_ForwardsEmptyFolders(t *testing.T) { + t.Parallel() + + var rawBody string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + raw, _ := io.ReadAll(r.Body) + rawBody = string(raw) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]any{ + "data": map[string]any{ + "id": "thread-456", + "grant_id": "grant-123", + "subject": "Archived", + }, + }) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + _, err := client.UpdateThread(context.Background(), "grant-123", "thread-456", &domain.UpdateMessageRequest{ + Folders: []string{}, + }) + require.NoError(t, err) + + var parsed map[string]any + require.NoError(t, json.Unmarshal([]byte(rawBody), &parsed)) + folders, present := parsed["folders"] + require.True(t, present, "folders key must be present so Gmail thread archive reaches the API; raw=%s", rawBody) + assert.Equal(t, []any{}, folders, "folders must serialize as an explicit empty array; got %#v", folders) +} + +// TestHTTPClient_UpdateThread_EmptyFolders_PropagatesUpstreamError is +// the thread-level twin of the same UpdateMessage test: a 4xx from +// Nylas on archive must surface as a real error so Air's optimistic UI +// can roll back, not silently report success. 
+func TestHTTPClient_UpdateThread_EmptyFolders_PropagatesUpstreamError(t *testing.T) { + t.Parallel() + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + raw, _ := io.ReadAll(r.Body) + assert.Contains(t, string(raw), `"folders":[]`, + "adapter must still send the empty-folders body even when the upstream errors; raw=%s", string(raw)) + w.WriteHeader(http.StatusForbidden) + _, _ = w.Write([]byte(`{"error":"label permission denied"}`)) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + _, err := client.UpdateThread(context.Background(), "grant-123", "thread-456", &domain.UpdateMessageRequest{ + Folders: []string{}, + }) + require.Error(t, err, "4xx upstream must surface as a real error, not nil") +} + +// TestHTTPClient_UpdateThread_EmptyFolders_PropagatesServerError pins +// the 5xx path: a transient Nylas outage during thread archive must +// surface so Air's offline queue can retry the action. 
+func TestHTTPClient_UpdateThread_EmptyFolders_PropagatesServerError(t *testing.T) { + t.Parallel() + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusBadGateway) + _, _ = w.Write([]byte(`{"error":"upstream timeout"}`)) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + _, err := client.UpdateThread(context.Background(), "grant-123", "thread-456", &domain.UpdateMessageRequest{ + Folders: []string{}, + }) + require.Error(t, err, "5xx upstream must surface as a real error so the offline queue can retry") +} + func TestHTTPClient_DeleteThread(t *testing.T) { tests := []struct { name string diff --git a/internal/adapters/nylas/threads_test.go b/internal/adapters/nylas/threads_test.go index 76315d1..965ef57 100644 --- a/internal/adapters/nylas/threads_test.go +++ b/internal/adapters/nylas/threads_test.go @@ -428,8 +428,9 @@ func TestHTTPClient_UpdateThread_WithFolders(t *testing.T) { client.SetBaseURL(server.URL) ctx := context.Background() + folders := []string{"folder-archive", "folder-done"} req := &domain.UpdateMessageRequest{ - Folders: []string{"folder-archive", "folder-done"}, + Folders: folders, } thread, err := client.UpdateThread(ctx, "grant-folders", "thread-move", req) diff --git a/internal/adapters/output/quiet.go b/internal/adapters/output/quiet.go index aeb4dad..87c6d62 100644 --- a/internal/adapters/output/quiet.go +++ b/internal/adapters/output/quiet.go @@ -30,7 +30,7 @@ func (qw *QuietWriter) Write(data any) error { // WriteList outputs IDs only, one per line. 
func (qw *QuietWriter) WriteList(data any, _ []ports.Column) error { v := reflect.ValueOf(data) - if v.Kind() == reflect.Ptr { + if v.Kind() == reflect.Pointer { v = v.Elem() } @@ -64,7 +64,7 @@ func extractQuietField(data any) string { } v := reflect.ValueOf(data) - if v.Kind() == reflect.Ptr { + if v.Kind() == reflect.Pointer { v = v.Elem() } diff --git a/internal/adapters/output/table.go b/internal/adapters/output/table.go index c8eef55..cd4c844 100644 --- a/internal/adapters/output/table.go +++ b/internal/adapters/output/table.go @@ -33,7 +33,7 @@ func NewTableWriter(w io.Writer, colored bool) *TableWriter { func (tw *TableWriter) Write(data any) error { // For single objects, output as key: value pairs v := reflect.ValueOf(data) - if v.Kind() == reflect.Ptr { + if v.Kind() == reflect.Pointer { v = v.Elem() } @@ -69,7 +69,7 @@ func (tw *TableWriter) Write(data any) error { // WriteList outputs a list of objects as a table func (tw *TableWriter) WriteList(data any, columns []ports.Column) error { v := reflect.ValueOf(data) - if v.Kind() == reflect.Ptr { + if v.Kind() == reflect.Pointer { v = v.Elem() } @@ -93,7 +93,7 @@ func (tw *TableWriter) WriteList(data any, columns []ports.Column) error { // Write rows for i := range v.Len() { row := v.Index(i) - if row.Kind() == reflect.Ptr { + if row.Kind() == reflect.Pointer { row = row.Elem() } @@ -144,7 +144,7 @@ func (tw *TableWriter) WriteError(err error) error { // getFieldValue extracts a field value from a struct or map func getFieldValue(v reflect.Value, field string) any { - if v.Kind() == reflect.Ptr { + if v.Kind() == reflect.Pointer { v = v.Elem() } diff --git a/internal/air/cache/offline.go b/internal/air/cache/offline.go index 15836e7..96afed6 100644 --- a/internal/air/cache/offline.go +++ b/internal/air/cache/offline.go @@ -262,13 +262,15 @@ type MarkReadPayload struct { Unread bool `json:"unread"` } -// UpdateMessagePayload is the payload for a generic message update. 
+// UpdateMessagePayload is the queued form of domain.UpdateMessageRequest. +// Folders has no `omitempty` so the nil-vs-empty distinction round-trips +// through SQLite; see domain.UpdateMessageRequest. type UpdateMessagePayload struct { GrantID string `json:"grant_id,omitempty"` EmailID string `json:"email_id"` Unread *bool `json:"unread,omitempty"` Starred *bool `json:"starred,omitempty"` - Folders []string `json:"folders,omitempty"` + Folders []string `json:"folders"` } // StarPayload is the payload for star/unstar actions. diff --git a/internal/air/handlers_availability.go b/internal/air/handlers_availability.go index 5a30d51..d014eeb 100644 --- a/internal/air/handlers_availability.go +++ b/internal/air/handlers_availability.go @@ -1,7 +1,9 @@ package air import ( + "fmt" "net/http" + "net/url" "strconv" "strings" "time" @@ -13,6 +15,31 @@ import ( // AVAILABILITY & FIND TIME HANDLERS // ==================================== +// parseInt64Param reads an int64 query parameter; an absent param yields zero. +func parseInt64Param(query url.Values, key string) (int64, error) { + raw := query.Get(key) + if raw == "" { + return 0, nil + } + v, err := strconv.ParseInt(raw, 10, 64) + if err != nil { + return 0, fmt.Errorf("invalid %s: %q is not a valid integer", key, raw) + } + return v, nil +} + +func parseIntParam(query url.Values, key string) (int, error) { + raw := query.Get(key) + if raw == "" { + return 0, nil + } + v, err := strconv.Atoi(raw) + if err != nil { + return 0, fmt.Errorf("invalid %s: %q is not a valid integer", key, raw) + } + return v, nil +} + // AvailabilityRequest represents a request to find available times. type AvailabilityRequest struct { StartTime int64 `json:"start_time"` @@ -75,7 +102,7 @@ type EventConflict struct { // handleAvailability finds available meeting times. 
func (s *Server) handleAvailability(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet && r.Method != http.MethodPost { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + writeError(w, http.StatusMethodNotAllowed, "Method not allowed") return } @@ -108,22 +135,30 @@ func (s *Server) handleAvailability(w http.ResponseWriter, r *http.Request) { return } } else { - // Parse from query params for GET + // Parse from query params for GET. We validate each numeric param + // rather than swallowing errors — the previous version silently + // fell through to the default 7-day window when callers sent + // malformed input, which masked client bugs in test harnesses. query := r.URL.Query() - if startStr := query.Get("start_time"); startStr != "" { - req.StartTime, _ = strconv.ParseInt(startStr, 10, 64) + var perr error + if req.StartTime, perr = parseInt64Param(query, "start_time"); perr != nil { + writeBadParamError(w, "start_time", perr) + return } - if endStr := query.Get("end_time"); endStr != "" { - req.EndTime, _ = strconv.ParseInt(endStr, 10, 64) + if req.EndTime, perr = parseInt64Param(query, "end_time"); perr != nil { + writeBadParamError(w, "end_time", perr) + return } - if durationStr := query.Get("duration_minutes"); durationStr != "" { - req.DurationMinutes, _ = strconv.Atoi(durationStr) + if req.DurationMinutes, perr = parseIntParam(query, "duration_minutes"); perr != nil { + writeBadParamError(w, "duration_minutes", perr) + return } if participants := query.Get("participants"); participants != "" { req.Participants = strings.Split(participants, ",") } - if intervalStr := query.Get("interval_minutes"); intervalStr != "" { - req.IntervalMinutes, _ = strconv.Atoi(intervalStr) + if req.IntervalMinutes, perr = parseIntParam(query, "interval_minutes"); perr != nil { + writeBadParamError(w, "interval_minutes", perr) + return } } @@ -180,9 +215,8 @@ func (s *Server) handleAvailability(w http.ResponseWriter, r *http.Request) { 
result, err := s.nylasClient.GetAvailability(ctx, domainReq) if err != nil { - writeJSON(w, http.StatusInternalServerError, map[string]string{ - "error": "Failed to get availability: " + err.Error(), - }) + writeUpstreamError(w, http.StatusInternalServerError, + "Failed to get availability — please try again", err) return } @@ -204,7 +238,7 @@ func (s *Server) handleAvailability(w http.ResponseWriter, r *http.Request) { // handleFreeBusy returns free/busy information for participants. func (s *Server) handleFreeBusy(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet && r.Method != http.MethodPost { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + writeError(w, http.StatusMethodNotAllowed, "Method not allowed") return } @@ -238,13 +272,17 @@ func (s *Server) handleFreeBusy(w http.ResponseWriter, r *http.Request) { return } } else { - // Parse from query params for GET + // Parse from query params for GET (see availability handler for + // rationale on validating instead of swallowing parse errors). 
query := r.URL.Query() - if startStr := query.Get("start_time"); startStr != "" { - req.StartTime, _ = strconv.ParseInt(startStr, 10, 64) + var perr error + if req.StartTime, perr = parseInt64Param(query, "start_time"); perr != nil { + writeBadParamError(w, "start_time", perr) + return } - if endStr := query.Get("end_time"); endStr != "" { - req.EndTime, _ = strconv.ParseInt(endStr, 10, 64) + if req.EndTime, perr = parseInt64Param(query, "end_time"); perr != nil { + writeBadParamError(w, "end_time", perr) + return } if emails := query.Get("emails"); emails != "" { req.Emails = strings.Split(emails, ",") @@ -287,9 +325,8 @@ func (s *Server) handleFreeBusy(w http.ResponseWriter, r *http.Request) { result, err := s.nylasClient.GetFreeBusy(ctx, grantID, domainReq) if err != nil { - writeJSON(w, http.StatusInternalServerError, map[string]string{ - "error": "Failed to get free/busy: " + err.Error(), - }) + writeUpstreamError(w, http.StatusInternalServerError, + "Failed to get free/busy — please try again", err) return } @@ -361,12 +398,15 @@ func (s *Server) handleConflicts(w http.ResponseWriter, r *http.Request) { } // Parse time range - var startTime, endTime int64 - if startStr := query.Get("start_time"); startStr != "" { - startTime, _ = strconv.ParseInt(startStr, 10, 64) + startTime, perr := parseInt64Param(query, "start_time") + if perr != nil { + writeBadParamError(w, "start_time", perr) + return } - if endStr := query.Get("end_time"); endStr != "" { - endTime, _ = strconv.ParseInt(endStr, 10, 64) + endTime, perr := parseInt64Param(query, "end_time") + if perr != nil { + writeBadParamError(w, "end_time", perr) + return } // Default to current week @@ -392,9 +432,9 @@ func (s *Server) handleConflicts(w http.ResponseWriter, r *http.Request) { result, err := s.nylasClient.GetEventsWithCursor(ctx, grantID, calendarID, params) if err != nil { - writeJSON(w, http.StatusInternalServerError, map[string]string{ - "error": "Failed to fetch events: " + err.Error(), - }) + 
writeUpstreamError(w, http.StatusInternalServerError, + "Failed to fetch events — please try again", err, + "calendarID", calendarID) return } diff --git a/internal/air/handlers_availability_test.go b/internal/air/handlers_availability_test.go index 16386d7..8680bc8 100644 --- a/internal/air/handlers_availability_test.go +++ b/internal/air/handlers_availability_test.go @@ -6,12 +6,203 @@ import ( "encoding/json" "net/http" "net/http/httptest" + "net/url" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +// TestParseInt64Param pins the contract that empty values are treated as +// "not provided" (zero, no error) but malformed values surface a clear +// error — the previous handler-level `_ = strconv.ParseInt(...)` form +// silently coerced both into zero, which masked client bugs. +func TestParseInt64Param(t *testing.T) { + t.Parallel() + cases := []struct { + name string + value string + want int64 + wantErr bool + }{ + {"empty is zero, no error", "", 0, false}, + {"valid integer", "1700000000", 1700000000, false}, + {"negative valid", "-42", -42, false}, + {"non-numeric", "tomorrow", 0, true}, + {"trailing junk", "12abc", 0, true}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + q := url.Values{} + if tc.value != "" { + q.Set("k", tc.value) + } + got, err := parseInt64Param(q, "k") + if tc.wantErr { + assert.Error(t, err) + assert.Contains(t, err.Error(), "k") + } else { + require.NoError(t, err) + assert.Equal(t, tc.want, got) + } + }) + } +} + +// TestHandleAvailability_BadIntegerParams verifies the regression: bad +// query params now produce 400 Bad Request instead of being silently +// substituted by the "next 7 days" default. We use a fully configured +// server (mock client + a default grant) so the request reaches the +// param-validation stage instead of bouncing on requireConfig. 
+func TestHandleAvailability_BadIntegerParams(t *testing.T) { + t.Parallel() + cases := []struct { + name string + url string + }{ + {"bad start_time", "/api/availability?start_time=tomorrow"}, + {"bad end_time", "/api/availability?end_time=never"}, + {"bad duration_minutes", "/api/availability?duration_minutes=halfhour"}, + {"bad interval_minutes", "/api/availability?interval_minutes=fifteenish"}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + s, _, _ := newCachedTestServer(t) + req := httptest.NewRequest(http.MethodGet, tc.url, nil) + w := httptest.NewRecorder() + s.handleAvailability(w, req) + assert.Equal(t, http.StatusBadRequest, w.Code, "body=%s", w.Body.String()) + }) + } +} + +func TestHandleFreeBusy_BadIntegerParams(t *testing.T) { + t.Parallel() + s, _, _ := newCachedTestServer(t) + req := httptest.NewRequest(http.MethodGet, "/api/calendars/freebusy?start_time=oops", nil) + w := httptest.NewRecorder() + s.handleFreeBusy(w, req) + assert.Equal(t, http.StatusBadRequest, w.Code, "body=%s", w.Body.String()) +} + +// TestHandleConflicts_BadIntegerParams covers the parity gap left by the +// air-i003 review-pass: handleAvailability and handleFreeBusy got +// parseInt64Param-based validation AND BadIntegerParams tests, but +// handleConflicts only got the validation. A future refactor that +// re-introduces `_, _ = strconv.ParseInt` here would silently fall +// through to the default "current week" window with no test failure. 
+func TestHandleConflicts_BadIntegerParams(t *testing.T) { + t.Parallel() + cases := []struct { + name string + url string + }{ + {"bad start_time", "/api/calendar/conflicts?start_time=tomorrow"}, + {"bad end_time", "/api/calendar/conflicts?end_time=never"}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + s, _, _ := newCachedTestServer(t) + req := httptest.NewRequest(http.MethodGet, tc.url, nil) + w := httptest.NewRecorder() + s.handleConflicts(w, req) + assert.Equal(t, http.StatusBadRequest, w.Code, "body=%s", w.Body.String()) + }) + } +} + +// TestHandleAvailability_BadParams_DoesNotEchoRawValue pins the privacy +// contract on the query-param error path. `parseInt64Param` builds +// +// fmt.Errorf("invalid %s: %q is not a valid integer", key, raw) +// +// and the handler hands that error string straight to the client via +// `writeError(w, http.StatusBadRequest, perr.Error())`. The %q-encoded +// `raw` is the attacker's input echoed back into a JSON response — +// reflective surface that contradicts the writeUpstreamError +// redaction discipline this PR introduced one file over. +// +// EXPECTED FAILURE today: the response body contains the literal raw +// query value. After the fix the body should be a generic message +// (e.g., "invalid start_time") and the raw value should appear only in +// slog attrs. 
+func TestHandleAvailability_BadParams_DoesNotEchoRawValue(t *testing.T) { + t.Parallel() + const sentinel = "tomorrow-XYZ-canary" + s, _, _ := newCachedTestServer(t) + req := httptest.NewRequest(http.MethodGet, + "/api/availability?start_time="+sentinel, nil) + w := httptest.NewRecorder() + s.handleAvailability(w, req) + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.NotContains(t, w.Body.String(), sentinel, + "response must not reflect raw query value back to client") +} + +func TestHandleFreeBusy_BadParams_DoesNotEchoRawValue(t *testing.T) { + t.Parallel() + const sentinel = "never-XYZ-canary" + s, _, _ := newCachedTestServer(t) + req := httptest.NewRequest(http.MethodGet, + "/api/calendars/freebusy?start_time="+sentinel, nil) + w := httptest.NewRecorder() + s.handleFreeBusy(w, req) + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.NotContains(t, w.Body.String(), sentinel, + "response must not reflect raw query value back to client") +} + +func TestHandleConflicts_BadParams_DoesNotEchoRawValue(t *testing.T) { + t.Parallel() + const sentinel = "yesterday-XYZ-canary" + s, _, _ := newCachedTestServer(t) + req := httptest.NewRequest(http.MethodGet, + "/api/calendar/conflicts?start_time="+sentinel, nil) + w := httptest.NewRecorder() + s.handleConflicts(w, req) + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.NotContains(t, w.Body.String(), sentinel, + "response must not reflect raw query value back to client") +} + +// TestHandleAvailability_BadDurationMinutes_DoesNotEchoRawValue closes +// the parity gap in the privacy sweep. Today the sweep covers +// start_time/end_time but not duration_minutes/interval_minutes — +// handlers_availability.go:160-169 routes all four through the same +// writeBadParamError helper, so a regression that inlines +// `writeError(perr.Error())` at one of the inner branches would escape +// the existing tests. Lock-down: same code path, same canary. 
+func TestHandleAvailability_BadDurationMinutes_DoesNotEchoRawValue(t *testing.T) { + t.Parallel() + const sentinel = "halfhour-XYZ-duration-canary" + s, _, _ := newCachedTestServer(t) + req := httptest.NewRequest(http.MethodGet, + "/api/availability?duration_minutes="+sentinel, nil) + w := httptest.NewRecorder() + s.handleAvailability(w, req) + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.NotContains(t, w.Body.String(), sentinel, + "duration_minutes raw value must not be reflected back to the client") +} + +func TestHandleAvailability_BadIntervalMinutes_DoesNotEchoRawValue(t *testing.T) { + t.Parallel() + const sentinel = "fifteenish-XYZ-interval-canary" + s, _, _ := newCachedTestServer(t) + // IntervalMinutes is parsed AFTER start/end/duration/participants; + // supply valid values for those so we reach the interval branch. + req := httptest.NewRequest(http.MethodGet, + "/api/availability?start_time=1700000000&end_time=1700100000&duration_minutes=30&interval_minutes="+sentinel, nil) + w := httptest.NewRecorder() + s.handleAvailability(w, req) + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.NotContains(t, w.Body.String(), sentinel, + "interval_minutes raw value must not be reflected back to the client") +} + // Test handler types func TestAvailabilityRequest_Fields(t *testing.T) { req := AvailabilityRequest{ diff --git a/internal/air/handlers_bundles.go b/internal/air/handlers_bundles.go index acf3530..d27292b 100644 --- a/internal/air/handlers_bundles.go +++ b/internal/air/handlers_bundles.go @@ -2,6 +2,7 @@ package air import ( "encoding/json" + "log/slog" "net/http" "regexp" "strconv" @@ -323,7 +324,8 @@ func (s *Server) handleUpdateBundle(w http.ResponseWriter, r *http.Request) { for i, rule := range bundle.Rules { if err := validateBundleRule(rule); err != nil { - http.Error(w, "Invalid regex in rule "+strconv.Itoa(i)+": "+err.Error(), http.StatusBadRequest) + slog.Warn("invalid bundle rule regex", "rule_index", i, "err", err) + 
http.Error(w, "Invalid regex in rule "+strconv.Itoa(i), http.StatusBadRequest) return } } diff --git a/internal/air/handlers_email.go b/internal/air/handlers_email.go index b3d011d..18311fc 100644 --- a/internal/air/handlers_email.go +++ b/internal/air/handlers_email.go @@ -1,12 +1,9 @@ package air import ( - "context" - "errors" - "net" + "log/slog" "net/http" "strings" - "time" "github.com/nylas/cli/internal/air/cache" "github.com/nylas/cli/internal/domain" @@ -123,20 +120,28 @@ func (s *Server) handleListEmails(w http.ResponseWriter, r *http.Request) { return } } - writeJSON(w, http.StatusInternalServerError, map[string]string{ - "error": "Failed to fetch emails: " + err.Error(), - }) + writeUpstreamError(w, http.StatusInternalServerError, + "Failed to fetch emails — please try again", err, + "account", redactEmail(accountEmail)) return } - // Cache the results + // Cache the results. Cache write failures must not fail the request + // (the user already has the data), but a silently-wedged cache will + // drift further from server state on every refresh, so we log the + // first put error per request to keep the failure debuggable. 
if s.cacheAvailable() { - _ = s.withEmailStore(accountEmail, func(store *cache.EmailStore) error { + if cacheErr := s.withEmailStore(accountEmail, func(store *cache.EmailStore) error { + var firstErr error for i := range result.Data { - _ = store.Put(domainMessageToCached(&result.Data[i])) + if putErr := store.Put(domainMessageToCached(&result.Data[i])); putErr != nil && firstErr == nil { + firstErr = putErr + } } - return nil - }) + return firstErr + }); cacheErr != nil { + slog.Warn("email list cache fill failed", "account", redactEmail(accountEmail), "err", cacheErr) + } } // Convert to response format @@ -158,7 +163,7 @@ func (s *Server) handleEmailByID(w http.ResponseWriter, r *http.Request) { path := strings.TrimPrefix(r.URL.Path, "/api/emails/") parts := strings.Split(path, "/") if len(parts) == 0 || parts[0] == "" { - http.Error(w, "Email ID required", http.StatusBadRequest) + writeError(w, http.StatusBadRequest, "Email ID required") return } emailID := parts[0] @@ -169,6 +174,13 @@ func (s *Server) handleEmailByID(w http.ResponseWriter, r *http.Request) { s.handleEmailInvite(w, r, emailID) return } + // Sub-resource: /api/emails/{id}/rsvp accepts {status: yes|no|maybe} + // and forwards to the Nylas send-rsvp endpoint after resolving the + // invite's iCalendar UID to a Nylas event. 
+ if len(parts) > 1 && parts[1] == "rsvp" { + s.handleEmailRSVP(w, r, emailID) + return + } switch r.Method { case http.MethodGet: @@ -178,7 +190,7 @@ func (s *Server) handleEmailByID(w http.ResponseWriter, r *http.Request) { case http.MethodDelete: s.handleDeleteEmail(w, r, emailID) default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + writeError(w, http.StatusMethodNotAllowed, "Method not allowed") } } @@ -238,17 +250,25 @@ func (s *Server) handleGetEmail(w http.ResponseWriter, r *http.Request, emailID return } } - writeJSON(w, http.StatusInternalServerError, map[string]string{ - "error": "Failed to fetch email: " + err.Error(), - }) + writeUpstreamError(w, http.StatusInternalServerError, + "Failed to fetch email — please try again", err, + "emailID", emailID, "account", redactEmail(accountEmail)) return } - // Cache the result + // Cache the result. A wedged single-message cache silently drifts + // from server state on every fetch otherwise; mirror the + // handleListEmails:Cache fill failed log so support can diagnose + // from production logs without changing the user-facing 200. if s.cacheAvailable() { - _ = s.withEmailStore(accountEmail, func(store *cache.EmailStore) error { + if err := s.withEmailStore(accountEmail, func(store *cache.EmailStore) error { return store.Put(domainMessageToCached(msg)) - }) + }); err != nil { + slog.Warn("get-email cache fill failed", + "emailID", emailID, + "account", redactEmail(accountEmail), + "err", err) + } } writeJSON(w, http.StatusOK, emailToResponse(*msg, true)) @@ -285,13 +305,24 @@ func (s *Server) handleUpdateEmail(w http.ResponseWriter, r *http.Request, email Message: "Email update queued until connection is restored", }) return + } else { + // Offline AND the queue is broken. Falling through to the live + // API call is a best-effort retry (IsOnline() can be stale), + // but the queue failure itself must not be invisible — a + // silently-wedged queue will drop user actions repeatedly. 
+ slog.Warn("offline enqueue failed, attempting live API call", + "emailID", emailID, + "account", redactEmail(accountEmail), + "err", err, + ) } } _, err := s.nylasClient.UpdateMessage(ctx, grantID, emailID, updateReq) if err != nil { if s.shouldQueueEmailAction(err) { - if queueErr := s.enqueueMessageUpdate(grantID, accountEmail, emailID, updateReq); queueErr == nil { + queueErr := s.enqueueMessageUpdate(grantID, accountEmail, emailID, updateReq) + if queueErr == nil { s.SetOnline(false) s.updateCachedEmail(accountEmail, emailID, req.Unread, req.Starred, req.Folders) writeJSON(w, http.StatusOK, UpdateEmailResponse{ @@ -300,10 +331,27 @@ func (s *Server) handleUpdateEmail(w http.ResponseWriter, r *http.Request, email }) return } + // Queue write failed under a known-transient upstream error — + // the user is about to see a 500, but they also lost the + // fallback path that would have stashed their action. Log so + // the queue health regression is debuggable. + slog.Error("queue enqueue after transient API error failed", + "emailID", emailID, + "account", redactEmail(accountEmail), + "apiErr", err, + "queueErr", queueErr, + ) } + // UpdateEmailResponse envelope — frontend reads `error`. Raw err + // is logged, generic message goes to the user. + slog.Error("Failed to update email", + "err", err, + "emailID", emailID, + "account", redactEmail(accountEmail), + ) writeJSON(w, http.StatusInternalServerError, UpdateEmailResponse{ Success: false, - Error: "Failed to update email: " + err.Error(), + Error: "Failed to update email — please try again", }) return } @@ -336,6 +384,17 @@ func (s *Server) handleDeleteEmail(w http.ResponseWriter, r *http.Request, email Message: "Email delete queued until connection is restored", }) return + } else { + // Offline AND the queue is broken. 
Falling through to the live + // API call is a best-effort retry (IsOnline() can be stale), + // but the queue failure itself must not be invisible — a + // silently-wedged queue will drop user actions repeatedly. + // Mirrors handleUpdateEmail's offline-enqueue log. + slog.Warn("offline enqueue failed, attempting live API call", + "emailID", emailID, + "account", redactEmail(accountEmail), + "err", err, + ) } } @@ -350,11 +409,27 @@ func (s *Server) handleDeleteEmail(w http.ResponseWriter, r *http.Request, email Message: "Email delete queued until connection is restored", }) return + } else { + // Queue write failed under a known-transient upstream + // error. The user is about to see a 500 AND lost the + // fallback path. Co-log apiErr + queueErr so the + // double-failure is debuggable. Mirrors handleUpdateEmail. + slog.Error("queue enqueue after transient API error failed", + "emailID", emailID, + "account", redactEmail(accountEmail), + "apiErr", err, + "queueErr", queueErr, + ) } } + slog.Error("Failed to delete email", + "err", err, + "emailID", emailID, + "account", redactEmail(accountEmail), + ) writeJSON(w, http.StatusInternalServerError, UpdateEmailResponse{ Success: false, - Error: "Failed to delete email: " + err.Error(), + Error: "Failed to delete email — please try again", }) return } @@ -405,70 +480,6 @@ func cachedEmailsToResponse(cached []*cache.CachedEmail, limit int) EmailsRespon return resp } -func (s *Server) shouldQueueEmailAction(err error) bool { - if !s.offlineQueueEnabled() { - return false - } - if !s.IsOnline() { - return true - } - var netErr net.Error - return errors.As(err, &netErr) || errors.Is(err, context.DeadlineExceeded) -} - -func (s *Server) offlineQueueEnabled() bool { - return s.offlineQueueConfigured() -} - -func (s *Server) enqueueMessageUpdate(grantID, accountEmail, emailID string, updateReq *domain.UpdateMessageRequest) error { - if accountEmail == "" || !s.offlineQueueEnabled() { - return errors.New("offline queue 
unavailable") - } - - return s.withOfflineQueue(accountEmail, func(queue *cache.OfflineQueue) error { - return queue.Enqueue(cache.ActionUpdateMessage, emailID, cache.UpdateMessagePayload{ - GrantID: grantID, - EmailID: emailID, - Unread: updateReq.Unread, - Starred: updateReq.Starred, - Folders: updateReq.Folders, - }) - }) -} - -func (s *Server) enqueueMessageDelete(grantID, accountEmail, emailID string) error { - if accountEmail == "" || !s.offlineQueueEnabled() { - return errors.New("offline queue unavailable") - } - - return s.withOfflineQueue(accountEmail, func(queue *cache.OfflineQueue) error { - return queue.Enqueue(cache.ActionDelete, emailID, cache.DeleteMessagePayload{ - GrantID: grantID, - EmailID: emailID, - }) - }) -} - -func (s *Server) updateCachedEmail(accountEmail, emailID string, unread, starred *bool, folders []string) { - if accountEmail == "" || !s.cacheAvailable() { - return - } - - _ = s.withEmailStore(accountEmail, func(store *cache.EmailStore) error { - return store.UpdateMessage(emailID, unread, starred, folders) - }) -} - -func (s *Server) deleteCachedEmail(accountEmail, emailID string) { - if accountEmail == "" || !s.cacheAvailable() { - return - } - - _ = s.withEmailStore(accountEmail, func(store *cache.EmailStore) error { - return store.Delete(emailID) - }) -} - // emailToResponse converts a domain message to an API response. func emailToResponse(m domain.Message, includeBody bool) EmailResponse { resp := EmailResponse{ @@ -547,225 +558,3 @@ func cachedEmailToResponse(e *cache.CachedEmail) EmailResponse { Folders: []string{e.FolderID}, } } - -// demoEmails returns demo email data spread across multiple folders so the -// sidebar (Inbox / Sent / Drafts / Archive / Trash) actually shows different -// content per folder. Includes one calendar-invite (.ics) email so the -// calendar-invite card UI has something to render. 
-func demoEmails() []EmailResponse { - now := time.Now() - return []EmailResponse{ - // Inbox - { - ID: "demo-email-001", - Subject: "Q4 Product Roadmap Review", - Snippet: "Hi team, I've attached the updated roadmap for Q4...", - Body: "

Hi team,

I've attached the updated roadmap for Q4. Please review the timeline changes and let me know if you have any concerns.

", - From: []EmailParticipantResponse{{Name: "Sarah Chen", Email: "sarah.chen@company.com"}}, - To: []EmailParticipantResponse{{Name: "Team", Email: "team@company.com"}}, - Date: now.Add(-2 * time.Minute).Unix(), - Unread: true, - Starred: true, - Folders: []string{"inbox"}, - Attachments: []AttachmentResponse{ - {ID: "att-001", Filename: "Q4_Roadmap_v2.pdf", ContentType: "application/pdf", Size: 2516582}, - }, - }, - { - ID: "demo-email-002", - Subject: "[nylas/cli] PR #142: Add focus time feature", - Snippet: "mergify[bot] merged 1 commit into main...", - From: []EmailParticipantResponse{{Name: "GitHub", Email: "notifications@github.com"}}, - To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - Date: now.Add(-15 * time.Minute).Unix(), - Unread: true, - Starred: false, - Folders: []string{"inbox"}, - }, - { - ID: "demo-email-003", - Subject: "Re: Meeting Tomorrow", - Snippet: "That works for me. I'll send a calendar invite...", - From: []EmailParticipantResponse{{Name: "Alex Johnson", Email: "demo@example.com"}}, - To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - Date: now.Add(-1 * time.Hour).Unix(), - Unread: false, - Starred: false, - Folders: []string{"inbox"}, - }, - { - ID: "demo-email-004", - Subject: "Your December invoice is ready", - Snippet: "Your invoice for December 2024 is now available...", - From: []EmailParticipantResponse{{Name: "Stripe", Email: "billing@stripe.com"}}, - To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - Date: now.Add(-3 * time.Hour).Unix(), - Unread: false, - Starred: true, - Folders: []string{"inbox"}, - }, - { - ID: "demo-email-005", - Subject: "This week in design: AI tools reshaping...", - Snippet: "The latest trends, tools, and inspiration...", - From: []EmailParticipantResponse{{Name: "Design Weekly", Email: "newsletter@designweekly.com"}}, - To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - Date: now.Add(-5 * time.Hour).Unix(), 
- Unread: false, - Starred: false, - Folders: []string{"inbox"}, - }, - // Calendar invite (with .ics attachment) - { - ID: "demo-email-invite-001", - Subject: "Event Invitation: Quarterly Sync", - Snippet: "You have received a calendar invitation: Quarterly Sync", - Body: "

You have received a calendar invitation: Quarterly Sync

Please let me know if this time works.

", - From: []EmailParticipantResponse{{Name: "Priya Patel", Email: "priya@partner.example"}}, - To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - Date: now.Add(-30 * time.Minute).Unix(), - Unread: true, - Starred: false, - Folders: []string{"inbox"}, - Attachments: []AttachmentResponse{ - { - ID: "att-invite-001", - Filename: "invite.ics", - ContentType: "text/calendar", - Size: 1024, - }, - }, - }, - // Sent — explicitly more than one so we can prove the filter works. - { - ID: "demo-email-sent-001", - Subject: "Re: Q4 Product Roadmap Review", - Snippet: "Thanks Sarah, here are my comments on the roadmap...", - Body: "

Thanks Sarah,

Here are my comments on the roadmap. Looks good overall — happy to discuss the Q4 priorities live.

", - From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - To: []EmailParticipantResponse{{Name: "Sarah Chen", Email: "sarah.chen@company.com"}}, - Date: now.Add(-1 * time.Hour).Unix(), - Folders: []string{"sent"}, - }, - { - ID: "demo-email-sent-002", - Subject: "Pricing follow-up", - Snippet: "Hi Mike, sending the updated pricing sheet...", - Body: "

Hi Mike,

Sending the updated pricing sheet as discussed. Let me know if you need any changes.

", - From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - To: []EmailParticipantResponse{{Name: "Mike Johnson", Email: "mike@customer.example"}}, - Date: now.Add(-3 * time.Hour).Unix(), - Folders: []string{"sent"}, - }, - { - ID: "demo-email-sent-003", - Subject: "Welcome to the team!", - Snippet: "Excited to have you joining us next Monday...", - Body: "

Excited to have you joining us next Monday! Here's the on-boarding checklist.

", - From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - To: []EmailParticipantResponse{{Name: "Jamie Lee", Email: "jamie@newhire.example"}}, - Date: now.Add(-1 * 24 * time.Hour).Unix(), - Folders: []string{"sent"}, - }, - // Drafts - { - ID: "demo-email-draft-001", - Subject: "Draft: Proposal for Acme", - Snippet: "Hi Acme team, here's the rough proposal...", - Body: "

Hi Acme team,

Here's the rough proposal — still working through the timeline section.

", - From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - To: []EmailParticipantResponse{{Name: "Acme Procurement", Email: "buyers@acme.example"}}, - Date: now.Add(-4 * time.Hour).Unix(), - Folders: []string{"drafts"}, - }, - // Archive - { - ID: "demo-email-archive-001", - Subject: "Confirmation: Subscription renewed", - Snippet: "Your annual subscription has been renewed...", - Body: "

Your annual subscription has been renewed for another year.

", - From: []EmailParticipantResponse{{Name: "Acme Billing", Email: "billing@acme.example"}}, - To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - Date: now.Add(-30 * 24 * time.Hour).Unix(), - Folders: []string{"archive"}, - }, - // Trash - { - ID: "demo-email-trash-001", - Subject: "URGENT: Winning offer (don't miss out)", - Snippet: "You've been pre-selected for an exclusive offer...", - Body: "

You've been pre-selected.

", - From: []EmailParticipantResponse{{Name: "Promo Bot", Email: "deals@spammy.example"}}, - To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, - Date: now.Add(-7 * 24 * time.Hour).Unix(), - Folders: []string{"trash"}, - }, - } -} - -// filterDemoEmails applies folder/unread/starred filters to a demo email -// list. Folder matching is case-insensitive against email.Folders entries -// and against well-known aliases (e.g., "SENT" → "sent", "Sent Items" -// → "sent"). Empty folder string means "no folder filter." -func filterDemoEmails(emails []EmailResponse, folder string, onlyUnread, onlyStarred bool) []EmailResponse { - target := normalizeDemoFolder(folder) - out := make([]EmailResponse, 0, len(emails)) - for _, e := range emails { - if onlyUnread && !e.Unread { - continue - } - if onlyStarred && !e.Starred { - continue - } - if target != "" && !demoEmailIsInFolder(e, target) { - continue - } - out = append(out, e) - } - return out -} - -// normalizeDemoFolder turns a UI-supplied folder identifier (e.g., the -// system-folder ID "SENT", the Microsoft display name "Sent Items", or the -// canonical "sent") into the lowercase canonical name used in demoEmails. -func normalizeDemoFolder(folder string) string { - f := strings.ToLower(strings.TrimSpace(folder)) - switch f { - case "": - return "" - case "inbox": - return "inbox" - case "sent", "sent items", "sent mail": - return "sent" - case "drafts", "draft": - return "drafts" - case "archive", "all", "all mail": - return "archive" - case "trash", "deleted items", "deleted": - return "trash" - case "spam", "junk", "junk email": - return "spam" - case "starred": - return "starred" - default: - return f - } -} - -// demoEmailIsInFolder reports whether the email is in `target` (already -// canonicalised). Special-cases "starred" since starring is a flag, not a -// folder. "all" never filters anything out. 
-func demoEmailIsInFolder(e EmailResponse, target string) bool { - if target == "starred" { - return e.Starred - } - if target == "all" { - return true - } - for _, f := range e.Folders { - if strings.EqualFold(f, target) { - return true - } - } - return false -} diff --git a/internal/air/handlers_email_cache_runtime_test.go b/internal/air/handlers_email_cache_runtime_test.go index 3da3838..7ef9967 100644 --- a/internal/air/handlers_email_cache_runtime_test.go +++ b/internal/air/handlers_email_cache_runtime_test.go @@ -636,10 +636,11 @@ func TestProcessOfflineQueues_UsesQueuedGrantID(t *testing.T) { t.Helper() unread := false starred := true + folders := []string{"archive"} if err := server.enqueueMessageUpdate("grant-123", accountEmail, "email-2", &domain.UpdateMessageRequest{ Unread: &unread, Starred: &starred, - Folders: []string{"archive"}, + Folders: folders, }); err != nil { t.Fatalf("enqueue update: %v", err) } diff --git a/internal/air/handlers_email_demo.go b/internal/air/handlers_email_demo.go new file mode 100644 index 0000000..b4d2720 --- /dev/null +++ b/internal/air/handlers_email_demo.go @@ -0,0 +1,237 @@ +package air + +import ( + "strings" + "time" +) + +// demoEmails returns demo email data spread across multiple folders so the +// sidebar (Inbox / Sent / Drafts / Archive / Trash) actually shows different +// content per folder. Includes one calendar-invite (.ics) email so the +// calendar-invite card UI has something to render. +func demoEmails() []EmailResponse { + now := time.Now() + return []EmailResponse{ + // Inbox + { + ID: "demo-email-001", + Subject: "Q4 Product Roadmap Review", + Snippet: "Hi team, I've attached the updated roadmap for Q4...", + Body: "

Hi team,

I've attached the updated roadmap for Q4. Please review the timeline changes and let me know if you have any concerns.

", + From: []EmailParticipantResponse{{Name: "Sarah Chen", Email: "sarah.chen@company.com"}}, + To: []EmailParticipantResponse{{Name: "Team", Email: "team@company.com"}}, + Date: now.Add(-2 * time.Minute).Unix(), + Unread: true, + Starred: true, + Folders: []string{"inbox"}, + Attachments: []AttachmentResponse{ + {ID: "att-001", Filename: "Q4_Roadmap_v2.pdf", ContentType: "application/pdf", Size: 2516582}, + }, + }, + { + ID: "demo-email-002", + Subject: "[nylas/cli] PR #142: Add focus time feature", + Snippet: "mergify[bot] merged 1 commit into main...", + From: []EmailParticipantResponse{{Name: "GitHub", Email: "notifications@github.com"}}, + To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + Date: now.Add(-15 * time.Minute).Unix(), + Unread: true, + Starred: false, + Folders: []string{"inbox"}, + }, + { + ID: "demo-email-003", + Subject: "Re: Meeting Tomorrow", + Snippet: "That works for me. I'll send a calendar invite...", + From: []EmailParticipantResponse{{Name: "Alex Johnson", Email: "demo@example.com"}}, + To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + Date: now.Add(-1 * time.Hour).Unix(), + Unread: false, + Starred: false, + Folders: []string{"inbox"}, + }, + { + ID: "demo-email-004", + Subject: "Your December invoice is ready", + Snippet: "Your invoice for December 2024 is now available...", + From: []EmailParticipantResponse{{Name: "Stripe", Email: "billing@stripe.com"}}, + To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + Date: now.Add(-3 * time.Hour).Unix(), + Unread: false, + Starred: true, + Folders: []string{"inbox"}, + }, + { + ID: "demo-email-005", + Subject: "This week in design: AI tools reshaping...", + Snippet: "The latest trends, tools, and inspiration...", + From: []EmailParticipantResponse{{Name: "Design Weekly", Email: "newsletter@designweekly.com"}}, + To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + Date: now.Add(-5 * time.Hour).Unix(), 
+ Unread: false, + Starred: false, + Folders: []string{"inbox"}, + }, + // Calendar invite (with .ics attachment) + { + ID: "demo-email-invite-001", + Subject: "Event Invitation: Quarterly Sync", + Snippet: "You have received a calendar invitation: Quarterly Sync", + Body: "

You have received a calendar invitation: Quarterly Sync

Please let me know if this time works.

", + From: []EmailParticipantResponse{{Name: "Priya Patel", Email: "priya@partner.example"}}, + To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + Date: now.Add(-30 * time.Minute).Unix(), + Unread: true, + Starred: false, + Folders: []string{"inbox"}, + Attachments: []AttachmentResponse{ + { + ID: "att-invite-001", + Filename: "invite.ics", + ContentType: "text/calendar", + Size: 1024, + }, + }, + }, + // Sent — explicitly more than one so we can prove the filter works. + { + ID: "demo-email-sent-001", + Subject: "Re: Q4 Product Roadmap Review", + Snippet: "Thanks Sarah, here are my comments on the roadmap...", + Body: "

Thanks Sarah,

Here are my comments on the roadmap. Looks good overall — happy to discuss the Q4 priorities live.

", + From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + To: []EmailParticipantResponse{{Name: "Sarah Chen", Email: "sarah.chen@company.com"}}, + Date: now.Add(-1 * time.Hour).Unix(), + Folders: []string{"sent"}, + }, + { + ID: "demo-email-sent-002", + Subject: "Pricing follow-up", + Snippet: "Hi Mike, sending the updated pricing sheet...", + Body: "

Hi Mike,

Sending the updated pricing sheet as discussed. Let me know if you need any changes.

", + From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + To: []EmailParticipantResponse{{Name: "Mike Johnson", Email: "mike@customer.example"}}, + Date: now.Add(-3 * time.Hour).Unix(), + Folders: []string{"sent"}, + }, + { + ID: "demo-email-sent-003", + Subject: "Welcome to the team!", + Snippet: "Excited to have you joining us next Monday...", + Body: "

Excited to have you joining us next Monday! Here's the on-boarding checklist.

", + From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + To: []EmailParticipantResponse{{Name: "Jamie Lee", Email: "jamie@newhire.example"}}, + Date: now.Add(-1 * 24 * time.Hour).Unix(), + Folders: []string{"sent"}, + }, + // Drafts + { + ID: "demo-email-draft-001", + Subject: "Draft: Proposal for Acme", + Snippet: "Hi Acme team, here's the rough proposal...", + Body: "

Hi Acme team,

Here's the rough proposal — still working through the timeline section.

", + From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + To: []EmailParticipantResponse{{Name: "Acme Procurement", Email: "buyers@acme.example"}}, + Date: now.Add(-4 * time.Hour).Unix(), + Folders: []string{"drafts"}, + }, + // Archive + { + ID: "demo-email-archive-001", + Subject: "Confirmation: Subscription renewed", + Snippet: "Your annual subscription has been renewed...", + Body: "

Your annual subscription has been renewed for another year.

", + From: []EmailParticipantResponse{{Name: "Acme Billing", Email: "billing@acme.example"}}, + To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + Date: now.Add(-30 * 24 * time.Hour).Unix(), + Folders: []string{"archive"}, + }, + // Trash + { + ID: "demo-email-trash-001", + Subject: "URGENT: Winning offer (don't miss out)", + Snippet: "You've been pre-selected for an exclusive offer...", + Body: "

You've been pre-selected.

", + From: []EmailParticipantResponse{{Name: "Promo Bot", Email: "deals@spammy.example"}}, + To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + Date: now.Add(-7 * 24 * time.Hour).Unix(), + Folders: []string{"trash"}, + }, + } +} + +// filterDemoEmails applies folder/unread/starred filters to a demo email +// list. Folder matching is case-insensitive against email.Folders entries +// and against well-known aliases (e.g., "SENT" → "sent", "Sent Items" +// → "sent"). Empty folder string means "no folder filter." +func filterDemoEmails(emails []EmailResponse, folder string, onlyUnread, onlyStarred bool) []EmailResponse { + target := normalizeDemoFolder(folder) + out := make([]EmailResponse, 0, len(emails)) + for _, e := range emails { + if onlyUnread && !e.Unread { + continue + } + if onlyStarred && !e.Starred { + continue + } + if target != "" && !demoEmailIsInFolder(e, target) { + continue + } + out = append(out, e) + } + return out +} + +// normalizeDemoFolder turns a UI-supplied folder identifier (e.g., the +// system-folder ID "SENT", the Microsoft display name "Sent Items", or the +// canonical "sent") into the lowercase canonical name used in demoEmails. +// +// "all" and "all mail" route to the dedicated "all" target rather than +// collapsing into "archive": Gmail's "All Mail" view shows every +// message regardless of folder, and demoEmailIsInFolder's `target == +// "all"` branch matches that semantic. Aliasing them to "archive" +// instead would surface only the single demo email tagged with the +// archive folder, which is wrong end-user behavior. 
+func normalizeDemoFolder(folder string) string { + f := strings.ToLower(strings.TrimSpace(folder)) + switch f { + case "": + return "" + case "inbox": + return "inbox" + case "sent", "sent items", "sent mail": + return "sent" + case "drafts", "draft": + return "drafts" + case "archive": + return "archive" + case "all", "all mail": + return "all" + case "trash", "deleted items", "deleted": + return "trash" + case "spam", "junk", "junk email": + return "spam" + case "starred": + return "starred" + default: + return f + } +} + +// demoEmailIsInFolder reports whether the email is in `target` (already +// canonicalised). Special-cases "starred" since starring is a flag, not a +// folder. "all" never filters anything out. +func demoEmailIsInFolder(e EmailResponse, target string) bool { + if target == "starred" { + return e.Starred + } + if target == "all" { + return true + } + for _, f := range e.Folders { + if strings.EqualFold(f, target) { + return true + } + } + return false +} diff --git a/internal/air/handlers_email_invite.go b/internal/air/handlers_email_invite.go index eb028e6..7e056c1 100644 --- a/internal/air/handlers_email_invite.go +++ b/internal/air/handlers_email_invite.go @@ -3,7 +3,9 @@ package air import ( "context" "errors" + "fmt" "io" + "log/slog" "net/http" "strings" "time" @@ -23,6 +25,7 @@ type CalendarInviteResponse struct { HasInvite bool `json:"has_invite"` AttachmentID string `json:"attachment_id,omitempty"` Filename string `json:"filename,omitempty"` + ICalUID string `json:"ical_uid,omitempty"` // VEVENT UID; lets the RSVP endpoint resolve a Nylas event ID Title string `json:"title,omitempty"` Location string `json:"location,omitempty"` Description string `json:"description,omitempty"` @@ -50,14 +53,20 @@ type InviteAttendee struct { IsOrganizer bool `json:"is_organizer,omitempty"` } +// errInviteFetchFailed flags a transient upstream failure on the second +// raw_mime fetch (the fallback after attachment download fails). 
Callers +// distinguish it from "the email simply has no invite" so they can choose +// between silent degrade (preview card) and 502 (RSVP — the user is +// actively trying to do something and deserves an actionable error). +var errInviteFetchFailed = errors.New("invite: failed to fetch raw_mime") + // handleEmailInvite returns parsed iCalendar invite data for an email. -// Resolution order: -// 1. attachments[] entry with text/calendar or .ics filename — Microsoft, -// custom senders typically arrive this way. -// 2. inline text/calendar part inside raw_mime (Gmail's invitation -// shape — the ICS rides as a multipart/alternative leaf). -// -// Returns has_invite=false when neither path yields a calendar payload. +// Returns has_invite=false when neither attachments[] nor raw_mime yields +// a calendar payload — the frontend silently degrades to the regular +// email render in that case. Transient raw_mime fetch failures are also +// silently degraded here so a flaky upstream doesn't break the inbox +// view; the RSVP endpoint surfaces the same condition as 502 because the +// user is actively trying to act on the invite. func (s *Server) handleEmailInvite(w http.ResponseWriter, r *http.Request, emailID string) { if !requireMethod(w, r, http.MethodGet) { return @@ -76,19 +85,42 @@ func (s *Server) handleEmailInvite(w http.ResponseWriter, r *http.Request, email ctx, cancel := s.withTimeout(r) defer cancel() - msg, err := s.nylasClient.GetMessage(ctx, grantID, emailID) + resp, err := s.resolveEmailInvite(ctx, grantID, emailID) if err != nil { - writeError(w, http.StatusInternalServerError, "Failed to fetch email: "+err.Error()) + // Preserve the legacy "preview card silently disappears" UX even + // when raw_mime can't be fetched. Only the initial GetMessage + // failure (a hard error) reaches here. 
+ if errors.Is(err, errInviteFetchFailed) { + writeJSON(w, http.StatusOK, CalendarInviteResponse{HasInvite: false}) + return + } + writeUpstreamError(w, http.StatusInternalServerError, + "Failed to fetch email — please try again", err, + "emailID", emailID) return } + writeJSON(w, http.StatusOK, resp) +} + +// resolveEmailInvite parses a message's iCalendar invite, if any. +// Resolution order: +// 1. attachments[] with text/calendar or .ics (Microsoft/custom senders). +// 2. inline text/calendar in raw_mime (Gmail's multipart/alternative shape). +// +// Returns HasInvite=false on no-invite. Non-nil error only on initial +// GetMessage failure — attachment-download and raw_mime errors are swallowed. +func (s *Server) resolveEmailInvite(ctx context.Context, grantID, emailID string) (CalendarInviteResponse, error) { + msg, err := s.nylasClient.GetMessage(ctx, grantID, emailID) + if err != nil { + return CalendarInviteResponse{}, err + } att := findCalendarAttachment(msg.Attachments) if att != nil { if parsed, ok := s.tryParseAttachmentInvite(ctx, grantID, emailID, att); ok { - writeJSON(w, http.StatusOK, parsed) - return + return parsed, nil } - // Download or parse failed. Don't surface a 5xx — Nylas + // Download or parse failed. Don't surface as an error — Nylas // frequently returns synthetic attachment IDs (v0:base64(...):...) // that look like real attachments but cannot be downloaded. // Falling through to the raw_mime walker recovers the calendar @@ -97,22 +129,28 @@ func (s *Server) handleEmailInvite(w http.ResponseWriter, r *http.Request, email // Fetch raw MIME and look for a text/calendar part — both Gmail's // inline-multipart shape AND Nylas's "synthetic attachment that - // can't be downloaded" case land here. + // can't be downloaded" case land here. 
A network error on this call + // is *transient* and distinguishable from "email has no invite", so + // we surface it as errInviteFetchFailed: callers in actionable paths + // (RSVP) can return 502, while the preview path silently degrades. full, err := s.nylasClient.GetMessageWithFields(ctx, grantID, emailID, "raw_mime") - if err != nil || full == nil || full.RawMIME == "" { - writeJSON(w, http.StatusOK, CalendarInviteResponse{HasInvite: false}) - return + if err != nil { + // Double-wrap so callers can errors.Is against errInviteFetchFailed + // (the sentinel) AND unwrap the underlying transport error for + // logging. Go 1.20+ supports multiple %w in a single Errorf. + return CalendarInviteResponse{}, fmt.Errorf("%w: %w", errInviteFetchFailed, err) + } + if full == nil || full.RawMIME == "" { + return CalendarInviteResponse{HasInvite: false}, nil } parts := findInlineCalendarParts(full.RawMIME) if len(parts) == 0 { - writeJSON(w, http.StatusOK, CalendarInviteResponse{HasInvite: false}) - return + return CalendarInviteResponse{HasInvite: false}, nil } parsed, err := parseICS(parts[0].Body) if err != nil { - writeJSON(w, http.StatusOK, CalendarInviteResponse{HasInvite: false}) - return + return CalendarInviteResponse{HasInvite: false}, nil } parsed.HasInvite = true // If we had a Nylas attachment entry (even an undownloadable @@ -133,28 +171,33 @@ func (s *Server) handleEmailInvite(w http.ResponseWriter, r *http.Request, email if parsed.Method == "" && parts[0].Method != "" { parsed.Method = parts[0].Method } - writeJSON(w, http.StatusOK, parsed) + return parsed, nil } -// tryParseAttachmentInvite attempts the legacy path: download an ICS -// attachment via the Nylas attachments endpoint and parse it. Returns -// ok=false on any failure so the caller can fall back to raw_mime. -// Errors are intentionally swallowed: we don't want 5xxs for transient -// download problems when the calendar payload is recoverable from MIME. 
+// tryParseAttachmentInvite downloads and parses an ICS attachment. +// Returns ok=false on any failure so the caller falls back to raw_mime; +// each failure logs at slog.Debug for diagnosability. func (s *Server) tryParseAttachmentInvite(ctx context.Context, grantID, emailID string, att *domain.Attachment) (CalendarInviteResponse, bool) { body, err := s.nylasClient.DownloadAttachment(ctx, grantID, emailID, att.ID) if err != nil { + slog.Debug("invite attachment download failed", + "attachment_id", att.ID, "filename", att.Filename, "err", err) return CalendarInviteResponse{}, false } defer func() { _ = body.Close() }() raw, err := io.ReadAll(io.LimitReader(body, maxICSBytes+1)) if err != nil || len(raw) > maxICSBytes { + slog.Debug("invite attachment read failed or oversized", + "attachment_id", att.ID, "filename", att.Filename, + "size", len(raw), "max", maxICSBytes, "err", err) return CalendarInviteResponse{}, false } parsed, err := parseICS(string(raw)) if err != nil { + slog.Debug("invite attachment ICS parse failed", + "attachment_id", att.ID, "filename", att.Filename, "err", err) return CalendarInviteResponse{}, false } parsed.HasInvite = true @@ -227,6 +270,7 @@ func demoInviteFor(emailID string) CalendarInviteResponse { HasInvite: true, AttachmentID: "att-invite-001", Filename: "invite.ics", + ICalUID: "demo-invite-001@nylas.example", Title: "Quarterly Sync", Location: "Conference Room A / Online", Description: "Quarterly review with the partner team.", diff --git a/internal/air/handlers_email_invite_handler_test.go b/internal/air/handlers_email_invite_handler_test.go index 3275ba5..33d3f76 100644 --- a/internal/air/handlers_email_invite_handler_test.go +++ b/internal/air/handlers_email_invite_handler_test.go @@ -307,3 +307,27 @@ func TestParseICS_CancelMethod(t *testing.T) { t.Errorf("Status=%q, want CANCELLED", resp.Status) } } + +// TestParseICS_ClampsOversizedUID pins the UID-length cap. 
A hostile +// inviter could craft a multi-MB UID; without the clamp we'd round-trip +// it into the Nylas ical_uid query parameter and form a giant URL. The +// prefix is kept (so the lookup can still succeed for legitimate long +// UIDs that happen to share a prefix) but the byte budget is bounded. +func TestParseICS_ClampsOversizedUID(t *testing.T) { + huge := strings.Repeat("a", 64*1024) // 64KB UID + body := "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//T//EN\r\nMETHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\nUID:" + huge + "\r\nSUMMARY:OversizedUID\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T150000Z\r\n" + + "END:VEVENT\r\nEND:VCALENDAR\r\n" + + resp, err := parseICS(body) + if err != nil { + t.Fatalf("parseICS: %v", err) + } + if got := len(resp.ICalUID); got > 1024 { + t.Errorf("ICalUID len=%d, want <=1024 after clamp", got) + } + if !strings.HasPrefix(huge, resp.ICalUID) { + t.Errorf("ICalUID=%q does not look like a prefix of the input", resp.ICalUID) + } +} diff --git a/internal/air/handlers_email_invite_parse.go b/internal/air/handlers_email_invite_parse.go index 8aec508..bca7386 100644 --- a/internal/air/handlers_email_invite_parse.go +++ b/internal/air/handlers_email_invite_parse.go @@ -1,12 +1,18 @@ package air import ( - "errors" "strings" ical "github.com/arran4/golang-ical" ) +// maxICalUIDBytes caps the iCalendar UID we will round-trip into Nylas +// query parameters. RFC 5545 doesn't define a maximum, but a hostile +// inviter could craft a multi-MB UID — URL-encoded that becomes a giant +// query string. 1KB comfortably covers Outlook's 200-char defaults and +// Google's 100-byte UIDs while clamping the worst case. +const maxICalUIDBytes = 1024 + // parseICS parses an iCalendar payload and returns the first VEVENT in a // shape the Air invite card understands. 
Backed by golang-ical so we // inherit RFC 5545 line-folding, TZID resolution, ATTENDEE parsing, and @@ -59,6 +65,16 @@ func calendarMethod(cal *ical.Calendar) string { func mapVEvent(ev *ical.VEvent) CalendarInviteResponse { var resp CalendarInviteResponse + if p := ev.GetProperty(ical.ComponentPropertyUniqueId); p != nil { + uid := strings.TrimSpace(p.Value) + // Clamp pathological UIDs. A UID we cannot trust is preferable to + // dropping the invite, so keep the prefix and let the downstream + // ical_uid filter still find the event. + if len(uid) > maxICalUIDBytes { + uid = uid[:maxICalUIDBytes] + } + resp.ICalUID = uid + } if p := ev.GetProperty(ical.ComponentPropertySummary); p != nil { resp.Title = p.Value } @@ -193,7 +209,3 @@ func firstParam(params map[string][]string, key string) string { } return "" } - -// Sanity check that errNoUsableEvent is wired — a regression where the -// parser silently ignored bad input would let the UI render a blank card. -var _ = errors.New diff --git a/internal/air/handlers_email_invite_silent_failure_test.go b/internal/air/handlers_email_invite_silent_failure_test.go new file mode 100644 index 0000000..1e3fd1e --- /dev/null +++ b/internal/air/handlers_email_invite_silent_failure_test.go @@ -0,0 +1,172 @@ +package air + +import ( + "context" + "errors" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/nylas/cli/internal/domain" + "github.com/stretchr/testify/assert" +) + +// TestHandleEmailInvite_RealAttachmentDownloadFailure_LogsFailure pins +// the silent-failure gap in tryParseAttachmentInvite +// (handlers_email_invite.go:188-191). When DownloadAttachment fails on +// a real (non-synthetic) attachment ID — e.g., a transient Nylas 5xx, +// disk-full while streaming, decryption error — the function returns +// (CalendarInviteResponse{}, false) with no log. 
The handler then +// falls through to raw_mime, which is the right behavior for the user +// (the invite card might still render via the inline path) but means +// support cannot diagnose "RSVP card never appears for X.ics" without +// knowing which leg of the parser failed. +// +// EXPECTED FAILURE today: handlers_email_invite.go:189-191 returns +// silently. After the fix an slog Debug or Warn entry should record +// the attachment ID and the underlying download error so a wedged +// attachments endpoint shows up in production logs. +func TestHandleEmailInvite_RealAttachmentDownloadFailure_LogsFailure(t *testing.T) { + // No t.Parallel — captureSlog mutates process-global slog default. + server, client, _ := newCachedTestServer(t) + + const realAttID = "real-att-canary-DOWNLOAD-XYZ" + const downloadErrSentinel = "nylas-503-download-canary-7777" + client.GetMessageFunc = func(_ context.Context, _, msgID string) (*domain.Message, error) { + return &domain.Message{ + ID: msgID, + Subject: "Calendar invite", + Attachments: []domain.Attachment{ + {ID: realAttID, Filename: "invite.ics", ContentType: "text/calendar"}, + }, + }, nil + } + client.DownloadAttachmentFunc = func(context.Context, string, string, string) (io.ReadCloser, error) { + return nil, errors.New(downloadErrSentinel) + } + // raw_mime fallback exists but does not contain a calendar part — + // confirms the handler completes (200 has_invite=false) while still + // expecting the download leg to have left a log breadcrumb. 
+ client.GetMessageWithFieldsFunc = func(_ context.Context, _ string, msgID, _ string) (*domain.Message, error) { + return &domain.Message{ID: msgID, RawMIME: ""}, nil + } + + logs := captureSlog(t) + + r := httptest.NewRequest(http.MethodGet, "/api/emails/email-1/invite", http.NoBody) + w := httptest.NewRecorder() + server.handleEmailInvite(w, r, "email-1") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s — silent fall-through must still produce a 200", + w.Code, w.Body.String()) + } + + got := logs.String() + assert.Contains(t, got, realAttID, + "slog must record the failed attachment ID for diagnosability; got %q", got) + assert.Contains(t, got, downloadErrSentinel, + "slog must record the underlying download error so transient Nylas "+ + "failures are diagnosable; got %q", got) +} + +// TestHandleEmailInvite_OversizedAttachment_LogsFailure pins the silent +// drop at handlers_email_invite.go:194-197 — when the streamed body +// exceeds maxICSBytes (or io.ReadAll returns an error), the function +// silently returns false. An attacker-controlled multi-MB ICS would +// land here with no record at all. Also covers the legitimate case +// where Nylas (rarely) ships a misencoded attachment that streams much +// larger than reported. +// +// EXPECTED FAILURE today: silent return. After the fix an slog Warn +// should record the attID + filename so oversize-DOS attempts and +// runaway attachments are visible. +func TestHandleEmailInvite_OversizedAttachment_LogsFailure(t *testing.T) { + // No t.Parallel — captureSlog mutates process-global slog default. 
+ server, client, _ := newCachedTestServer(t) + + const fatAttID = "fat-att-canary-OVERSIZE-XYZ" + client.GetMessageFunc = func(_ context.Context, _, msgID string) (*domain.Message, error) { + return &domain.Message{ + ID: msgID, + Attachments: []domain.Attachment{ + {ID: fatAttID, Filename: "huge-invite.ics", ContentType: "text/calendar", Size: 1}, + }, + }, nil + } + // 1MB+1 of 'A' — exceeds maxICSBytes (1<<20 in handlers_email_invite.go), + // so the io.LimitReader read returns len(raw) == maxICSBytes+1, which + // trips the oversized-payload swallow at line ~194-197. + huge := strings.Repeat("A", (1<<20)+1) + client.DownloadAttachmentFunc = func(context.Context, string, string, string) (io.ReadCloser, error) { + return io.NopCloser(strings.NewReader(huge)), nil + } + client.GetMessageWithFieldsFunc = func(_ context.Context, _ string, msgID, _ string) (*domain.Message, error) { + return &domain.Message{ID: msgID, RawMIME: ""}, nil + } + + logs := captureSlog(t) + + r := httptest.NewRequest(http.MethodGet, "/api/emails/email-1/invite", http.NoBody) + w := httptest.NewRecorder() + server.handleEmailInvite(w, r, "email-1") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s — silent fall-through must still produce a 200", + w.Code, w.Body.String()) + } + + got := logs.String() + assert.Contains(t, got, fatAttID, + "slog must record the oversize attachment ID — currently silent at "+ + "handlers_email_invite.go:194-197; got %q", got) +} + +// TestHandleEmailInvite_MalformedICS_LogsFailure pins the third silent +// drop in tryParseAttachmentInvite (handlers_email_invite.go:199-202): +// parseICS errors are swallowed entirely. A malformed calendar payload +// should be visible to support, even if the user-facing path falls +// through to raw_mime. +// +// EXPECTED FAILURE today: parseICS error returned without log. After +// the fix an slog Debug entry should fire with the attachment ID and +// the parse error so misencoded ICS payloads are diagnosable. 
+func TestHandleEmailInvite_MalformedICS_LogsFailure(t *testing.T) { + // No t.Parallel — captureSlog mutates process-global slog default. + server, client, _ := newCachedTestServer(t) + + const badICSAttID = "bad-ics-att-canary-PARSE-XYZ" + client.GetMessageFunc = func(_ context.Context, _, msgID string) (*domain.Message, error) { + return &domain.Message{ + ID: msgID, + Attachments: []domain.Attachment{ + {ID: badICSAttID, Filename: "broken.ics", ContentType: "text/calendar"}, + }, + }, nil + } + client.DownloadAttachmentFunc = func(context.Context, string, string, string) (io.ReadCloser, error) { + // Not a valid VCALENDAR — parseICS will error. + return io.NopCloser(strings.NewReader("THIS IS NOT VALID ICS PAYLOAD")), nil + } + client.GetMessageWithFieldsFunc = func(_ context.Context, _ string, msgID, _ string) (*domain.Message, error) { + return &domain.Message{ID: msgID, RawMIME: ""}, nil + } + + logs := captureSlog(t) + + r := httptest.NewRequest(http.MethodGet, "/api/emails/email-1/invite", http.NoBody) + w := httptest.NewRecorder() + server.handleEmailInvite(w, r, "email-1") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s — silent fall-through must still produce a 200", + w.Code, w.Body.String()) + } + + got := logs.String() + assert.Contains(t, got, badICSAttID, + "slog must record the malformed-ICS attachment ID — currently silent at "+ + "handlers_email_invite.go:199-202; got %q", got) +} diff --git a/internal/air/handlers_email_offline.go b/internal/air/handlers_email_offline.go new file mode 100644 index 0000000..e012de7 --- /dev/null +++ b/internal/air/handlers_email_offline.go @@ -0,0 +1,83 @@ +package air + +import ( + "context" + "errors" + "log/slog" + "net" + + "github.com/nylas/cli/internal/air/cache" + "github.com/nylas/cli/internal/domain" +) + +// shouldQueueEmailAction reports whether an upstream API failure should +// route through the offline queue. 
The queue must be enabled (cache is +// configured AND queueing is opted in), and the failure has to look like +// a transient network/timeout problem — application errors (4xx) flow +// straight back to the caller. +func (s *Server) shouldQueueEmailAction(err error) bool { + if !s.offlineQueueEnabled() { + return false + } + if !s.IsOnline() { + return true + } + var netErr net.Error + return errors.As(err, &netErr) || errors.Is(err, context.DeadlineExceeded) +} + +func (s *Server) enqueueMessageUpdate(grantID, accountEmail, emailID string, updateReq *domain.UpdateMessageRequest) error { + if accountEmail == "" || !s.offlineQueueEnabled() { + return errors.New("offline queue unavailable") + } + + return s.withOfflineQueue(accountEmail, func(queue *cache.OfflineQueue) error { + return queue.Enqueue(cache.ActionUpdateMessage, emailID, cache.UpdateMessagePayload{ + GrantID: grantID, + EmailID: emailID, + Unread: updateReq.Unread, + Starred: updateReq.Starred, + Folders: updateReq.Folders, + }) + }) +} + +func (s *Server) enqueueMessageDelete(grantID, accountEmail, emailID string) error { + if accountEmail == "" || !s.offlineQueueEnabled() { + return errors.New("offline queue unavailable") + } + + return s.withOfflineQueue(accountEmail, func(queue *cache.OfflineQueue) error { + return queue.Enqueue(cache.ActionDelete, emailID, cache.DeleteMessagePayload{ + GrantID: grantID, + EmailID: emailID, + }) + }) +} + +// updateCachedEmail mirrors a remote update into the local cache. +// Cache write failures are logged but never bubbled — the live update +// already succeeded. folders nil = leave alone; non-nil = set. 
+func (s *Server) updateCachedEmail(accountEmail, emailID string, unread, starred *bool, folders []string) { + if accountEmail == "" || !s.cacheAvailable() { + return + } + + if err := s.withEmailStore(accountEmail, func(store *cache.EmailStore) error { + return store.UpdateMessage(emailID, unread, starred, folders) + }); err != nil { + slog.Warn("cache update failed", "emailID", emailID, "account", redactEmail(accountEmail), "err", err) + } +} + +func (s *Server) deleteCachedEmail(accountEmail, emailID string) { + if accountEmail == "" || !s.cacheAvailable() { + return + } + + if err := s.withEmailStore(accountEmail, func(store *cache.EmailStore) error { + return store.Delete(emailID) + }); err != nil { + slog.Warn("cache delete failed", "emailID", emailID, "account", redactEmail(accountEmail), "err", err) + } +} diff --git a/internal/air/handlers_email_offline_replay_test.go b/internal/air/handlers_email_offline_replay_test.go new file mode 100644 index 0000000..edaf0a1 --- /dev/null +++ b/internal/air/handlers_email_offline_replay_test.go @@ -0,0 +1,100 @@ +package air + +import ( + "context" + "testing" + + "github.com/nylas/cli/internal/domain" +) + +// TestProcessOfflineQueues_PreservesEmptyFoldersIntent locks down the +// payload-shape contract that motivates the nil-vs-empty distinction on +// `[]string` Folders. The Gmail-archive intent is encoded as a non-nil +// empty slice ([]string{}) — distinct from "leave folders alone" (nil). +// A regression that re-introduced `omitempty` on cache.UpdateMessagePayload +// would silently drop the empty slice on the queue's JSON round-trip, +// replaying as nil and reverting every offline-archived message: UI says +// archived, server unchanged. +// +// This test enqueues an empty-Folders update, drains the queue, and +// asserts the captured request still has Folders as a non-nil empty +// slice. Should pass today — the find that motivated it is the absence +// of coverage, not a live bug. 
+func TestProcessOfflineQueues_PreservesEmptyFoldersIntent(t *testing.T) { + t.Parallel() + + server, client, accountEmail := newCachedTestServer(t) + server.SetOnline(false) + + var captured *domain.UpdateMessageRequest + client.UpdateMessageFunc = func(_ context.Context, _, _ string, req *domain.UpdateMessageRequest) (*domain.Message, error) { + // Snapshot the request so SetOnline(true)'s replay write is + // observable after the fact. Mock writes to LastGrantID etc. + // are not enough — those don't capture Folders. + captured = req + return &domain.Message{ID: "email-archive"}, nil + } + + if err := server.enqueueMessageUpdate("grant-123", accountEmail, "email-archive", &domain.UpdateMessageRequest{ + Folders: []string{}, + }); err != nil { + t.Fatalf("enqueue update: %v", err) + } + + server.SetOnline(true) // triggers processOfflineQueues synchronously + + if !client.UpdateMessageCalled { + t.Fatal("expected UpdateMessage to be replayed when going back online") + } + if captured == nil { + t.Fatal("UpdateMessage was called but request was not captured") + } + if captured.Folders == nil { + t.Fatal("replayed Folders is nil; want non-nil empty slice (Gmail archive intent)") + } + if len(captured.Folders) != 0 { + t.Errorf("replayed Folders = %v, want empty slice (Gmail archive intent)", + captured.Folders) + } +} + +// TestProcessOfflineQueues_PreservesNilFoldersIntent is the symmetric +// lock-down: nil Folders ("leave folders alone", e.g. mark-as-read +// without touching folders) must replay as nil, not as []string{}. +// Confusing the two on either side of the queue corrupts the user's +// intent: a "mark unread" enqueued offline must not also clear the +// folder list when it eventually replays. 
+func TestProcessOfflineQueues_PreservesNilFoldersIntent(t *testing.T) { + t.Parallel() + + server, client, accountEmail := newCachedTestServer(t) + server.SetOnline(false) + + var captured *domain.UpdateMessageRequest + client.UpdateMessageFunc = func(_ context.Context, _, _ string, req *domain.UpdateMessageRequest) (*domain.Message, error) { + captured = req + return &domain.Message{ID: "email-mark-read"}, nil + } + + unread := false + if err := server.enqueueMessageUpdate("grant-123", accountEmail, "email-mark-read", &domain.UpdateMessageRequest{ + Unread: &unread, + // Folders intentionally omitted — caller's intent is "do not + // touch folders," and the queue must not invent one on replay. + }); err != nil { + t.Fatalf("enqueue update: %v", err) + } + + server.SetOnline(true) + + if !client.UpdateMessageCalled { + t.Fatal("expected UpdateMessage to be replayed when going back online") + } + if captured == nil { + t.Fatal("UpdateMessage was called but request was not captured") + } + if captured.Folders != nil { + t.Errorf("replayed Folders = %v (non-nil), want nil (leave-alone intent)", + captured.Folders) + } +} diff --git a/internal/air/handlers_email_offline_test.go b/internal/air/handlers_email_offline_test.go new file mode 100644 index 0000000..c5c23b9 --- /dev/null +++ b/internal/air/handlers_email_offline_test.go @@ -0,0 +1,493 @@ +package air + +import ( + "context" + "encoding/json" + "errors" + "net" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/nylas/cli/internal/air/cache" + "github.com/nylas/cli/internal/domain" +) + +// TestHandleListEmails_APIErrorFallsBackToCache pins the server-online, +// upstream-failed cache fallback. The handler is expected to return +// cached results with HasMore:false (so the UI doesn't try to paginate +// past the snapshot) instead of surfacing a 500. 
This was an untested +// branch — without coverage a future refactor of the cache fallback +// loop could silently leave the UI dead in the water on transient +// Nylas outages even though the cache is healthy. +func TestHandleListEmails_APIErrorFallsBackToCache(t *testing.T) { + t.Parallel() + + server, client, accountEmail := newCachedTestServer(t) + putCachedEmail(t, server, accountEmail, &cache.CachedEmail{ + ID: "cached-1", + FolderID: "inbox", + Subject: "Cached when API is down", + FromName: "Cache", + FromEmail: "cache@example.com", + Date: time.Now(), + CachedAt: time.Now(), + }) + + // Single-message cache won't satisfy the "full page" short-circuit + // when a folder filter is applied (the threshold is len >= limit), + // so we hit the API path. Force the API to fail with a transient + // error — handler must serve the stale cache rather than 500. + apiCalled := false + client.GetMessagesWithParamsFunc = func(_ context.Context, _ string, _ *domain.MessageQueryParams) ([]domain.Message, error) { + apiCalled = true + return nil, errors.New("nylas 503 service unavailable") + } + + req := httptest.NewRequest(http.MethodGet, "/api/emails?folder=inbox", nil) + w := httptest.NewRecorder() + server.handleListEmails(w, req) + + if !apiCalled { + t.Fatal("expected the API to be called once before falling back to cache") + } + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s, want 200 (cache fallback)", w.Code, w.Body.String()) + } + var resp EmailsResponse + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("decode: %v", err) + } + if len(resp.Emails) != 1 || resp.Emails[0].ID != "cached-1" { + t.Errorf("response emails=%+v, want single cached-1", resp.Emails) + } + // HasMore must be false — paginating past the cache snapshot would + // hit the same broken upstream and confuse the user. 
+ if resp.HasMore { + t.Errorf("HasMore=true on cache fallback; expected false to prevent retry-paginate") + } +} + +// TestHandleUpdateEmail_OnlineTransientErrorQueuesAction pins the +// transient-error branch in handleUpdateEmail. When the API call +// returns a network-shaped error AND the offline queue is configured, +// the handler must enqueue the action, flip server state to offline, +// and return a 200 "queued" envelope — not a 500. +func TestHandleUpdateEmail_OnlineTransientErrorQueuesAction(t *testing.T) { + t.Parallel() + + server, client, _ := newCachedTestServer(t) + // Force a transient error type that shouldQueueEmailAction recognises. + client.UpdateMessageFunc = func(context.Context, string, string, *domain.UpdateMessageRequest) (*domain.Message, error) { + return nil, &transientNetErr{} + } + + req := httptest.NewRequest(http.MethodPut, "/api/emails/email-1", + strings.NewReader(`{"unread":false}`)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + server.handleUpdateEmail(w, req, "email-1") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s, want 200 (transient error must queue, not 500)", w.Code, w.Body.String()) + } + var resp UpdateEmailResponse + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("decode: %v", err) + } + if !resp.Success || !strings.Contains(resp.Message, "queued") { + t.Errorf("response=%+v, want Success:true with 'queued' in message", resp) + } + // Server should now be marked offline so subsequent calls take the + // fast offline-first path instead of round-tripping the broken API. + if server.IsOnline() { + t.Error("expected SetOnline(false) after transient API error, got isOnline=true") + } +} + +// TestHandleUpdateEmail_OfflineButQueueFails_FallsThroughToAPI pins +// the rare case where the server is offline AND the offline queue +// itself is broken. 
The handler should still attempt the live API +// call (it might succeed — IsOnline can be stale) rather than dropping +// the user's action silently. +func TestHandleUpdateEmail_OfflineButQueueFails_FallsThroughToAPI(t *testing.T) { + t.Parallel() + + server, client, _ := newCachedTestServer(t) + server.SetOnline(false) + // Disable the offline queue so enqueueMessageUpdate fails. With no + // account-email lookup possible the helper short-circuits with an + // "offline queue unavailable" error. + server.offlineQueues = nil + server.cacheSettings.OfflineQueueEnabled = false + + apiCalled := false + client.UpdateMessageFunc = func(context.Context, string, string, *domain.UpdateMessageRequest) (*domain.Message, error) { + apiCalled = true + return &domain.Message{ID: "email-1"}, nil + } + + req := httptest.NewRequest(http.MethodPut, "/api/emails/email-1", + strings.NewReader(`{"unread":true}`)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + server.handleUpdateEmail(w, req, "email-1") + + if !apiCalled { + t.Error("expected handler to attempt live API call when queue is unavailable") + } + if w.Code != http.StatusOK { + t.Errorf("status=%d body=%s, want 200 (live retry succeeded)", w.Code, w.Body.String()) + } +} + +// TestShouldQueueEmailAction pins the predicate that gates whether +// upstream errors enter the offline queue. Without direct coverage, +// regressions in the net.Error / context.DeadlineExceeded matching +// are invisible — the only signal would be "archives queue properly +// most of the time." 
+func TestShouldQueueEmailAction(t *testing.T) { + t.Parallel() + + server, _, _ := newCachedTestServer(t) + + cases := []struct { + name string + err error + online bool + enabled bool + want bool + }{ + { + name: "offline + queue enabled → queue", + err: errors.New("any error"), + online: false, + enabled: true, + want: true, + }, + { + name: "online + transient net.Error → queue", + err: &transientNetErr{}, + online: true, + enabled: true, + want: true, + }, + { + name: "online + context deadline exceeded → queue", + err: context.DeadlineExceeded, + online: true, + enabled: true, + want: true, + }, + { + name: "online + plain error (4xx-shaped) → don't queue", + err: errors.New("nylas 401 unauthorized"), + online: true, + enabled: true, + want: false, + }, + { + name: "queue disabled → never queue", + err: &transientNetErr{}, + online: false, + enabled: false, + want: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + // shouldQueueEmailAction is a method on *Server, not pure — + // reset state between cases. Each subtest is sequential so + // the shared server is safe. + server.SetOnline(tc.online) + server.cacheSettings.OfflineQueueEnabled = tc.enabled + got := server.shouldQueueEmailAction(tc.err) + if got != tc.want { + t.Errorf("shouldQueueEmailAction(%v) [online=%v enabled=%v] = %v, want %v", + tc.err, tc.online, tc.enabled, got, tc.want) + } + }) + } +} + +// TestNormalizeDemoFolder pins the alias map for demo-mode folder +// filtering. Without coverage, "Sent Items" → "sent" silently breaks +// when a future refactor inlines the switch into demoEmailIsInFolder. 
+func TestNormalizeDemoFolder(t *testing.T) { + t.Parallel() + + cases := []struct { + input string + want string + }{ + {"", ""}, + {"inbox", "inbox"}, + {"INBOX", "inbox"}, + {" inbox ", "inbox"}, + {"sent", "sent"}, + {"Sent Items", "sent"}, // Microsoft display name + {"Sent Mail", "sent"}, // Gmail display name + {"drafts", "drafts"}, + {"Draft", "drafts"}, + {"archive", "archive"}, + {"All Mail", "all"}, // Gmail "All Mail" routes to the "show everything" target + {"all", "all"}, + {"trash", "trash"}, + {"Deleted Items", "trash"}, // Microsoft + {"Deleted", "trash"}, + {"spam", "spam"}, + {"Junk", "spam"}, + {"Junk Email", "spam"}, // Outlook + {"starred", "starred"}, + {"unknown-folder", "unknown-folder"}, // pass-through, lowercased + } + for _, tc := range cases { + t.Run(tc.input, func(t *testing.T) { + got := normalizeDemoFolder(tc.input) + if got != tc.want { + t.Errorf("normalizeDemoFolder(%q) = %q, want %q", tc.input, got, tc.want) + } + }) + } +} + +// TestDemoEmailIsInFolder_AllAliasMatchesEverything pins the special +// "all" branch — a UI-supplied "all" target should match every demo +// email regardless of its folders[]. 
+func TestDemoEmailIsInFolder_AllAliasMatchesEverything(t *testing.T) { + t.Parallel() + + cases := []EmailResponse{ + {Folders: []string{"inbox"}}, + {Folders: []string{"sent"}}, + {Folders: []string{"trash"}}, + {Folders: nil}, + {Folders: []string{}}, + } + for i, e := range cases { + if !demoEmailIsInFolder(e, "all") { + t.Errorf("case %d: demoEmailIsInFolder(%+v, \"all\") = false, want true", i, e) + } + } +} + +// TestFilterDemoEmails_FolderAllReturnsEverything exercises the same +// "all means everything" intent end-to-end through the call chain a UI +// request actually takes: +// +// filterDemoEmails(emails, "all", false, false) +// → normalizeDemoFolder("all") // canonicalises folder string +// → demoEmailIsInFolder(e, target) // matches against e.Folders +// +// `demoEmailIsInFolder` has a dedicated `if target == "all" { return +// true }` branch (handlers_email_demo.go:219), but +// `normalizeDemoFolder` collapses "all" into "archive" via the alias +// case `"archive", "all", "all mail"`. The branch is therefore +// unreachable from the UI path, and `filterDemoEmails(emails, "all", +// ...)` returns only emails whose Folders[] contains "archive" — one +// email — instead of all 13. +// +// EXPECTED FAILURE today: assertion expects len(filtered) == +// len(demoEmails()), got 1. After the fix (drop "all"/"all mail" from +// the archive aliases — or remove the dead branch and update +// TestNormalizeDemoFolder) this test passes. +func TestFilterDemoEmails_FolderAllReturnsEverything(t *testing.T) { + t.Parallel() + + all := demoEmails() + got := filterDemoEmails(all, "all", false, false) + + if len(got) != len(all) { + t.Errorf("filterDemoEmails(_, \"all\") returned %d email(s), want %d (every demo email)", + len(got), len(all)) + } +} + +// TestHandleGetEmail_CachedBodyServedWhenAPIWouldFail pins the user- +// visible promise: "when Nylas is down but the cache holds the email, +// I can still read it." 
The handler's cache-first short-circuit +// returns the cached body before ever calling GetMessage, so the API +// stub here exists as a fail-loud guard — if a refactor were to +// reorder the lookups so the API call fires first AND fails, the +// stub would record it AND the response would still need to deliver +// the body (via the fallback at handlers_email.go ~241). +func TestHandleGetEmail_CachedBodyServedWhenAPIWouldFail(t *testing.T) { + t.Parallel() + + server, client, accountEmail := newCachedTestServer(t) + putCachedEmail(t, server, accountEmail, &cache.CachedEmail{ + ID: "email-1", + FolderID: "inbox", + Subject: "Cached body survives outage", + FromName: "Cache", + FromEmail: "cache@example.com", + BodyHTML: "

cached body

", + Date: time.Now(), + CachedAt: time.Now(), + }) + // API failure stub: in the documented flow this never fires (the + // cache hit short-circuits first), but we leave it wired so any + // refactor that bypasses the cache-first lookup will land in a + // 500 instead of silently masking the regression. + client.GetMessageFunc = func(context.Context, string, string) (*domain.Message, error) { + return nil, errors.New("nylas 503 service unavailable") + } + + req := httptest.NewRequest(http.MethodGet, "/api/emails/email-1", nil) + w := httptest.NewRecorder() + server.handleGetEmail(w, req, "email-1") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s, want 200 (cached body must be served)", w.Code, w.Body.String()) + } + var resp EmailResponse + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("decode: %v", err) + } + if resp.ID != "email-1" { + t.Errorf("response id=%q, want email-1", resp.ID) + } + if resp.Body != "

cached body

" { + t.Errorf("response body=%q, want full cached BodyHTML — the cached-email response must include Body, not just metadata", resp.Body) + } +} + +// TestHandleGetEmail_APIErrorWhenNotCached_Returns500 covers the +// other end of the same branch: cache empty, API fails. The user +// sees a generic 500 (no upstream-error leakage) rather than a stuck +// loading state. Pins both the user-visible code AND the privacy +// contract that we don't echo Nylas's raw error string back. +func TestHandleGetEmail_APIErrorWhenNotCached_Returns500(t *testing.T) { + t.Parallel() + + server, client, _ := newCachedTestServer(t) + client.GetMessageFunc = func(context.Context, string, string) (*domain.Message, error) { + // Include identifying noise in the upstream error so we can + // assert the handler doesn't echo it back. + return nil, errors.New("nylas 503: grant_id=secret-grant-12345 endpoint=/messages/email-1") + } + + req := httptest.NewRequest(http.MethodGet, "/api/emails/uncached-id", nil) + w := httptest.NewRecorder() + server.handleGetEmail(w, req, "uncached-id") + + if w.Code != http.StatusInternalServerError { + t.Fatalf("status=%d body=%s, want 500", w.Code, w.Body.String()) + } + body := w.Body.String() + if strings.Contains(body, "secret-grant-12345") || strings.Contains(body, "/messages/") { + t.Errorf("response leaked upstream error details: %s", body) + } +} + +// TestHandleDeleteEmail_OfflineButQueueFails_FallsThroughToAPI pins +// that an offline server with a broken queue still attempts the live +// API call. Without this fallthrough a misconfigured cache silently +// swallows every delete the user issues — invisible data loss. +func TestHandleDeleteEmail_OfflineButQueueFails_FallsThroughToAPI(t *testing.T) { + t.Parallel() + + server, client, _ := newCachedTestServer(t) + server.SetOnline(false) + // Disable queueing so enqueueMessageDelete fails closed. 
+ server.cacheSettings.OfflineQueueEnabled = false + + apiCalled := false + client.DeleteMessageFunc = func(context.Context, string, string) error { + apiCalled = true + return nil + } + + req := httptest.NewRequest(http.MethodDelete, "/api/emails/email-1", nil) + w := httptest.NewRecorder() + server.handleDeleteEmail(w, req, "email-1") + + if !apiCalled { + t.Error("expected handler to attempt live DeleteMessage when queue is unavailable") + } + if w.Code != http.StatusOK { + t.Errorf("status=%d body=%s, want 200 (live retry succeeded)", w.Code, w.Body.String()) + } +} + +// TestHandleDeleteEmail_OnlineTransientErrorQueuesAction pins the +// online-transient-error path for delete: API errors with a +// queue-eligible error AND the queue is healthy → enqueue and return +// 200. Mirrors the update-side test so a future refactor can't +// accidentally diverge the two flows. +func TestHandleDeleteEmail_OnlineTransientErrorQueuesAction(t *testing.T) { + t.Parallel() + + server, client, _ := newCachedTestServer(t) + client.DeleteMessageFunc = func(context.Context, string, string) error { + return &transientNetErr{} + } + + req := httptest.NewRequest(http.MethodDelete, "/api/emails/email-1", nil) + w := httptest.NewRecorder() + server.handleDeleteEmail(w, req, "email-1") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s, want 200 (transient error must queue)", w.Code, w.Body.String()) + } + var resp UpdateEmailResponse + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("decode: %v", err) + } + if !resp.Success || !strings.Contains(resp.Message, "queued") { + t.Errorf("response=%+v, want Success:true with 'queued' in message", resp) + } + if server.IsOnline() { + t.Error("expected SetOnline(false) after transient API error, got isOnline=true") + } +} + +// TestHandleDeleteEmail_OnlineTransientErrorQueueFails_Returns500 +// pins the worst-case for delete: API errors with a transient AND the +// queue write itself fails. 
The user must see a 500 rather than a +// silently-dropped delete. Delete is irreversible — silent loss of +// the user's intent here is the most damaging branch in the package. +func TestHandleDeleteEmail_OnlineTransientErrorQueueFails_Returns500(t *testing.T) { + t.Parallel() + + server, client, _ := newCachedTestServer(t) + client.DeleteMessageFunc = func(context.Context, string, string) error { + return &transientNetErr{} + } + // Disable the queue so enqueueMessageDelete fails inside the + // shouldQueueEmailAction branch. + server.cacheSettings.OfflineQueueEnabled = false + + req := httptest.NewRequest(http.MethodDelete, "/api/emails/email-1", nil) + w := httptest.NewRecorder() + server.handleDeleteEmail(w, req, "email-1") + + if w.Code != http.StatusInternalServerError { + t.Fatalf("status=%d body=%s, want 500 (queue-write-fails branch)", w.Code, w.Body.String()) + } + var resp UpdateEmailResponse + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("decode: %v", err) + } + if resp.Success { + t.Errorf("response Success=true on double-failure, expected false") + } + if resp.Error == "" { + t.Error("response Error should describe the failure, got empty") + } +} + +// transientNetErr implements net.Error with Timeout()=true so the +// handler classifies it as "queue this and try again later" rather +// than a permanent 4xx. +type transientNetErr struct{} + +func (transientNetErr) Error() string { return "simulated transient network error" } +func (transientNetErr) Timeout() bool { return true } +func (transientNetErr) Temporary() bool { return true } + +// Compile-time assertion: transientNetErr satisfies net.Error. 
+var _ net.Error = transientNetErr{} diff --git a/internal/air/handlers_email_rsvp.go b/internal/air/handlers_email_rsvp.go new file mode 100644 index 0000000..dcc76b4 --- /dev/null +++ b/internal/air/handlers_email_rsvp.go @@ -0,0 +1,238 @@ +package air + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log/slog" + "net/http" + "strings" + + "github.com/nylas/cli/internal/domain" +) + +// rsvpCommentMaxBytes caps the free-form RSVP comment forwarded to Nylas. +// 1MB JSON body limits already protect against DoS, but a UI-meaningful +// cap surfaces a friendlier error and prevents accidentally pasting an +// entire email body into the comment field. +const rsvpCommentMaxBytes = 1024 + +// rsvpRequest is the JSON body accepted by POST /api/emails/{id}/rsvp. +// Mirrors the shape the CLI's `nylas calendar events rsvp` command uses +// so frontend and CLI converge on the same vocabulary. +type rsvpRequest struct { + Status string `json:"status"` + Comment string `json:"comment,omitempty"` +} + +// rsvpResponse is the success body — small on purpose so the frontend +// can update local state (button highlight, attendee count) without a +// follow-up fetch. +type rsvpResponse struct { + Status string `json:"status"` + EventID string `json:"event_id"` + CalendarID string `json:"calendar_id"` +} + +// validRSVPStatuses pins the Nylas v3 send-rsvp vocabulary. +// "noreply" exists in the API but isn't a meaningful UI choice — the +// user just doesn't click anything — so we don't accept it here. +var validRSVPStatuses = map[string]struct{}{ + "yes": {}, + "no": {}, + "maybe": {}, +} + +// errEventNotImported: the invite UID hasn't been ingested by the Nylas +// calendar importer yet — surface as 404 with a "try again" hint. +var errEventNotImported = errors.New("invite has not been imported into your calendar yet") + +// errNoWritableCalendar: grant has only read-only calendars (e.g. a +// "Holidays" subscription) — surface as 422, retry won't help. 
+var errNoWritableCalendar = errors.New("no writable calendar available") + +// handleEmailRSVP forwards the user's RSVP choice to Nylas. +// +// Security invariant: never trust a client-supplied event ID — always +// re-resolve via the email's VEVENT UID. Otherwise a forged frontend +// could RSVP to arbitrary events on the user's behalf. +func (s *Server) handleEmailRSVP(w http.ResponseWriter, r *http.Request, emailID string) { + if !requireMethod(w, r, http.MethodPost) { + return + } + + var body rsvpRequest + if err := json.NewDecoder(limitedBody(w, r)).Decode(&body); err != nil { + slog.Warn("RSVP request body decode failed", "emailID", emailID, "err", err) + var maxErr *http.MaxBytesError + if errors.As(err, &maxErr) { + writeError(w, http.StatusRequestEntityTooLarge, "Request body too large") + return + } + writeError(w, http.StatusBadRequest, "Invalid request body") + return + } + status := strings.ToLower(strings.TrimSpace(body.Status)) + if _, ok := validRSVPStatuses[status]; !ok { + writeError(w, http.StatusBadRequest, "status must be one of: yes, no, maybe") + return + } + body.Comment = strings.TrimSpace(body.Comment) + if len(body.Comment) > rsvpCommentMaxBytes { + writeError(w, http.StatusBadRequest, fmt.Sprintf("comment must be %d bytes or fewer", rsvpCommentMaxBytes)) + return + } + + if s.demoMode { + writeJSON(w, http.StatusOK, rsvpResponse{ + Status: status, + EventID: "demo-event-001", + CalendarID: "primary", + }) + return + } + + grantID := s.withAuthGrant(w, nil) + if grantID == "" { + return + } + + ctx, cancel := s.withTimeout(r) + defer cancel() + + invite, err := s.resolveEmailInvite(ctx, grantID, emailID) + if err != nil { + // Both initial GetMessage failure and the raw_mime fallback land + // here — both are transient. 502 lets the frontend offer a retry. 
+ slog.Error("RSVP failed to fetch email", "emailID", emailID, "err", err) + writeError(w, http.StatusBadGateway, "Failed to fetch email — please try again") + return + } + if !invite.HasInvite { + writeError(w, http.StatusNotFound, "This email does not contain a calendar invitation") + return + } + if strings.EqualFold(invite.Method, "CANCEL") || strings.EqualFold(invite.Status, "CANCELLED") { + writeError(w, http.StatusConflict, "This event has been cancelled — RSVP is no longer accepted") + return + } + if invite.ICalUID == "" { + // Some Microsoft senders ship invites without a UID. + writeError(w, http.StatusUnprocessableEntity, "Invite has no UID — open the event in your calendar to RSVP") + return + } + + // Search ALL writable calendars: invites often land in non-primary ones + // (work + personal under one Google account, shared team calendars). + calendarID, eventID, err := s.findInviteEventAcrossCalendars(ctx, grantID, invite.ICalUID) + if err != nil { + if errors.Is(err, errEventNotImported) { + writeError(w, http.StatusNotFound, err.Error()) + return + } + // "No writable calendar" is a config-shaped failure (only + // read-only subscriptions on this grant). Retrying won't help — + // surface a 422 with a clear message so the user knows where to + // look instead of being told to retry forever. 
+ if errors.Is(err, errNoWritableCalendar) { + writeError(w, http.StatusUnprocessableEntity, + "No writable calendar on this account — RSVP requires a calendar you can edit") + return + } + slog.Error("RSVP calendar lookup failed", "emailID", emailID, "icalUID", invite.ICalUID, "err", err) + writeError(w, http.StatusBadGateway, "Failed to look up event — please try again") + return + } + + rsvpReq := &domain.SendRSVPRequest{Status: status, Comment: body.Comment} + if err := s.nylasClient.SendRSVP(ctx, grantID, calendarID, eventID, rsvpReq); err != nil { + slog.Error("RSVP send failed", + "emailID", emailID, + "calendarID", calendarID, + "eventID", eventID, + "status", status, + "err", err, + ) + writeError(w, http.StatusBadGateway, "Failed to send RSVP — please try again") + return + } + + writeJSON(w, http.StatusOK, rsvpResponse{ + Status: status, + EventID: eventID, + CalendarID: calendarID, + }) +} + +// findInviteEventAcrossCalendars searches every writable calendar +// (primary first) for an event matching icalUID. Returns +// errEventNotImported when no calendar contains the UID. A single +// calendar erroring doesn't kill the search — the next might hold it. 
+func (s *Server) findInviteEventAcrossCalendars(ctx context.Context, grantID, icalUID string) (string, string, error) { + calendars, err := s.nylasClient.GetCalendars(ctx, grantID) + if err != nil { + return "", "", fmt.Errorf("failed to list calendars: %w", err) + } + if len(calendars) == 0 { + return "", "", errors.New("no calendars found for this account") + } + + writable := writableCalendars(calendars) + if len(writable) == 0 { + return "", "", errNoWritableCalendar + } + + var lastLookupErr error + for _, c := range writable { + eventID, err := s.findEventByICalUID(ctx, grantID, c.ID, icalUID) + if err == nil { + return c.ID, eventID, nil + } + if !errors.Is(err, errEventNotImported) { + slog.Warn("RSVP per-calendar lookup failed", + "calendarID", c.ID, + "icalUID", icalUID, + "err", err, + ) + lastLookupErr = err + } + } + + if lastLookupErr != nil { + return "", "", fmt.Errorf("failed to look up event: %w", lastLookupErr) + } + return "", "", errEventNotImported +} + +// writableCalendars returns writable calendars, primary first. +func writableCalendars(calendars []domain.Calendar) []domain.Calendar { + out := make([]domain.Calendar, 0, len(calendars)) + for _, c := range calendars { + if c.IsPrimary && !c.ReadOnly { + out = append(out, c) + } + } + for _, c := range calendars { + if !c.IsPrimary && !c.ReadOnly { + out = append(out, c) + } + } + return out +} + +// findEventByICalUID resolves a UID via Nylas v3's `ical_uid` filter. +// Returns errEventNotImported when no event matches. 
+func (s *Server) findEventByICalUID(ctx context.Context, grantID, calendarID, icalUID string) (string, error) { + resp, err := s.nylasClient.GetEventsWithCursor(ctx, grantID, calendarID, &domain.EventQueryParams{ + ICalUID: icalUID, + Limit: 1, + }) + if err != nil { + return "", err + } + if resp == nil || len(resp.Data) == 0 { + return "", errEventNotImported + } + return resp.Data[0].ID, nil +} diff --git a/internal/air/handlers_email_rsvp_edge_cases_test.go b/internal/air/handlers_email_rsvp_edge_cases_test.go new file mode 100644 index 0000000..6fa2e6b --- /dev/null +++ b/internal/air/handlers_email_rsvp_edge_cases_test.go @@ -0,0 +1,307 @@ +package air + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/nylas/cli/internal/domain" +) + +// statusCancelledOnlyMIME pins the case where METHOD is REQUEST (so the +// invite is "live") but the VEVENT carries STATUS:CANCELLED. The handler +// must still 409 — accepting an RSVP on a cancelled event would create +// a confusing diverging UI state. +const statusCancelledOnlyMIME = "Content-Type: multipart/alternative; boundary=\"B\"\r\n" + + "\r\n--B\r\n" + + "Content-Type: text/calendar; charset=UTF-8; method=REQUEST\r\n\r\n" + + "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//T//EN\r\nMETHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\nUID:status-cancelled-uid@example.com\r\nSUMMARY:CancelledStatusOnly\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T150000Z\r\n" + + "STATUS:CANCELLED\r\n" + + "END:VEVENT\r\nEND:VCALENDAR\r\n" + + "--B--\r\n" + +// TestHandleEmailRSVP_StatusCancelledOnly_Rejected covers the case where +// only STATUS:CANCELLED (no METHOD:CANCEL) marks the event as dead. +// Without this branch the 409 guard would only fire on Outlook-style +// cancellations and silently accept Google's status-only path. 
+func TestHandleEmailRSVP_StatusCancelledOnly_Rejected(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, statusCancelledOnlyMIME) + + calledRSVP := false + mock.SendRSVPFunc = func(context.Context, string, string, string, *domain.SendRSVPRequest) error { + calledRSVP = true + return nil + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusConflict { + t.Errorf("status=%d body=%s, want 409 (cancelled)", w.Code, w.Body.String()) + } + if calledRSVP { + t.Error("SendRSVP was called for STATUS:CANCELLED event — guard is missing") + } +} + +// TestHandleEmailRSVP_GetCalendarsError pins the upstream-failure path +// for the calendars listing. The handler should NOT report "no calendars +// found" (which would mislead the user) — it must surface 502 so the +// frontend offers a retry. +func TestHandleEmailRSVP_GetCalendarsError(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + mock.GetCalendarsFunc = func(context.Context, string) ([]domain.Calendar, error) { + return nil, errors.New("nylas listing failed: 503") + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusBadGateway { + t.Errorf("status=%d body=%s, want 502 on calendar listing failure", w.Code, w.Body.String()) + } +} + +// TestHandleEmailRSVP_NoCalendars covers the zero-calendar branch. +// If Nylas returns an empty calendars list (no error), the handler +// should not silently RSVP to an empty calendarID — it must 502. 
+func TestHandleEmailRSVP_NoCalendars(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + mock.GetCalendarsFunc = func(context.Context, string) ([]domain.Calendar, error) { + return []domain.Calendar{}, nil + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusBadGateway { + t.Errorf("status=%d body=%s, want 502 (no calendars)", w.Code, w.Body.String()) + } +} + +// TestHandleEmailRSVP_MissingDefaultGrant exercises the auth path: when +// no default grant is configured, withAuthGrant writes a 400 envelope +// and the handler returns without calling Nylas. Without this test, a +// future refactor could break the "select an account first" UX. +func TestHandleEmailRSVP_MissingDefaultGrant(t *testing.T) { + t.Parallel() + server, mock, _ := newCachedTestServer(t) + + // Strip the default grant so requireDefaultGrant fails closed. + if err := server.grantStore.ClearGrants(); err != nil { + t.Fatalf("clear grants: %v", err) + } + + calledNylas := false + mock.SendRSVPFunc = func(context.Context, string, string, string, *domain.SendRSVPRequest) error { + calledNylas = true + return nil + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusBadRequest { + t.Errorf("status=%d body=%s, want 400 (no default grant)", w.Code, w.Body.String()) + } + if calledNylas { + t.Error("SendRSVP called without a default grant — auth gate failed") + } +} + +// TestHandleEmailRSVP_DemoMode_RejectsInvalidStatus pins that body +// validation runs BEFORE the demo-mode short-circuit, so a hostile or +// stale frontend can't get a green-path RSVP response with garbage data. 
+func TestHandleEmailRSVP_DemoMode_RejectsInvalidStatus(t *testing.T) { + t.Parallel() + server, _, _ := newCachedTestServer(t) + server.demoMode = true + + w := postRSVP(t, server, "any-email", `{"status":"bogus"}`) + if w.Code != http.StatusBadRequest { + t.Errorf("status=%d body=%s, want 400 even in demo mode", w.Code, w.Body.String()) + } +} + +// TestHandleEmailRSVP_DemoMode_PinsLiteralEventID pins the demo response +// shape. The frontend reads event_id back to highlight the active button, +// so a regression in the demo literal would silently break the demo UX. +func TestHandleEmailRSVP_DemoMode_PinsLiteralEventID(t *testing.T) { + t.Parallel() + server, _, _ := newCachedTestServer(t) + server.demoMode = true + + w := postRSVP(t, server, "any-email", `{"status":"maybe"}`) + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + var got rsvpResponse + if err := json.Unmarshal(w.Body.Bytes(), &got); err != nil { + t.Fatalf("decode: %v", err) + } + if got.EventID != "demo-event-001" || got.CalendarID != "primary" || got.Status != "maybe" { + t.Errorf("demo response=%+v, want {Status:maybe, EventID:demo-event-001, CalendarID:primary}", got) + } +} + +// TestHandleEmailRSVP_OversizedComment pins the comment-length cap. +// Without this guard a single user could submit megabytes of comment +// text against an organiser's RSVP — the LimitedBody (1MB) bound is too +// permissive, so we cap at the UI-meaningful rsvpCommentMaxBytes. +// +// Boundary cases are also pinned: exactly rsvpCommentMaxBytes is OK, +// rsvpCommentMaxBytes+1 is rejected. This keeps the cap interpretation +// frozen against an off-by-one refactor. 
+func TestHandleEmailRSVP_OversizedComment(t *testing.T) { + t.Parallel() + cases := []struct { + name string + size int + wantCode int + wantCalls bool + }{ + {name: "at cap", size: rsvpCommentMaxBytes, wantCode: http.StatusOK, wantCalls: true}, + {name: "one over cap", size: rsvpCommentMaxBytes + 1, wantCode: http.StatusBadRequest, wantCalls: false}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + calledRSVP := false + mock.SendRSVPFunc = func(context.Context, string, string, string, *domain.SendRSVPRequest) error { + calledRSVP = true + return nil + } + body := `{"status":"yes","comment":"` + strings.Repeat("x", tc.size) + `"}` + w := postRSVP(t, server, "email-1", body) + if w.Code != tc.wantCode { + t.Errorf("status=%d body=%s, want %d", w.Code, w.Body.String(), tc.wantCode) + } + if calledRSVP != tc.wantCalls { + t.Errorf("SendRSVP called=%v, want %v", calledRSVP, tc.wantCalls) + } + }) + } +} + +// TestHandleEmailRSVP_TrimsCommentBeforeLengthCheck pins that surrounding +// whitespace is stripped before the comment cap is applied. Without this, +// a user pasting from a WYSIWYG editor (which often includes trailing +// newline/space) could trip the limit on a message they perceive as +// short. Also pins that the trimmed form is what gets forwarded — the +// Nylas organiser shouldn't see " yes please " in their notification. 
+func TestHandleEmailRSVP_TrimsCommentBeforeLengthCheck(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + var got string + mock.SendRSVPFunc = func(_ context.Context, _, _, _ string, req *domain.SendRSVPRequest) error { + got = req.Comment + return nil + } + body := `{"status":"yes","comment":" see you there \n"}` + w := postRSVP(t, server, "email-1", body) + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + if got != "see you there" { + t.Errorf("forwarded comment=%q, want %q (trimmed)", got, "see you there") + } +} + +// TestHandleEmailRSVP_OversizedBodyReturns413 pins that a request body +// exceeding the 1MB MaxRequestBodySize cap returns 413 (RequestEntityTooLarge), +// not 400 (BadRequest). Without this distinction, clients can't tell +// "your JSON is malformed" (caller bug) from "your payload is too big" +// (caller can shrink and retry). +func TestHandleEmailRSVP_OversizedBodyReturns413(t *testing.T) { + t.Parallel() + server, _ := rsvpHappyPathMock(t, rsvpInviteMIME) + + // 2MB body — well past the 1MB MaxRequestBodySize cap. + huge := strings.Repeat("a", 2<<20) + body := `{"status":"yes","comment":"` + huge + `"}` + w := postRSVP(t, server, "email-1", body) + if w.Code != http.StatusRequestEntityTooLarge { + t.Errorf("status=%d body=%s, want 413 on >1MB body", w.Code, w.Body.String()) + } +} + +// TestHandleEmailRSVP_BodyParseErrorIsGeneric pins the privacy contract: +// the JSON-decode error must NOT echo the raw bytes back to the client. +// (Localhost-only mitigates blast radius, but echoing the input weakens +// defense-in-depth and complicates anti-XSS reasoning.) 
+func TestHandleEmailRSVP_BodyParseErrorIsGeneric(t *testing.T) {
+	t.Parallel()
+	server, _ := rsvpHappyPathMock(t, rsvpInviteMIME)
+
+	probe := `{"status": <zz-probe-7f3>` // non-empty: strings.Contains(s, "") is true for every s, which would make the echo check fire unconditionally
+	w := postRSVP(t, server, "email-1", probe)
+	if w.Code != http.StatusBadRequest {
+		t.Fatalf("status=%d, want 400", w.Code)
+	}
+	if strings.Contains(w.Body.String(), probe) {
+		t.Errorf("error response echoed raw client bytes: %s", w.Body.String())
+	}
+}
+
+// TestHandleEmailRSVP_RouteDispatch wires the test through handleEmailByID
+// (the actual entry point bound to /api/emails/{id}/rsvp) instead of
+// calling handleEmailRSVP directly. This pins the path-splitting logic
+// in handlers_email.go so a future refactor of `parts[1] == "rsvp"`
+// can't silently break the route while every direct-call test still
+// passes.
+func TestHandleEmailRSVP_RouteDispatch(t *testing.T) {
+	t.Parallel()
+	server, mock := rsvpHappyPathMock(t, rsvpInviteMIME)
+
+	hit := false
+	mock.SendRSVPFunc = func(context.Context, string, string, string, *domain.SendRSVPRequest) error {
+		hit = true
+		return nil
+	}
+
+	w := httptest.NewRecorder()
+	r := httptest.NewRequest(http.MethodPost, "/api/emails/email-1/rsvp", strings.NewReader(`{"status":"yes"}`))
+	r.Header.Set("Content-Type", "application/json")
+	server.handleEmailByID(w, r)
+
+	if w.Code != http.StatusOK {
+		t.Fatalf("status=%d body=%s — route dispatch is broken", w.Code, w.Body.String())
+	}
+	if !hit {
+		t.Error("SendRSVP not called via /rsvp route — handleEmailByID failed to dispatch")
+	}
+}
+
+// (TestHandleEmailRSVP_LooksUpAcrossCalendars removed — duplicate of
+// TestHandleEmailRSVP_FindsEventInSecondaryCalendar in
+// handlers_email_rsvp_test.go, which has stricter assertions on the
+// ical_uid filter and uses reflect.DeepEqual for the walk order.)
+
+// TestHandleEmailRSVP_TransientCalendarLookupFailureSurfacedWhenAllMiss
+// pins that a non-errEventNotImported error from a per-calendar lookup
+// (e.g. 
Nylas 5xx) is surfaced as 502 when no other calendar resolves +// the event. Without this, repeated "calendar not imported" 404s would +// hide a real Nylas outage. +func TestHandleEmailRSVP_TransientCalendarLookupFailureSurfacedWhenAllMiss(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + mock.GetCalendarsFunc = func(context.Context, string) ([]domain.Calendar, error) { + return []domain.Calendar{ + {ID: "cal-primary", IsPrimary: true}, + }, nil + } + mock.GetEventsWithCursorFunc = func(context.Context, string, string, *domain.EventQueryParams) (*domain.EventListResponse, error) { + return nil, errors.New("nylas events listing returned 503") + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusBadGateway { + t.Errorf("status=%d body=%s, want 502 (transient lookup error)", w.Code, w.Body.String()) + } + if strings.Contains(w.Body.String(), "503") { + t.Errorf("response leaked upstream status code: %s", w.Body.String()) + } +} diff --git a/internal/air/handlers_email_rsvp_fixtures_test.go b/internal/air/handlers_email_rsvp_fixtures_test.go new file mode 100644 index 0000000..0d65dbc --- /dev/null +++ b/internal/air/handlers_email_rsvp_fixtures_test.go @@ -0,0 +1,97 @@ +package air + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + + nylasmock "github.com/nylas/cli/internal/adapters/nylas" + "github.com/nylas/cli/internal/domain" +) + +// Shared MIME fixtures and helpers for the RSVP handler test suites. +// Lifted out of handlers_email_rsvp_test.go so the main test file stays +// under the 600-line cap. + +// rsvpInviteMIME is a Gmail-style invite shaped like the production +// payload the RSVP path needs to handle: inline text/calendar with a +// stable UID we can resolve to a Nylas event. 
+const rsvpInviteMIME = "From: organizer@example.com\r\n" + + "To: user@example.com\r\n" + + "Subject: Event Invitation: Standup\r\n" + + "Content-Type: multipart/alternative; boundary=\"BOUNDARY1\"\r\n" + + "\r\n" + + "--BOUNDARY1\r\n" + + "Content-Type: text/calendar; charset=UTF-8; method=REQUEST\r\n" + + "\r\n" + + "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//Test//EN\r\n" + + "METHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\nUID:rsvp-event-uid@example.com\r\nSUMMARY:Standup\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T143000Z\r\n" + + "END:VEVENT\r\nEND:VCALENDAR\r\n" + + "--BOUNDARY1--\r\n" + +// cancelledInviteMIME exercises the METHOD:CANCEL guard. +const cancelledInviteMIME = "Content-Type: multipart/alternative; boundary=\"B\"\r\n" + + "\r\n--B\r\n" + + "Content-Type: text/calendar; charset=UTF-8; method=CANCEL\r\n\r\n" + + "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//T//EN\r\nMETHOD:CANCEL\r\n" + + "BEGIN:VEVENT\r\nUID:cancelled-uid@example.com\r\nSUMMARY:Killed\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T150000Z\r\n" + + "STATUS:CANCELLED\r\n" + + "END:VEVENT\r\nEND:VCALENDAR\r\n" + + "--B--\r\n" + +// noUIDInviteMIME pins the "Microsoft sent us an invite without a UID" +// edge case — handler should fail loudly, not silently RSVP to nothing. +const noUIDInviteMIME = "Content-Type: multipart/alternative; boundary=\"B\"\r\n" + + "\r\n--B\r\n" + + "Content-Type: text/calendar; charset=UTF-8; method=REQUEST\r\n\r\n" + + "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//T//EN\r\nMETHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\nSUMMARY:UID-less\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T150000Z\r\n" + + "END:VEVENT\r\nEND:VCALENDAR\r\n" + + "--B--\r\n" + +// rsvpHappyPathMock returns a server + mock pre-wired for the standard +// "everything works" path: the email exposes mime, the user has a primary +// writable calendar, and the iCal UID resolves to a Nylas event. +// Individual tests override fields on the mock to exercise failure modes. 
+func rsvpHappyPathMock(t *testing.T, mime string) (*Server, *nylasmock.MockClient) { + t.Helper() + server, mock, _ := newCachedTestServer(t) + + mock.GetMessageFunc = func(_ context.Context, _, messageID string) (*domain.Message, error) { + return &domain.Message{ID: messageID, Subject: "Event Invitation: Standup"}, nil + } + mock.GetMessageWithFieldsFunc = func(_ context.Context, _, messageID, _ string) (*domain.Message, error) { + return &domain.Message{ID: messageID, RawMIME: mime}, nil + } + mock.GetCalendarsFunc = func(context.Context, string) ([]domain.Calendar, error) { + return []domain.Calendar{ + {ID: "cal-primary", Name: "Primary", IsPrimary: true, ReadOnly: false}, + }, nil + } + mock.GetEventsWithCursorFunc = func(_ context.Context, _, _ string, params *domain.EventQueryParams) (*domain.EventListResponse, error) { + // Pin the contract: handler must filter by ical_uid, not scan + // the whole calendar — without this the lookup could return the + // first random event and we'd RSVP to the wrong meeting. + if params == nil || params.ICalUID == "" { + t.Errorf("GetEventsWithCursor called without ICalUID filter; params=%+v", params) + } + return &domain.EventListResponse{Data: []domain.Event{{ID: "evt-resolved-1"}}}, nil + } + return server, mock +} + +// postRSVP sends a POST to the RSVP endpoint and returns the recorder. 
+func postRSVP(t *testing.T, server *Server, emailID, body string) *httptest.ResponseRecorder { + t.Helper() + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodPost, "/api/emails/"+emailID+"/rsvp", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/json") + server.handleEmailRSVP(w, r, emailID) + return w +} diff --git a/internal/air/handlers_email_rsvp_test.go b/internal/air/handlers_email_rsvp_test.go new file mode 100644 index 0000000..dc6b2e1 --- /dev/null +++ b/internal/air/handlers_email_rsvp_test.go @@ -0,0 +1,542 @@ +package air + +import ( + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + + "github.com/nylas/cli/internal/domain" +) + +// MIME fixtures, rsvpHappyPathMock, and postRSVP live in +// handlers_email_rsvp_fixtures_test.go (extracted to keep this file +// under the 600-line ceiling). + +func TestHandleEmailRSVP_HappyPath(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + var sentReq *domain.SendRSVPRequest + var sentEventID, sentCalendarID string + mock.SendRSVPFunc = func(_ context.Context, grantID, calendarID, eventID string, req *domain.SendRSVPRequest) error { + if grantID != "grant-123" { + // Use Fatalf — the rest of this assertion block (event ID, + // calendar ID, request body) is meaningless if we ended up on + // the wrong grant. Loud-fail at the source so debugging + // points at the auth gate, not a downstream symptom. 
+ t.Fatalf("grantID=%q, want grant-123", grantID) + } + sentEventID = eventID + sentCalendarID = calendarID + sentReq = req + return nil + } + + w := postRSVP(t, server, "email-1", `{"status":"yes","comment":"see you there"}`) + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + if sentEventID != "evt-resolved-1" { + t.Errorf("eventID=%q, want evt-resolved-1 (resolved by ical_uid)", sentEventID) + } + if sentCalendarID != "cal-primary" { + t.Errorf("calendarID=%q, want cal-primary", sentCalendarID) + } + if sentReq == nil || sentReq.Status != "yes" || sentReq.Comment != "see you there" { + t.Errorf("request=%+v, want {Status:yes, Comment:see you there}", sentReq) + } + + var got rsvpResponse + if err := json.Unmarshal(w.Body.Bytes(), &got); err != nil { + t.Fatalf("decode: %v", err) + } + if got.Status != "yes" || got.EventID != "evt-resolved-1" || got.CalendarID != "cal-primary" { + t.Errorf("response=%+v", got) + } +} + +func TestHandleEmailRSVP_MethodNotAllowed(t *testing.T) { + t.Parallel() + server, _ := rsvpHappyPathMock(t, rsvpInviteMIME) + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/emails/email-1/rsvp", http.NoBody) + server.handleEmailRSVP(w, r, "email-1") + + if w.Code != http.StatusMethodNotAllowed { + t.Errorf("status=%d, want 405", w.Code) + } +} + +func TestHandleEmailRSVP_InvalidStatus(t *testing.T) { + t.Parallel() + cases := []struct { + name string + body string + }{ + {"empty", `{"status":""}`}, + {"unknown", `{"status":"sure"}`}, + {"yes-with-typo", `{"status":"yess"}`}, + {"noreply rejected", `{"status":"noreply"}`}, // valid in Nylas API but no UI affordance + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + server, _ := rsvpHappyPathMock(t, rsvpInviteMIME) + w := postRSVP(t, server, "email-1", tc.body) + if w.Code != http.StatusBadRequest { + t.Errorf("status=%d, want 400, body=%s", w.Code, w.Body.String()) + } + }) + } +} + +// 
TestHandleEmailRSVP_StatusCaseInsensitive pins that yes/no/maybe in +// any case (including with surrounding whitespace) all normalize to the +// lowercase Nylas vocabulary before being forwarded. Without exercising +// every branch a future refactor of the `strings.ToLower` chain could +// silently regress one variant. +func TestHandleEmailRSVP_StatusCaseInsensitive(t *testing.T) { + t.Parallel() + cases := []struct { + name string + input string + want string + }{ + {name: "uppercase yes", input: "YES", want: "yes"}, + {name: "titlecase maybe", input: "Maybe", want: "maybe"}, + {name: "uppercase no", input: "NO", want: "no"}, + {name: "padded yes", input: " yes ", want: "yes"}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + var got string + mock.SendRSVPFunc = func(_ context.Context, _, _, _ string, req *domain.SendRSVPRequest) error { + got = req.Status + return nil + } + w := postRSVP(t, server, "email-1", `{"status":"`+tc.input+`"}`) + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + if got != tc.want { + t.Errorf("forwarded status=%q, want %q", got, tc.want) + } + }) + } +} + +func TestHandleEmailRSVP_InvalidJSONBody(t *testing.T) { + t.Parallel() + server, _ := rsvpHappyPathMock(t, rsvpInviteMIME) + + w := postRSVP(t, server, "email-1", `not json at all`) + if w.Code != http.StatusBadRequest { + t.Errorf("status=%d, want 400", w.Code) + } +} + +func TestHandleEmailRSVP_NoInviteOnEmail(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + mock.GetMessageWithFieldsFunc = func(context.Context, string, string, string) (*domain.Message, error) { + return &domain.Message{RawMIME: "From: a@b.example\r\nSubject: Hi\r\n\r\nplain"}, nil + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusNotFound { + t.Errorf("status=%d body=%s, want 404 (no calendar 
invitation)", w.Code, w.Body.String()) + } +} + +func TestHandleEmailRSVP_CancelledInviteRejected(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, cancelledInviteMIME) + + calledRSVP := false + mock.SendRSVPFunc = func(context.Context, string, string, string, *domain.SendRSVPRequest) error { + calledRSVP = true + return nil + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusConflict { + t.Errorf("status=%d body=%s, want 409 (cancelled)", w.Code, w.Body.String()) + } + if calledRSVP { + t.Error("SendRSVP was called for a cancelled event — guard is missing") + } +} + +func TestHandleEmailRSVP_MissingUIDRejected(t *testing.T) { + t.Parallel() + server, _ := rsvpHappyPathMock(t, noUIDInviteMIME) + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusUnprocessableEntity { + t.Errorf("status=%d body=%s, want 422 (no UID)", w.Code, w.Body.String()) + } +} + +func TestHandleEmailRSVP_NoMatchingEvent(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + mock.GetEventsWithCursorFunc = func(context.Context, string, string, *domain.EventQueryParams) (*domain.EventListResponse, error) { + return &domain.EventListResponse{Data: nil}, nil + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusNotFound { + t.Errorf("status=%d body=%s, want 404 (event not imported)", w.Code, w.Body.String()) + } + if !strings.Contains(w.Body.String(), "imported") { + t.Errorf("error message should mention import lag, got %s", w.Body.String()) + } +} + +// TestHandleEmailRSVP_NoWritableCalendar pins the user-facing surface +// for "this account has only read-only calendars". This is a +// config-shaped failure — not transient — so the response must be 422 +// with a descriptive message, not a generic 502 that would invite a +// useless retry. 
Asserting the message body too keeps a future refactor +// from silently broadening the user-visible string. +func TestHandleEmailRSVP_NoWritableCalendar(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + mock.GetCalendarsFunc = func(context.Context, string) ([]domain.Calendar, error) { + return []domain.Calendar{ + {ID: "subscribed", Name: "US Holidays", IsPrimary: true, ReadOnly: true}, + }, nil + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusUnprocessableEntity { + t.Errorf("status=%d body=%s, want 422 (no writable calendar — config issue, not transient)", w.Code, w.Body.String()) + } + if !strings.Contains(w.Body.String(), "writable") { + t.Errorf("response should mention 'writable' so users know what to fix; got %s", w.Body.String()) + } +} + +// TestHandleEmailRSVP_FindsEventInSecondaryCalendar pins the multi-calendar +// search: when the invite landed in a writable calendar that ISN'T the +// primary (work + personal account, shared team calendar, etc.), the +// handler must keep looking instead of returning "not imported" off the +// first calendar's empty result. Also pins the lookup ORDER — primary +// must be queried first so we don't surprise users with a slower or +// less-likely-to-match calendar getting priority. +func TestHandleEmailRSVP_FindsEventInSecondaryCalendar(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + mock.GetCalendarsFunc = func(context.Context, string) ([]domain.Calendar, error) { + return []domain.Calendar{ + {ID: "primary-cal", IsPrimary: true, ReadOnly: false}, + {ID: "team-cal", IsPrimary: false, ReadOnly: false}, + }, nil + } + // Record query order so we can pin "primary first, then secondaries" + // — the docstring on findInviteEventAcrossCalendars promises this and + // without an order assertion a future refactor could quietly invert it. 
+ var queryOrder []string + mock.GetEventsWithCursorFunc = func(_ context.Context, _, calendarID string, params *domain.EventQueryParams) (*domain.EventListResponse, error) { + if params == nil || params.ICalUID == "" { + t.Errorf("ical_uid filter missing on calendar %q", calendarID) + } + queryOrder = append(queryOrder, calendarID) + if calendarID == "team-cal" { + return &domain.EventListResponse{Data: []domain.Event{{ID: "evt-team-1"}}}, nil + } + return &domain.EventListResponse{Data: nil}, nil + } + + var sentCal, sentEvent string + mock.SendRSVPFunc = func(_ context.Context, _, calendarID, eventID string, _ *domain.SendRSVPRequest) error { + sentCal = calendarID + sentEvent = eventID + return nil + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s, want 200 (event must resolve via secondary calendar)", w.Code, w.Body.String()) + } + if sentCal != "team-cal" || sentEvent != "evt-team-1" { + t.Errorf("RSVP sent to (%q, %q), want (team-cal, evt-team-1)", sentCal, sentEvent) + } + wantOrder := []string{"primary-cal", "team-cal"} + if !reflect.DeepEqual(queryOrder, wantOrder) { + t.Errorf("calendar lookup order=%v, want %v (primary must be queried first)", queryOrder, wantOrder) + } +} + +// TestHandleEmailRSVP_TransientErrorOnPrimaryFallsThroughToSecondary +// pins the partial-failure walk: a flaky primary should NOT abort the +// search if a later calendar holds the event. Without this test, a +// future refactor could decide "tracked errors abort the loop" without +// breaking any existing assertion. 
+func TestHandleEmailRSVP_TransientErrorOnPrimaryFallsThroughToSecondary(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + mock.GetCalendarsFunc = func(context.Context, string) ([]domain.Calendar, error) { + return []domain.Calendar{ + {ID: "primary-cal", IsPrimary: true, ReadOnly: false}, + {ID: "team-cal", IsPrimary: false, ReadOnly: false}, + }, nil + } + mock.GetEventsWithCursorFunc = func(_ context.Context, _, calendarID string, _ *domain.EventQueryParams) (*domain.EventListResponse, error) { + if calendarID == "primary-cal" { + // Transient blip — must NOT short-circuit the walk. + return nil, errors.New("upstream 503 service unavailable") + } + return &domain.EventListResponse{Data: []domain.Event{{ID: "evt-team-2"}}}, nil + } + + var sentCal, sentEvent string + mock.SendRSVPFunc = func(_ context.Context, _, calendarID, eventID string, _ *domain.SendRSVPRequest) error { + sentCal = calendarID + sentEvent = eventID + return nil + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s, want 200 (transient err on primary must fall through)", w.Code, w.Body.String()) + } + if sentCal != "team-cal" || sentEvent != "evt-team-2" { + t.Errorf("RSVP sent to (%q, %q), want (team-cal, evt-team-2)", sentCal, sentEvent) + } +} + +// TestHandleEmailRSVP_TransientLookupErrorSurfacedWhenAllFail pins that +// when every writable calendar errors on the lookup, the handler returns +// 502 (so the user can retry) — NOT 404. A transient Nylas blip on a +// secondary calendar shouldn't be reported as "this invite doesn't +// exist." 
+func TestHandleEmailRSVP_TransientLookupErrorSurfacedWhenAllFail(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + mock.GetCalendarsFunc = func(context.Context, string) ([]domain.Calendar, error) { + return []domain.Calendar{ + {ID: "cal-a", IsPrimary: true, ReadOnly: false}, + {ID: "cal-b", IsPrimary: false, ReadOnly: false}, + }, nil + } + mock.GetEventsWithCursorFunc = func(context.Context, string, string, *domain.EventQueryParams) (*domain.EventListResponse, error) { + return nil, errors.New("upstream 502") + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusBadGateway { + t.Errorf("status=%d body=%s, want 502 (transient lookup error must not be 404)", w.Code, w.Body.String()) + } +} + +func TestHandleEmailRSVP_FallsBackToFirstWritableCalendar(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + mock.GetCalendarsFunc = func(context.Context, string) ([]domain.Calendar, error) { + // Primary is read-only (e.g. a "holidays" subscription marked + // primary by mistake) — handler should fall through to the next + // writable calendar instead of failing. 
+ return []domain.Calendar{ + {ID: "ro", IsPrimary: true, ReadOnly: true}, + {ID: "writable", IsPrimary: false, ReadOnly: false}, + }, nil + } + + var seenCal string + mock.SendRSVPFunc = func(_ context.Context, _, calendarID, _ string, _ *domain.SendRSVPRequest) error { + seenCal = calendarID + return nil + } + + w := postRSVP(t, server, "email-1", `{"status":"maybe"}`) + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + if seenCal != "writable" { + t.Errorf("calendarID=%q, want fallback to writable", seenCal) + } +} + +func TestHandleEmailRSVP_UpstreamSendRSVPError(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + mock.SendRSVPFunc = func(context.Context, string, string, string, *domain.SendRSVPRequest) error { + return errors.New("nylas API error: 503 service unavailable") + } + + w := postRSVP(t, server, "email-1", `{"status":"no"}`) + if w.Code != http.StatusBadGateway { + t.Errorf("status=%d body=%s, want 502", w.Code, w.Body.String()) + } +} + +func TestHandleEmailRSVP_UpstreamGetMessageError(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + mock.GetMessageFunc = func(context.Context, string, string) (*domain.Message, error) { + return nil, errors.New("nylas API error: 500") + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusBadGateway { + t.Errorf("status=%d body=%s, want 502", w.Code, w.Body.String()) + } +} + +// TestHandleEmailRSVP_RawMimeFetchFails pins the regression where a +// transient raw_mime fetch failure was misclassified as "no invite" and +// returned 404. Now it must propagate as 502 so the frontend can offer +// a retry rather than telling the user the invite doesn't exist. 
+func TestHandleEmailRSVP_RawMimeFetchFails(t *testing.T) { + t.Parallel() + server, mock := rsvpHappyPathMock(t, rsvpInviteMIME) + + // GetMessage succeeds but exposes no parseable attachment, so the + // resolver MUST fall through to GetMessageWithFields. Make that fail + // with a transient-looking error. + mock.GetMessageFunc = func(_ context.Context, _, messageID string) (*domain.Message, error) { + return &domain.Message{ID: messageID, Subject: "Event Invitation"}, nil + } + mock.GetMessageWithFieldsFunc = func(context.Context, string, string, string) (*domain.Message, error) { + return nil, errors.New("upstream timeout: 504") + } + + w := postRSVP(t, server, "email-1", `{"status":"yes"}`) + if w.Code != http.StatusBadGateway { + t.Errorf("status=%d body=%s, want 502 (raw_mime fetch failure should not be 404)", w.Code, w.Body.String()) + } + if strings.Contains(w.Body.String(), "does not contain a calendar invitation") { + t.Errorf("raw_mime fetch failure must not be misreported as 'no invite'; body=%s", w.Body.String()) + } +} + +// TestHandleEmailInvite_RawMimeFetchSilentlyDegrades pins the inverse +// for the preview endpoint: the same upstream failure should silently +// return HasInvite:false (so the email view doesn't error out) rather +// than crash the page with 500. Behavioural pair to the test above. 
+func TestHandleEmailInvite_RawMimeFetchSilentlyDegrades(t *testing.T) { + t.Parallel() + server, mock, _ := newCachedTestServer(t) + + mock.GetMessageFunc = func(_ context.Context, _, messageID string) (*domain.Message, error) { + return &domain.Message{ID: messageID, Subject: "Event Invitation"}, nil + } + mock.GetMessageWithFieldsFunc = func(context.Context, string, string, string) (*domain.Message, error) { + return nil, errors.New("upstream timeout: 504") + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/emails/email-1/invite", http.NoBody) + server.handleEmailInvite(w, r, "email-1") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s, want 200 (silent degrade)", w.Code, w.Body.String()) + } + var got CalendarInviteResponse + if err := json.Unmarshal(w.Body.Bytes(), &got); err != nil { + t.Fatalf("decode: %v", err) + } + if got.HasInvite { + t.Errorf("HasInvite=true on raw_mime fetch failure, want false; body=%s", w.Body.String()) + } +} + +func TestHandleEmailRSVP_DemoMode(t *testing.T) { + t.Parallel() + server, _, _ := newCachedTestServer(t) + server.demoMode = true + + w := postRSVP(t, server, "any-email", `{"status":"yes"}`) + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + var got rsvpResponse + if err := json.Unmarshal(w.Body.Bytes(), &got); err != nil { + t.Fatalf("decode: %v", err) + } + if got.Status != "yes" || got.EventID == "" { + t.Errorf("demo response=%+v", got) + } +} + +func TestHandleEmailRSVP_AttachmentPathHasUID(t *testing.T) { + // When the email carries the ICS as a real attachment (Microsoft's + // shape), the parser should still surface the UID so RSVP works. 
+ t.Parallel() + server, mock, _ := newCachedTestServer(t) + + icsBody := "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//T//EN\r\nMETHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\nUID:attached-uid@example.com\r\nSUMMARY:Attached\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T150000Z\r\n" + + "END:VEVENT\r\nEND:VCALENDAR\r\n" + + mock.GetMessageFunc = func(context.Context, string, string) (*domain.Message, error) { + return &domain.Message{ + ID: "email-x", + Attachments: []domain.Attachment{ + {ID: "att-1", Filename: "invite.ics", ContentType: "text/calendar"}, + }, + }, nil + } + mock.DownloadAttachmentFunc = func(context.Context, string, string, string) (io.ReadCloser, error) { + return io.NopCloser(strings.NewReader(icsBody)), nil + } + mock.GetCalendarsFunc = func(context.Context, string) ([]domain.Calendar, error) { + return []domain.Calendar{{ID: "primary", IsPrimary: true}}, nil + } + var seenUID string + mock.GetEventsWithCursorFunc = func(_ context.Context, _, _ string, params *domain.EventQueryParams) (*domain.EventListResponse, error) { + if params != nil { + seenUID = params.ICalUID + } + return &domain.EventListResponse{Data: []domain.Event{{ID: "evt-attached-1"}}}, nil + } + mock.SendRSVPFunc = func(context.Context, string, string, string, *domain.SendRSVPRequest) error { + return nil + } + + w := postRSVP(t, server, "email-x", `{"status":"yes"}`) + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + if seenUID != "attached-uid@example.com" { + t.Errorf("ical_uid passed to events lookup=%q, want attached-uid@example.com", seenUID) + } +} + +// TestParseICS_SurfacesUID covers the parser change for the RSVP feature. +// Without the UID the RSVP handler can't resolve a Nylas event ID. 
+func TestParseICS_SurfacesUID(t *testing.T) { + body := "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//T//EN\r\nMETHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\nUID:my-uid@example.com\r\nSUMMARY:Standup\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T150000Z\r\n" + + "END:VEVENT\r\nEND:VCALENDAR\r\n" + got, err := parseICS(body) + if err != nil { + t.Fatalf("parseICS: %v", err) + } + if got.ICalUID != "my-uid@example.com" { + t.Errorf("ICalUID=%q, want my-uid@example.com", got.ICalUID) + } +} + +// (postRSVP moved to handlers_email_rsvp_fixtures_test.go) diff --git a/internal/air/handlers_email_silent_failure_test.go b/internal/air/handlers_email_silent_failure_test.go new file mode 100644 index 0000000..e485760 --- /dev/null +++ b/internal/air/handlers_email_silent_failure_test.go @@ -0,0 +1,204 @@ +package air + +import ( + "bytes" + "context" + "log/slog" + "net/http" + "net/http/httptest" + "testing" + + "github.com/nylas/cli/internal/air/cache" + "github.com/nylas/cli/internal/domain" + "github.com/stretchr/testify/assert" +) + +// captureSlog redirects slog.Default to a buffer for the duration of t. +// Tests using it must not call t.Parallel() — slog.Default is process-global. +func captureSlog(t *testing.T) *bytes.Buffer { + t.Helper() + buf := &bytes.Buffer{} + handler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}) + prev := slog.Default() + slog.SetDefault(slog.New(handler)) + t.Cleanup(func() { slog.SetDefault(prev) }) + return buf +} + +// TestHandleDeleteEmail_OfflineQueueFails_LogsFailure exposes the +// observability divergence between the near-twin handlers +// handleUpdateEmail and handleDeleteEmail. Both follow the same +// shape: +// +// if !s.IsOnline() { +// if err := enqueue(...); err == nil { return queued-200 } +// // err != nil: queue is broken, falling through to API +// } +// ... live API call ... 
+// +// handleUpdateEmail logs the offline-enqueue failure at +// handlers_email.go:305 ("offline enqueue failed, attempting live API +// call"). handleDeleteEmail does not — the err return from +// enqueueMessageDelete on line 372 is silently dropped (no else +// clause). A queue health regression that affects both code paths is +// debuggable for update and invisible for delete. +// +// EXPECTED FAILURE today: the slog buffer for the delete path is +// empty (no warning emitted). After the fix it contains a warning +// referencing the emailID. The fail-first message is the bug receipt. +func TestHandleDeleteEmail_OfflineQueueFails_LogsFailure(t *testing.T) { + // No t.Parallel — captureSlog mutates the process-global default. + server, client, _ := newCachedTestServer(t) + server.SetOnline(false) + // Disable the offline queue so enqueueMessageDelete returns + // "offline queue unavailable" (handlers_email_offline.go:51). + server.cacheSettings.OfflineQueueEnabled = false + + apiCalled := false + client.DeleteMessageFunc = func(context.Context, string, string) error { + apiCalled = true + return nil + } + + const sentinelEmailID = "deletefail-canary-offline-XYZ" + logs := captureSlog(t) + + req := httptest.NewRequest(http.MethodDelete, "/api/emails/"+sentinelEmailID, nil) + w := httptest.NewRecorder() + server.handleDeleteEmail(w, req, sentinelEmailID) + + if !apiCalled { + t.Fatal("expected fall-through to live DeleteMessage when queue fails") + } + got := logs.String() + assert.Contains(t, got, sentinelEmailID, + "slog should record the offline-queue failure for emailID %q (parity with handleUpdateEmail line 305); got %q", + sentinelEmailID, got) +} + +// TestHandleDeleteEmail_QueueWriteAfterTransientError_LogsQueueFailure +// pins the second observability gap. 
When the live API errors with a +// transient/queue-eligible error AND the offline queue write itself +// fails, handleUpdateEmail logs both error contexts together at +// handlers_email.go:330-335: +// +// slog.Error("queue enqueue after transient API error failed", +// "emailID", emailID, "apiErr", err, "queueErr", queueErr) +// +// handleDeleteEmail's mirror branch (handlers_email.go:384-393) has no +// such log — only the catch-all "Failed to delete email" log fires +// with `err = apiErr`, dropping the `queueErr` context entirely. The +// double-failure mode is exactly when a queue health alert matters +// most (the user is also about to see a 500), and it's exactly when +// the divergence makes it invisible. +// +// EXPECTED FAILURE today: the slog buffer captures "Failed to delete +// email" but does not record the queueErr substring "offline queue +// unavailable". After the fix the queueErr is co-logged with apiErr. +func TestHandleDeleteEmail_QueueWriteAfterTransientError_LogsQueueFailure(t *testing.T) { + // No t.Parallel — captureSlog mutates the process-global default. + server, client, _ := newCachedTestServer(t) + client.DeleteMessageFunc = func(context.Context, string, string) error { + return &transientNetErr{} + } + // To reach the inner queueErr branch we need shouldQueueEmailAction + // to say YES (queue is configured AND error looks transient) but + // the actual enqueue to fail. Achieved by replacing the grant + // store's grant with one whose Email is empty: withAuthGrant still + // resolves the default grant (ID-based), but + // `getAccountEmail(grantID)` returns "" because the grant's Email + // is blank, which trips enqueueMessageDelete's first guard: + // `if accountEmail == "" { return errors.New("offline queue unavailable") }`. 
+ grantStore := server.grantStore.(*testGrantStore) + grantStore.grants = []domain.GrantInfo{{ + ID: "grant-123", + Email: "", // ← the load-bearing empty + Provider: domain.ProviderGoogle, + }} + grantStore.defaultGrant = "grant-123" + + const sentinelEmailID = "deletefail-canary-double-XYZ" + logs := captureSlog(t) + + req := httptest.NewRequest(http.MethodDelete, "/api/emails/"+sentinelEmailID, nil) + w := httptest.NewRecorder() + server.handleDeleteEmail(w, req, sentinelEmailID) + + if w.Code != http.StatusInternalServerError { + t.Fatalf("status=%d body=%s, want 500 (double-failure path)", + w.Code, w.Body.String()) + } + got := logs.String() + // Sentinel from the queue-error message in handlers_email_offline.go:51. + const queueErrSentinel = "offline queue unavailable" + assert.Contains(t, got, queueErrSentinel, + "slog should record the queueErr context (parity with handleUpdateEmail lines 330-335); got %q", + got) + // Also assert the test's emailID appears so the log is correlatable + // to the failing request — same correlation handleUpdateEmail offers. + assert.Contains(t, got, sentinelEmailID, + "slog should record emailID for correlation; got %q", got) +} + +// TestHandleGetEmail_CacheFillFailure_LogsFailure pins the parity gap +// between handleListEmails (handlers_email.go:144 logs cache-fill +// failures) and handleGetEmail (line 261-264 silently does +// `_ = s.withEmailStore(...)`). The user-facing behavior is correct — +// cache write failures must not break the response — but the silent +// drop means a wedged single-message cache drifts further from server +// state on every request, with no log to debug from. +// +// We force the write to fail by dropping the underlying SQLite emails +// table after lazy-init: the lookup call returns "no such table" (no +// cache hit, falls through), the live GetMessage succeeds, the +// post-fetch Put then fails for the same reason. Today no log fires. 
+// After the fix, slog should record the failure with the sentinel +// emailID for support diagnosability. +// +// EXPECTED FAILURE today: the slog buffer for the cache-fill path is +// silent. After the fix it contains a warning referencing the emailID +// (parity with handleListEmails). +func TestHandleGetEmail_CacheFillFailure_LogsFailure(t *testing.T) { + // No t.Parallel — captureSlog mutates the process-global default. + server, client, accountEmail := newCachedTestServer(t) + + const sentinelEmailID = "cachefill-canary-XYZ" + client.GetMessageFunc = func(_ context.Context, _, msgID string) (*domain.Message, error) { + return &domain.Message{ + ID: msgID, + Subject: "Test message", + From: []domain.EmailParticipant{{Email: "x@example.com"}}, + }, nil + } + + // Lazy-init the emails table by acquiring the store once, then + // drop it so the post-GetMessage Put fails with "no such table: + // emails". The handler-side `_ = s.withEmailStore(...)` swallows + // that error today. + if err := server.withEmailStore(accountEmail, func(_ *cache.EmailStore) error { return nil }); err != nil { + t.Fatalf("lazy-init emails store: %v", err) + } + db, err := server.cacheManager.GetDB(accountEmail) + if err != nil { + t.Fatalf("get db: %v", err) + } + if _, err := db.Exec("DROP TABLE IF EXISTS emails"); err != nil { + t.Fatalf("drop table: %v", err) + } + + logs := captureSlog(t) + + req := httptest.NewRequest(http.MethodGet, "/api/emails/"+sentinelEmailID, nil) + w := httptest.NewRecorder() + server.handleGetEmail(w, req, sentinelEmailID) + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s, want 200 — user-facing response must not break on cache failure", + w.Code, w.Body.String()) + } + + got := logs.String() + assert.Contains(t, got, sentinelEmailID, + "slog should record the emailID for cache-fill failures (parity with handleListEmails:144); got %q", + got) +} diff --git a/internal/air/handlers_helpers.go b/internal/air/handlers_helpers.go index 
4b259b9..1ae84e1 100644 --- a/internal/air/handlers_helpers.go +++ b/internal/air/handlers_helpers.go @@ -3,7 +3,9 @@ package air import ( "context" "encoding/json" + "log/slog" "net/http" + "strings" "time" ) @@ -29,19 +31,29 @@ func (s *Server) requireConfig(w http.ResponseWriter) bool { return true } -// parseJSONBody decodes a JSON request body into the provided destination. -// Returns true if successful, false if not (error response already written). -// Callers should return immediately when this returns false. +// parseJSONBody decodes the request body into dest. Returns false on +// error after writing a generic 400; the raw decoder error stays in slog +// (it can quote request bytes — PII or attacker input). func parseJSONBody[T any](w http.ResponseWriter, r *http.Request, dest *T) bool { if err := json.NewDecoder(limitedBody(w, r)).Decode(dest); err != nil { - writeJSON(w, http.StatusBadRequest, map[string]string{ - "error": "Invalid request body: " + err.Error(), - }) + slog.Warn("invalid JSON request body", + "err", err, + "path", r.URL.Path, + "method", r.Method, + ) + writeError(w, http.StatusBadRequest, "Invalid request body") return false } return true } +// writeBadParamError writes a generic 400 ("invalid ") and logs the +// raw parser error via slog (it formats the raw query value with %q). +func writeBadParamError(w http.ResponseWriter, key string, perr error) { + slog.Warn("invalid query parameter", "key", key, "err", perr) + writeError(w, http.StatusBadRequest, "invalid "+key) +} + // handleDemoMode returns the demo response if in demo mode. // Returns true if demo mode is active (response already written), false otherwise. // Callers should return immediately when this returns true. @@ -58,7 +70,7 @@ func (s *Server) handleDemoMode(w http.ResponseWriter, data any) bool { // Callers should return immediately when this returns false. 
func requireMethod(w http.ResponseWriter, r *http.Request, method string) bool { if r.Method != method { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + writeError(w, http.StatusMethodNotAllowed, "Method not allowed") return false } return true @@ -69,6 +81,31 @@ func writeError(w http.ResponseWriter, status int, message string) { writeJSON(w, status, map[string]string{"error": message}) } +// writeUpstreamError writes msg to the client and logs the raw err via +// slog. Use whenever an upstream error string could leak grant IDs, +// endpoint paths, or response fragments. attrs are extra slog kv pairs. +func writeUpstreamError(w http.ResponseWriter, status int, msg string, err error, attrs ...any) { + slog.Error(msg, append([]any{"err", err}, attrs...)...) + writeError(w, status, msg) +} + +// redactEmail returns a log-safe rendering of an email address: the +// local part is replaced with "***" so the domain remains debuggable +// without writing the full address into log files. Empty input stays +// empty so missing-account branches don't gain a misleading "***". +// +// Example: "alice@example.com" → "***@example.com". +func redactEmail(email string) string { + if email == "" { + return "" + } + at := strings.LastIndex(email, "@") + if at <= 0 || at == len(email)-1 { + return "***" + } + return "***" + email[at:] +} + // withAuthGrant combines demo mode check, config check, and grant ID retrieval. // Returns the grant ID if all checks pass, or empty string if any check fails // (appropriate error response already written). diff --git a/internal/air/handlers_helpers_test.go b/internal/air/handlers_helpers_test.go index f9cceea..b03d6ef 100644 --- a/internal/air/handlers_helpers_test.go +++ b/internal/air/handlers_helpers_test.go @@ -284,3 +284,81 @@ func TestWriteError(t *testing.T) { assert.Contains(t, body, "quotes") }) } + +// TestParseJSONBody_DoesNotLeakDecoderError pins the privacy contract on +// the JSON-body intake helper. 
Today it returns +// +// "Invalid request body: " + err.Error() +// +// to the client. encoding/json's decode errors fingerprint the request +// — they include the Go struct field path, the wire type, and (for +// UnmarshalTypeError) value fragments from the body. None of that +// belongs in a browser toast or a customer-visible error envelope. +// +// The same writeUpstreamError discipline applied at six handler sites in +// the air-i003 review-pass should apply here. Until parseJSONBody is +// migrated, this test fails on the substring assertions and serves as +// the bug receipt. +// +// EXPECTED FAILURE today: response body contains "cannot unmarshal" and +// the field path. After the fix the body is a generic message and the +// raw decoder error lives in slog only. +func TestParseJSONBody_DoesNotLeakDecoderError(t *testing.T) { + t.Parallel() + + body := strings.NewReader(`{"unread":"not-a-bool"}`) + req := httptest.NewRequest(http.MethodPost, "/x", body) + w := httptest.NewRecorder() + + var dest struct { + Unread *bool `json:"unread"` + } + ok := parseJSONBody(w, req, &dest) + + assert.False(t, ok, "parseJSONBody should reject malformed body") + assert.Equal(t, http.StatusBadRequest, w.Code) + + got := w.Body.String() + // Substrings drawn from encoding/json's UnmarshalTypeError format. + // Any of them in the response body proves the decoder error leaked. + forbidden := []string{ + "cannot unmarshal", + "Go struct field", + ".unread", + "of type bool", + } + for _, s := range forbidden { + assert.NotContains(t, got, s, + "response body must not echo decoder detail %q; got %s", s, got) + } +} + +// TestRedactEmail pins the local-part-only redaction. Six-line privacy +// helper, exercised today only indirectly through slog calls — a +// regression that returned the full address would only show up in logs +// (which tests don't observe) until a customer report surfaced it. 
+func TestRedactEmail(t *testing.T) { + t.Parallel() + + cases := []struct { + in string + want string + }{ + {"", ""}, + {"alice@example.com", "***@example.com"}, + {"a@b.example", "***@b.example"}, + // LastIndex semantics: only the final @ is treated as the boundary. + {"user@inner@outer.example", "***@outer.example"}, + // Degenerate inputs: no local part, no domain, no @ at all. + {"@example.com", "***"}, + {"user@", "***"}, + {"no-at-sign", "***"}, + {"@", "***"}, + } + for _, tc := range cases { + t.Run(tc.in, func(t *testing.T) { + t.Parallel() + assert.Equal(t, tc.want, redactEmail(tc.in)) + }) + } +} diff --git a/internal/air/handlers_privacy_sweep_test.go b/internal/air/handlers_privacy_sweep_test.go new file mode 100644 index 0000000..17affd2 --- /dev/null +++ b/internal/air/handlers_privacy_sweep_test.go @@ -0,0 +1,181 @@ +package air + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/nylas/cli/internal/domain" + "github.com/stretchr/testify/assert" +) + +// upstreamErrorSentinel is a noise-laden upstream error string that +// resembles real Nylas API failures: it carries grant identifiers, +// endpoint paths, and token-shaped fragments. Each writeUpstreamError +// site is supposed to redact this entirely from the response body and +// log the raw err via slog only. Tests assert the response body does +// not echo any of these substrings. 
+const upstreamErrorSentinel = "nylas 503: grant_id=secret-grant-leak-XYZ refresh_token=ya29.zzCANARYzz endpoint=/v3/grants/g/leak" + +func bodyDoesNotLeakUpstream(t *testing.T, body string) { + t.Helper() + for _, fragment := range []string{ + "secret-grant-leak-XYZ", + "ya29.zzCANARYzz", + "/v3/grants/g/leak", + "503", + } { + assert.NotContains(t, body, fragment, + "response body leaked upstream-error fragment %q; got %s", + fragment, body) + } +} + +// TestHandleListEmails_APIError_NoCache_DoesNotLeakUpstream pins the +// privacy contract on handlers_email.go:123 — the writeUpstreamError +// site reached when the API call fails AND the cache fallback yields +// nothing. The user gets a 500; the body must be a generic "please +// try again" message; the upstream err lives in slog only. +// +// Lock-down: redaction is in place today. A regression that +// re-introduced `err.Error()` interpolation would surface here. +func TestHandleListEmails_APIError_NoCache_DoesNotLeakUpstream(t *testing.T) { + t.Parallel() + server, client, _ := newCachedTestServer(t) + // Mock routes GetMessagesWithCursor through GetMessagesWithParamsFunc + // (see mock_messages.go:32) — set the params variant so the cursor + // call still hits our error path. 
+ client.GetMessagesWithParamsFunc = func(_ context.Context, _ string, _ *domain.MessageQueryParams) ([]domain.Message, error) { + return nil, errors.New(upstreamErrorSentinel) + } + + req := httptest.NewRequest(http.MethodGet, "/api/emails?folder=inbox", nil) + w := httptest.NewRecorder() + server.handleListEmails(w, req) + + if w.Code != http.StatusInternalServerError { + t.Fatalf("status=%d body=%s, want 500 (no cache → upstream error)", + w.Code, w.Body.String()) + } + bodyDoesNotLeakUpstream(t, w.Body.String()) +} + +// TestHandleEmailInvite_APIError_DoesNotLeakUpstream pins the privacy +// contract on handlers_email_invite.go:96 — the writeUpstreamError +// site reached on the initial GetMessage failure (a hard error, +// distinct from errInviteFetchFailed which yields a silent +// HasInvite:false 200). +func TestHandleEmailInvite_APIError_DoesNotLeakUpstream(t *testing.T) { + t.Parallel() + server, client, _ := newCachedTestServer(t) + client.GetMessageFunc = func(context.Context, string, string) (*domain.Message, error) { + return nil, errors.New(upstreamErrorSentinel) + } + + req := httptest.NewRequest(http.MethodGet, "/api/emails/email-1/invite", nil) + w := httptest.NewRecorder() + server.handleEmailInvite(w, req, "email-1") + + if w.Code != http.StatusInternalServerError { + t.Fatalf("status=%d body=%s, want 500 (upstream GetMessage failure)", + w.Code, w.Body.String()) + } + bodyDoesNotLeakUpstream(t, w.Body.String()) +} + +// TestHandleAvailability_UpstreamError_DoesNotLeakUpstream pins +// handlers_availability.go:226 — Nylas GetAvailability failure. 
+func TestHandleAvailability_UpstreamError_DoesNotLeakUpstream(t *testing.T) { + t.Parallel() + server, client, _ := newCachedTestServer(t) + client.GetAvailabilityFunc = func(context.Context, *domain.AvailabilityRequest) (*domain.AvailabilityResponse, error) { + return nil, errors.New(upstreamErrorSentinel) + } + + req := httptest.NewRequest(http.MethodGet, + "/api/availability?start_time=1700000000&end_time=1700100000&participants=alice@example.com", + nil) + w := httptest.NewRecorder() + server.handleAvailability(w, req) + + if w.Code != http.StatusInternalServerError { + t.Fatalf("status=%d body=%s, want 500 (upstream GetAvailability failure)", + w.Code, w.Body.String()) + } + bodyDoesNotLeakUpstream(t, w.Body.String()) +} + +// TestHandleFreeBusy_UpstreamError_DoesNotLeakUpstream pins +// handlers_availability.go:336 — Nylas GetFreeBusy failure. +func TestHandleFreeBusy_UpstreamError_DoesNotLeakUpstream(t *testing.T) { + t.Parallel() + server, client, _ := newCachedTestServer(t) + client.GetFreeBusyFunc = func(context.Context, string, *domain.FreeBusyRequest) (*domain.FreeBusyResponse, error) { + return nil, errors.New(upstreamErrorSentinel) + } + + req := httptest.NewRequest(http.MethodGet, + "/api/calendars/freebusy?start_time=1700000000&end_time=1700100000&emails=alice@example.com", + nil) + w := httptest.NewRecorder() + server.handleFreeBusy(w, req) + + if w.Code != http.StatusInternalServerError { + t.Fatalf("status=%d body=%s, want 500 (upstream GetFreeBusy failure)", + w.Code, w.Body.String()) + } + bodyDoesNotLeakUpstream(t, w.Body.String()) +} + +// TestHandleConflicts_UpstreamError_DoesNotLeakUpstream pins +// handlers_availability.go:443 — Nylas GetEventsWithCursor failure. 
+func TestHandleConflicts_UpstreamError_DoesNotLeakUpstream(t *testing.T) { + t.Parallel() + server, client, _ := newCachedTestServer(t) + client.GetEventsWithCursorFunc = func(context.Context, string, string, *domain.EventQueryParams) (*domain.EventListResponse, error) { + return nil, errors.New(upstreamErrorSentinel) + } + + req := httptest.NewRequest(http.MethodGet, + "/api/calendar/conflicts?start_time=1700000000&end_time=1700100000", + nil) + w := httptest.NewRecorder() + server.handleConflicts(w, req) + + if w.Code != http.StatusInternalServerError { + t.Fatalf("status=%d body=%s, want 500 (upstream GetEventsWithCursor failure)", + w.Code, w.Body.String()) + } + bodyDoesNotLeakUpstream(t, w.Body.String()) +} + +// TestHandleGetEmail_APIError_NoCache_DoesNotLeakUpstream pins the +// privacy contract on handlers_email.go:253 — the writeUpstreamError +// site reached when GetMessage fails AND the cache fallback yields +// nothing (cache miss or cache disabled). The previous sweep covered +// list/invite/availability/freebusy/conflicts but the single-message +// fetch path was left out, so a regression that re-introduced +// %v(err) interpolation here would not be caught. +// +// Lock-down: redaction is in place today. Mirrors the pattern of +// TestHandleListEmails_APIError_NoCache_DoesNotLeakUpstream — same +// helper, same sentinel. 
+func TestHandleGetEmail_APIError_NoCache_DoesNotLeakUpstream(t *testing.T) { + t.Parallel() + server, client, _ := newCachedTestServer(t) + client.GetMessageFunc = func(context.Context, string, string) (*domain.Message, error) { + return nil, errors.New(upstreamErrorSentinel) + } + + req := httptest.NewRequest(http.MethodGet, "/api/emails/email-1", nil) + w := httptest.NewRecorder() + server.handleGetEmail(w, req, "email-1") + + if w.Code != http.StatusInternalServerError { + t.Fatalf("status=%d body=%s, want 500 (no cache → upstream error)", + w.Code, w.Body.String()) + } + bodyDoesNotLeakUpstream(t, w.Body.String()) +} diff --git a/internal/air/handlers_types.go b/internal/air/handlers_types.go index 6a6902c..93d3fe2 100644 --- a/internal/air/handlers_types.go +++ b/internal/air/handlers_types.go @@ -115,6 +115,8 @@ type EmailsResponse struct { } // UpdateEmailRequest represents a request to update an email. +// Folders: nil = leave alone; non-nil empty = Gmail archive +// (json.Unmarshal preserves this; see domain.UpdateMessageRequest). 
type UpdateEmailRequest struct { Unread *bool `json:"unread,omitempty"` Starred *bool `json:"starred,omitempty"` diff --git a/internal/air/server_offline.go b/internal/air/server_offline.go index a216200..bf39045 100644 --- a/internal/air/server_offline.go +++ b/internal/air/server_offline.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/nylas/cli/internal/air/cache" "github.com/nylas/cli/internal/domain" @@ -33,6 +34,11 @@ func (s *Server) processOfflineQueue(email string) { grantID, err := s.resolveQueuedActionGrantID(email, action) if err != nil { if action.Attempts >= 3 { + slog.Error("offline action permanently dropped: grant unresolvable", + "type", action.Type, + "resource_id", action.ResourceID, + "attempts", action.Attempts, + "err", err.Error()) _ = s.removeOfflineAction(email, action.ID) } else { _ = s.markOfflineActionFailed(email, action.ID, err) @@ -43,6 +49,11 @@ func (s *Server) processOfflineQueue(email string) { err = s.processOfflineAction(ctx, grantID, action) if err != nil { if action.Attempts >= 3 { + slog.Error("offline action permanently dropped after 3 attempts", + "type", action.Type, + "resource_id", action.ResourceID, + "attempts", action.Attempts, + "last_error", err.Error()) _ = s.removeOfflineAction(email, action.ID) } else { _ = s.markOfflineActionFailed(email, action.ID, err) diff --git a/internal/air/server_offline_silent_failure_test.go b/internal/air/server_offline_silent_failure_test.go new file mode 100644 index 0000000..9b8082a --- /dev/null +++ b/internal/air/server_offline_silent_failure_test.go @@ -0,0 +1,116 @@ +package air + +import ( + "context" + "errors" + "testing" + + "github.com/nylas/cli/internal/domain" + "github.com/stretchr/testify/assert" +) + +// TestProcessOfflineQueue_ActionDroppedAfter3Attempts_LogsFailure pins +// the worst silent-failure mode in the offline replay loop: +// server_offline.go:45-46 silently does +// +// if action.Attempts >= 3 { +// _ = 
s.removeOfflineAction(email, action.ID) +// } +// +// when an action has failed three times. The action is permanently +// dropped — the user took an action offline, expected it to sync, and +// will never know it didn't. There is no slog record, no metric, no +// trace at all. Support cannot correlate "my offline archive +// disappeared" reports to log lines because no log line was produced. +// +// EXPECTED FAILURE today: after four processOfflineQueue ticks the +// action is gone from the queue but the slog buffer contains nothing +// referencing the dropped emailID or the upstream-error sentinel. +// After the fix an Error-level log entry should fire with the action's +// resource_id and the last error captured during the attempts. +func TestProcessOfflineQueue_ActionDroppedAfter3Attempts_LogsFailure(t *testing.T) { + // No t.Parallel — captureSlog mutates process-global slog default. + server, client, accountEmail := newCachedTestServer(t) + + const sentinelEmailID = "drop-canary-3strike-XYZ" + const upstreamErrSentinel = "TRANSIENT-NYLAS-ERR-CANARY-555" + client.UpdateMessageFunc = func(_ context.Context, _, _ string, _ *domain.UpdateMessageRequest) (*domain.Message, error) { + return nil, errors.New(upstreamErrSentinel) + } + + unread := false + if err := server.enqueueMessageUpdate("grant-123", accountEmail, sentinelEmailID, &domain.UpdateMessageRequest{ + Unread: &unread, + }); err != nil { + t.Fatalf("enqueue: %v", err) + } + + logs := captureSlog(t) + + // Four ticks: the peek returns the action.Attempts value AFTER the + // previous markFailed wrote attempts+=1, so the sequence is + // tick 1: peek attempts=0 → fail → markFailed → attempts=1 + // tick 2: peek attempts=1 → fail → markFailed → attempts=2 + // tick 3: peek attempts=2 → fail → markFailed → attempts=3 + // tick 4: peek attempts=3 → fail → 3>=3 → SILENT removeOfflineAction + for range 4 { + server.processOfflineQueue(accountEmail) + } + + // Sanity-check: action really was dropped. 
+ db, err := server.cacheManager.GetDB(accountEmail) + if err != nil { + t.Fatalf("get db: %v", err) + } + var count int + if err := db.QueryRow("SELECT COUNT(*) FROM offline_queue").Scan(&count); err != nil { + t.Fatalf("count: %v", err) + } + if count != 0 { + t.Fatalf("queue not drained: count=%d, want 0 (3-strike drop expected)", count) + } + + got := logs.String() + assert.Contains(t, got, sentinelEmailID, + "slog must record the dropped action's resource_id for support diagnosis "+ + "(server_offline.go:46 currently silent); got %q", got) + assert.Contains(t, got, upstreamErrSentinel, + "slog must record the upstream error context that drove the action "+ + "to be permanently dropped; got %q", got) +} + +// TestProcessOfflineQueue_PermanentResolveFailure_LogsFailure pins the +// twin silent-drop site at server_offline.go:35-36: when the grant +// referenced by a queued action can no longer be resolved (e.g., the +// user disconnected the account while an offline action was sitting in +// the queue), the same 3-strike silent removal applies. Today this +// also fails closed with no observability. +func TestProcessOfflineQueue_PermanentGrantResolveFailure_LogsFailure(t *testing.T) { + // No t.Parallel — captureSlog mutates process-global slog default. + server, _, accountEmail := newCachedTestServer(t) + + const sentinelEmailID = "drop-canary-grant-resolve-XYZ" + + unread := false + if err := server.enqueueMessageUpdate("grant-DISCONNECTED-123", accountEmail, sentinelEmailID, &domain.UpdateMessageRequest{ + Unread: &unread, + }); err != nil { + t.Fatalf("enqueue: %v", err) + } + + // Remove the grant after enqueue so resolveQueuedActionGrantID + // returns "queued grant unavailable" on every tick. 
+ store := server.grantStore.(*testGrantStore) + store.grants = nil + + logs := captureSlog(t) + + for range 4 { + server.processOfflineQueue(accountEmail) + } + + got := logs.String() + assert.Contains(t, got, sentinelEmailID, + "slog must record the dropped action's resource_id when the grant "+ + "can no longer be resolved (server_offline.go:36 currently silent); got %q", got) +} diff --git a/internal/air/server_stores.go b/internal/air/server_stores.go index 6341ac1..b069706 100644 --- a/internal/air/server_stores.go +++ b/internal/air/server_stores.go @@ -29,7 +29,7 @@ func (s *Server) cacheAvailable() bool { return s.cacheSettings != nil && s.cacheSettings.IsCacheEnabled() && s.hasCacheRuntime() } -func (s *Server) offlineQueueConfigured() bool { +func (s *Server) offlineQueueEnabled() bool { return s.cacheSettings != nil && s.cacheSettings.Get().OfflineQueueEnabled && s.hasCacheRuntime() } diff --git a/internal/air/server_template.go b/internal/air/server_template.go index 29a2b44..025b49a 100644 --- a/internal/air/server_template.go +++ b/internal/air/server_template.go @@ -2,6 +2,7 @@ package air import ( "html/template" + "log/slog" "net/http" "sync" ) @@ -23,10 +24,13 @@ func (s *Server) handleIndex(w http.ResponseWriter, r *http.Request) { // Build page data - use real data when available, fall back to mock data := s.buildPageData() - // Render template + // Render template. Template errors can include data field paths and + // snippets of upstream content; log the raw error and return a + // generic message to the client. 
w.Header().Set("Content-Type", "text/html; charset=utf-8") if err := s.templates.ExecuteTemplate(w, "base", data); err != nil { - http.Error(w, "Template error: "+err.Error(), http.StatusInternalServerError) + slog.Error("template render failed", "template", "base", "err", err) + http.Error(w, "Failed to render page", http.StatusInternalServerError) } } diff --git a/internal/air/static/css/preview.css b/internal/air/static/css/preview.css index 21aec9e..0f22aaf 100644 --- a/internal/air/static/css/preview.css +++ b/internal/air/static/css/preview.css @@ -591,6 +591,22 @@ outline-offset: 1px; } + .calendar-invite-btn:disabled { + cursor: not-allowed; + opacity: 0.55; + } + + /* Pulse the in-flight button so the user sees the click */ + /* registered while the RSVP round-trip is pending. */ + .calendar-invite-btn.is-loading { + animation: calendar-invite-pulse 1.1s ease-in-out infinite; + } + + @keyframes calendar-invite-pulse { + 0%, 100% { opacity: 0.55; } + 50% { opacity: 0.85; } + } + /* Cancellation banner — shown when METHOD:CANCEL or */ /* STATUS:CANCELLED is present. Replaces the RSVP buttons so */ /* the user can't RSVP to a dead invitation. */ diff --git a/internal/air/static/js/app-core.js b/internal/air/static/js/app-core.js index 94bc1d7..46440f5 100644 --- a/internal/air/static/js/app-core.js +++ b/internal/air/static/js/app-core.js @@ -2,10 +2,22 @@ * App Core - Toast system, animations, view switching, and modal toggles */ // Toast System with action button support + // Cap concurrent toasts so a burst of archive/RSVP/error events + // can't stack up, scroll the screen, or pile up Undo timers + // pointing at stale closures. + const TOAST_MAX_VISIBLE = 4; + function showToast(type, title, message, options = null) { const container = document.getElementById('toastContainer'); if (!container) return; + // Drop oldest toasts beyond the cap. 
Children are appended + // to the bottom of the container, so the FIRST child is the + // oldest — pop it before adding a new one. + while (container.children.length >= TOAST_MAX_VISIBLE && container.firstChild) { + container.removeChild(container.firstChild); + } + const toast = document.createElement('div'); toast.className = `toast ${type}`; @@ -20,9 +32,14 @@ messageDiv.className = 'toast-message'; // Use textContent for XSS prevention - user data may be in title/message const strong = document.createElement('strong'); - strong.textContent = title; + // Truncate user-supplied strings so a runaway upstream error + // (e.g. a 200kB HTML body parsed as JSON) can't blow up the UI. + const TOAST_MAX_TEXT = 200; + const safeTitle = String(title == null ? '' : title).slice(0, TOAST_MAX_TEXT); + const safeMessage = String(message == null ? '' : message).slice(0, TOAST_MAX_TEXT); + strong.textContent = safeTitle; messageDiv.appendChild(strong); - messageDiv.appendChild(document.createTextNode(' — ' + message)); + messageDiv.appendChild(document.createTextNode(' — ' + safeMessage)); toast.appendChild(iconSpan); toast.appendChild(messageDiv); diff --git a/internal/air/static/js/bundles.js b/internal/air/static/js/bundles.js index 1ff6124..fa27637 100644 --- a/internal/air/static/js/bundles.js +++ b/internal/air/static/js/bundles.js @@ -2,6 +2,15 @@ * Email Bundles Module * Smart email categorization inspired by Shortwave/Google Inbox * Groups emails by type: newsletters, receipts, social, etc. + * + * STATUS: This module is NOT currently included by base.gohtml — the + * backend `/api/bundles*` routes ship and have integration tests, but + * the front-end UI is gated on a future sidebar redesign. 
The defensive + * cleanup in this file (fallback bundles when the API 5xx's, localStorage + * collapse persistence, no PUT-to-GET-only route) is kept intact so the + * module is ready to wire up via a single ` + + + diff --git a/internal/ui/templates/pages/agent.gohtml b/internal/ui/templates/pages/agent.gohtml new file mode 100644 index 0000000..a9d4f00 --- /dev/null +++ b/internal/ui/templates/pages/agent.gohtml @@ -0,0 +1,71 @@ +{{define "page-agent"}} +
+
+
+
+
+ + + + + + +

Agent Commands

+

Select a command from the right to see details

+
+ +
+
+
+
+
Agent Commands
+
+ +
+
+
+
+
+{{end}} diff --git a/internal/ui/templates/pages/audit.gohtml b/internal/ui/templates/pages/audit.gohtml new file mode 100644 index 0000000..684cba8 --- /dev/null +++ b/internal/ui/templates/pages/audit.gohtml @@ -0,0 +1,72 @@ +{{define "page-audit"}} +
+
+
+
+
+ + + + + + + +

Audit Commands

+

Select a command from the right to see details

+
+ +
+
+
+
+
Audit Commands
+
+ +
+
+
+
+
+{{end}} diff --git a/internal/ui/templates/pages/dashboard.gohtml b/internal/ui/templates/pages/dashboard.gohtml new file mode 100644 index 0000000..41ff2ff --- /dev/null +++ b/internal/ui/templates/pages/dashboard.gohtml @@ -0,0 +1,70 @@ +{{define "page-dashboard"}} +
+
+
+
+
+ + + + + +

Nylas Dashboard Commands

+

Manage your Nylas Dashboard account, applications, and organizations

+
+ +
+
+
+
+
Dashboard Commands
+
+ +
+
+
+
+
+{{end}} diff --git a/internal/ui/templates/pages/overview.gohtml b/internal/ui/templates/pages/overview.gohtml index 70da1e3..49d59f4 100644 --- a/internal/ui/templates/pages/overview.gohtml +++ b/internal/ui/templates/pages/overview.gohtml @@ -64,7 +64,7 @@

RESOURCES

- +
- API v3 Reference - Endpoints & schemas + CLI Command Reference + Commands & flags
diff --git a/internal/ui/templates/partials/content.gohtml b/internal/ui/templates/partials/content.gohtml index 66831f8..1c96e68 100644 --- a/internal/ui/templates/partials/content.gohtml +++ b/internal/ui/templates/partials/content.gohtml @@ -92,6 +92,25 @@ Admin + + + + - - + + {{end}} diff --git a/tests/air/e2e/archive-and-rsvp.spec.js b/tests/air/e2e/archive-and-rsvp.spec.js new file mode 100644 index 0000000..24092cf --- /dev/null +++ b/tests/air/e2e/archive-and-rsvp.spec.js @@ -0,0 +1,813 @@ +// @ts-check +const { test, expect } = require('@playwright/test'); + +/** + * Archive + RSVP E2E tests — pin two regressions: + * + * 1. Gmail Archive: must send `folders: []` (an explicit empty array) + * over the wire. Earlier code elided the empty array via omitempty, + * making "archive" a no-op upstream while the optimistic UI removed + * the row. + * + * 2. RSVP: clicking Yes/Maybe/No must POST to /api/emails/{id}/rsvp + * with the matching status, show the in-flight loading state, and + * settle on the "active" highlight when the server confirms. + * + * The in-page parts of these tests (spying on AirAPI / window.fetch, + * reading EmailListManager state) live in `page.evaluate` because no + * Playwright locator can intercept a same-origin fetch — but every + * user-facing click goes through semantic locators (`getByRole`) and + * every assertion is on the test side, NOT inside `page.evaluate`. + * + * Semantic locators only — per CLAUDE.md, never CSS/XPath. The app + * shell exposes role="application" with aria-label "Nylas Air Email + * Client"; each email row exposes role="option" via email-renderer.js. 
+ */ +test.describe('Archive + RSVP Integrations', () => { + test.beforeEach(async ({ page }) => { + await page.goto('/'); + await expect(page.getByRole('application', { name: /Nylas Air/i })).toBeVisible(); + await page.waitForLoadState('domcontentloaded'); + // Wait for at least one email to render — beats arbitrary timeouts + // and surfaces "demo data didn't load" as an explicit failure. + await expect(page.getByRole('option').first()).toBeVisible({ timeout: 5000 }); + }); + + test('archive on Gmail (no typed Archive folder) sends explicit empty folders array', async ({ page }) => { + // Sanity-check that the bundle is wired up. We deliberately avoid + // `if (skipped) return` here — a missing EmailListManager means the + // bundle is broken and the test SHOULD fail, not silently pass. + await expect.poll( + () => page.evaluate(() => typeof EmailListManager !== 'undefined' && Array.isArray(EmailListManager.emails)), + { timeout: 5000, message: 'EmailListManager bundle did not initialise' } + ).toBe(true); + + // Pin Strategy 2 (Gmail label removal) explicitly. Real Gmail + // accounts surface "All Mail" as system_folder='all', NOT 'archive', + // so computeArchiveFolders falls through to dropping the INBOX + // label and the resulting payload is folders:[]. Seed a Gmail-style + // folder list directly rather than relying on the demo dataset, + // which intentionally exposes every system_folder type for UI + // exercise — including a typed Archive that would (correctly) + // make Strategy 1 fire. + // + // The previous bug here was on the Go side: omitempty elided the + // empty array on the wire, so the backend never received the + // archive change. That fix is pinned by the Go-side tests + // TestHTTPClient_UpdateMessage_ForwardsEmptyFolders and friends; + // this test pins the JS half — that the payload reaches the wire + // as exactly [] for a Gmail-shaped account. 
+ const result = await page.evaluate(async () => { + const fakeEmail = { id: 'fake-gmail-archive-1', folders: ['INBOX'], unread: false, starred: false, subject: 'x', from: [], to: [] }; + EmailListManager.emails = [fakeEmail, ...EmailListManager.emails]; + EmailListManager.folders = [ + { id: 'INBOX', system_folder: 'inbox', name: 'INBOX' }, + { id: 'all-mail', system_folder: 'all', name: 'All Mail' }, + ]; + + let capturedPayload = null; + const originalUpdate = AirAPI.updateEmail; + AirAPI.updateEmail = async (id, payload) => { + capturedPayload = payload; + return { status: 'success' }; + }; + + try { + await EmailListManager.archiveEmail(fakeEmail.id); + return { + payload: capturedPayload, + wasRemoved: !EmailListManager.emails.find(e => e.id === fakeEmail.id), + }; + } finally { + AirAPI.updateEmail = originalUpdate; + } + }); + + expect(result.payload).toBeDefined(); + expect(result.payload).toHaveProperty('folders'); + expect(result.payload.folders, 'Gmail archive must send an explicit empty array, not omit the key').toEqual([]); + expect(result.wasRemoved).toBe(true); + }); + + test('archive on Microsoft/IMAP/EWS uses the Archive folder ID (Strategy 2)', async ({ page }) => { + // Pin computeArchiveFolders' Strategy 2 — when the email is NOT in + // an INBOX label (Gmail-style) but the account has a typed Archive + // folder, archive replaces the email's folders with that one ID. + // Without coverage, a refactor that "simplifies" the helper could + // silently break archive on every non-Gmail provider. + const result = await page.evaluate(async () => { + // Manufacture a minimal non-Gmail email + folder shape directly on + // the manager. We avoid touching the demo dataset so the assertion + // is independent of provider classification heuristics. 
+ const fakeEmail = { id: 'fake-ms-1', folders: ['some-other-folder-id'], unread: false, starred: false, subject: 'x', from: [], to: [] }; + EmailListManager.emails = [fakeEmail, ...EmailListManager.emails]; + EmailListManager.folders = [{ id: 'arch-id-1', system_folder: 'archive', name: 'Archive' }]; + + let captured = null; + const originalUpdate = AirAPI.updateEmail; + AirAPI.updateEmail = async (id, payload) => { captured = payload; return { status: 'success' }; }; + try { + await EmailListManager.archiveEmail(fakeEmail.id); + return { payload: captured }; + } finally { + AirAPI.updateEmail = originalUpdate; + } + }); + + expect(result.payload).toBeDefined(); + expect(result.payload.folders, 'Strategy 2 must replace folders with the Archive folder id, not send empty') + .toEqual(['arch-id-1']); + }); + + test('archive on IMAP/EWS with folders:["INBOX"] + typed Archive uses the Archive folder, not folders:[]', async ({ page }) => { + // Regression: IMAP/EWS surfaces the inbox folder as the literal name + // "INBOX". The old computeArchiveFolders stripped INBOX before + // checking for a typed Archive folder, sending folders:[] upstream + // and moving the message out of every folder instead of into Archive. + // This pins the corrected order — typed Archive wins over label + // removal whenever a system_folder='archive' exists. 
+ const result = await page.evaluate(async () => { + const fakeEmail = { id: 'fake-imap-1', folders: ['INBOX'], unread: false, starred: false, subject: 'x', from: [], to: [] }; + EmailListManager.emails = [fakeEmail, ...EmailListManager.emails]; + EmailListManager.folders = [ + { id: 'inbox-id', system_folder: 'inbox', name: 'INBOX' }, + { id: 'arch-imap-1', system_folder: 'archive', name: 'Archive' }, + ]; + + let captured = null; + const originalUpdate = AirAPI.updateEmail; + AirAPI.updateEmail = async (id, payload) => { captured = payload; return { status: 'success' }; }; + try { + await EmailListManager.archiveEmail(fakeEmail.id); + return { payload: captured }; + } finally { + AirAPI.updateEmail = originalUpdate; + } + }); + + expect(result.payload).toBeDefined(); + expect(result.payload.folders, 'IMAP/EWS archive must move into the typed Archive folder, not send folders:[]') + .toEqual(['arch-imap-1']); + }); + + test('archive does NOT mistake a Gmail user label named "Archive" for the system destination', async ({ page }) => { + // Pin the safety property of using system_folder match instead of + // name fallback: a Gmail account with a user-created label literally + // named "Archive" (system_folder unset) must still archive via + // Strategy 2 (drop INBOX), not move into the user's vanity label. + const result = await page.evaluate(async () => { + const fakeEmail = { id: 'fake-gmail-vanity-1', folders: ['INBOX', 'IMPORTANT'], unread: false, starred: false, subject: 'x', from: [], to: [] }; + EmailListManager.emails = [fakeEmail, ...EmailListManager.emails]; + // Gmail-style: only labels with empty system_folder. The user + // happens to have a label called "Archive" — must not be picked. 
+ EmailListManager.folders = [ + { id: 'label-inbox', system_folder: 'inbox', name: 'INBOX' }, + { id: 'label-archive-vanity', system_folder: '', name: 'Archive' }, + ]; + + let captured = null; + const originalUpdate = AirAPI.updateEmail; + AirAPI.updateEmail = async (id, payload) => { captured = payload; return { status: 'success' }; }; + try { + await EmailListManager.archiveEmail(fakeEmail.id); + return { payload: captured }; + } finally { + AirAPI.updateEmail = originalUpdate; + } + }); + + expect(result.payload).toBeDefined(); + expect(result.payload.folders, 'Gmail label removal must drop only INBOX, leaving other labels intact') + .toEqual(['IMPORTANT']); + }); + + test('archive surfaces "no archive target" when no INBOX and no Archive folder', async ({ page }) => { + // Pin the null branch of computeArchiveFolders: if the email isn't + // in INBOX (Strategy 1 doesn't apply) AND no system Archive folder + // is known (Strategy 2 doesn't apply), archiveEmail must fail + // closed — show an error toast and return false. Without this, a + // misconfigured account would archive into the void. 
+ const result = await page.evaluate(async () => { + const fakeEmail = { id: 'fake-no-target-1', folders: ['exotic-folder'], unread: false, starred: false, subject: 'x', from: [], to: [] }; + EmailListManager.emails = [fakeEmail, ...EmailListManager.emails]; + EmailListManager.folders = []; // no archive folder available + + let updateCalled = false; + const originalUpdate = AirAPI.updateEmail; + AirAPI.updateEmail = async () => { updateCalled = true; return { status: 'success' }; }; + try { + const ok = await EmailListManager.archiveEmail(fakeEmail.id); + return { + ok, + updateCalled, + stillInList: !!EmailListManager.emails.find(e => e.id === fakeEmail.id), + }; + } finally { + AirAPI.updateEmail = originalUpdate; + } + }); + + expect(result.ok, 'archiveEmail must return false when no archive target exists').toBe(false); + expect(result.updateCalled, 'AirAPI.updateEmail must NOT be called — fail closed').toBe(false); + expect(result.stillInList, 'email must remain in the list when archive is unavailable').toBe(true); + }); + + test('clicking RSVP Yes calls POST /rsvp with status=yes and highlights the button', async ({ page }) => { + // Select the demo invite email through the API surface so the test + // doesn't depend on the email list's exact ordering. + await page.evaluate(async () => EmailListManager.selectEmail('demo-email-invite-001')); + + const inviteCard = page.getByRole('region', { name: 'Calendar invitation' }); + await expect(inviteCard, 'invite card must render after selecting the demo invite').toBeVisible({ timeout: 3000 }); + + const yesButton = inviteCard.getByRole('button', { name: 'Yes' }); + await expect(yesButton).toBeVisible(); + await expect(yesButton).toBeEnabled(); + + // Install fetch spy in the page context — Playwright's request + // interception runs in a different process and can't intercept + // same-origin fetches reliably for SPAs. 
+ await page.evaluate(() => { + window.__rsvpCall = null; + window.__originalFetch = window.fetch; + window.fetch = async (url, init) => { + if (typeof url === 'string' && url.includes('/rsvp') && init?.method === 'POST') { + window.__rsvpCall = { url, body: JSON.parse(init.body) }; + return new Response(JSON.stringify({ status: 'yes', event_id: 'evt-1', calendar_id: 'cal-1' }), { + status: 200, + headers: { 'Content-Type': 'application/json' }, + }); + } + return window.__originalFetch(url, init); + }; + }); + + try { + await yesButton.click(); + + // Wait for the active class to appear via Playwright's auto-retry. + await expect(yesButton).toHaveClass(/active/); + // The is-loading class is added during flight and removed when + // the request settles. After click + auto-retry above, it should + // be gone — pin both halves of the contract. + await expect(yesButton).not.toHaveClass(/is-loading/); + + const rsvpCall = await page.evaluate(() => window.__rsvpCall); + expect(rsvpCall, 'fetch was never invoked — click did not route through the RSVP handler').not.toBeNull(); + expect(rsvpCall.url).toContain('/api/emails/demo-email-invite-001/rsvp'); + expect(rsvpCall.body.status).toBe('yes'); + } finally { + await page.evaluate(() => { window.fetch = window.__originalFetch; }); + } + }); + + test('RSVP button shows loading + disabled state while in flight', async ({ page }) => { + await page.evaluate(async () => EmailListManager.selectEmail('demo-email-invite-001')); + const inviteCard = page.getByRole('region', { name: 'Calendar invitation' }); + await expect(inviteCard).toBeVisible({ timeout: 3000 }); + + // Install a hanging fetch spy so we can observe the in-flight UI + // state without a race. Resume the fetch from the test side once + // we've snapshotted the loading classes. 
+ await page.evaluate(() => { + window.__resolveFetch = null; + window.__originalFetch = window.fetch; + window.fetch = (url, init) => { + if (typeof url === 'string' && url.includes('/rsvp')) { + return new Promise((resolve) => { + window.__resolveFetch = () => resolve(new Response( + JSON.stringify({ status: 'yes' }), + { status: 200, headers: { 'Content-Type': 'application/json' } } + )); + }); + } + return window.__originalFetch(url, init); + }; + }); + + try { + const yesButton = inviteCard.getByRole('button', { name: 'Yes' }); + await yesButton.click(); + + // While the fetch is hanging, the button must be disabled and + // class-loaded. expect(...).toHaveClass auto-retries up to the + // default timeout, so this catches the in-flight state without + // fragile setTimeouts. + await expect(yesButton).toBeDisabled(); + await expect(yesButton).toHaveClass(/is-loading/); + + // Resume the fetch and verify the loading state goes away. + await page.evaluate(() => window.__resolveFetch && window.__resolveFetch()); + await expect(yesButton).not.toHaveClass(/is-loading/); + await expect(yesButton).toBeEnabled(); + } finally { + await page.evaluate(() => { window.fetch = window.__originalFetch; }); + } + }); + + test('RSVP failure restores prior selection and surfaces an error toast', async ({ page }) => { + await page.evaluate(async () => EmailListManager.selectEmail('demo-email-invite-001')); + const inviteCard = page.getByRole('region', { name: 'Calendar invitation' }); + await expect(inviteCard).toBeVisible({ timeout: 3000 }); + + await page.evaluate(() => { + window.__originalFetch = window.fetch; + window.fetch = async (url) => { + if (typeof url === 'string' && url.includes('/rsvp')) { + return new Response(JSON.stringify({ error: 'event not imported' }), { + status: 404, + headers: { 'Content-Type': 'application/json' }, + }); + } + return window.__originalFetch(url); + }; + }); + + try { + const yesButton = inviteCard.getByRole('button', { name: 'Yes' }); + 
await yesButton.click(); + + // On failure, the button should NOT be left "active" — that + // would lie to the user about the server state. + await expect(yesButton).not.toHaveClass(/active/); + await expect(yesButton).not.toHaveClass(/is-loading/); + await expect(yesButton).toBeEnabled(); + } finally { + await page.evaluate(() => { window.fetch = window.__originalFetch; }); + } + }); + + test('RSVP network failure (fetch throws) restores prior selection without active class', async ({ page }) => { + // Pin the catch-block path: fetch rejects with a network error + // (vs returning !response.ok). The previous tests cover the + // !resp.ok branch; this one targets the `catch (err)` block where + // previouslyActive is re-applied. Without this test a future + // refactor that reorders the catch could silently break the + // "no connectivity → restore prior selection" UX. + await page.evaluate(async () => EmailListManager.selectEmail('demo-email-invite-001')); + const inviteCard = page.getByRole('region', { name: 'Calendar invitation' }); + await expect(inviteCard).toBeVisible({ timeout: 3000 }); + + // Pre-mark Maybe as the prior selection so we can verify the + // catch block restores it (rather than leaving the chosen Yes + // button active in error). + const maybeButton = inviteCard.getByRole('button', { name: 'Maybe' }); + await page.evaluate(() => { + const slot = document.querySelector('[id^="inviteSlot-"]'); + const buttons = slot ? slot.querySelectorAll('.calendar-invite-btn') : []; + buttons.forEach((b) => { + b.classList.toggle('active', b.dataset.rsvp === 'maybe'); + }); + }); + await expect(maybeButton).toHaveClass(/active/); + + await page.evaluate(() => { + window.__originalFetch = window.fetch; + window.fetch = async (url) => { + if (typeof url === 'string' && url.includes('/rsvp')) { + // Reject with a TypeError, mirroring how browsers report + // "fetch failed" on connectivity loss / DNS failure. 
+ throw new TypeError('Failed to fetch'); + } + return window.__originalFetch(url); + }; + }); + + try { + const yesButton = inviteCard.getByRole('button', { name: 'Yes' }); + await yesButton.click(); + + // Yes must not be marked active after a network error. + await expect(yesButton).not.toHaveClass(/active/); + // The previously-active button (Maybe) must be restored. + await expect(maybeButton).toHaveClass(/active/); + // Both buttons should be re-enabled and not in the loading + // state, regardless of which one was clicked. + await expect(yesButton).toBeEnabled(); + await expect(yesButton).not.toHaveClass(/is-loading/); + await expect(maybeButton).toBeEnabled(); + } finally { + await page.evaluate(() => { window.fetch = window.__originalFetch; }); + } + }); + + test('RSVP navigated-away guard skips active-class toggle on the new email', async ({ page }) => { + // Pin the `this.selectedEmailId === emailId` guard inside + // rsvpToInvite. Sequence: + // 1) Select invite email A, click RSVP Yes (fetch hangs) + // 2) Mid-flight, switch to a different email B + // 3) Resolve the fetch + // The guard must prevent the active class from being applied to + // the (no-longer-visible) invite card on email A. Without the + // guard, returning to A later would show a stale "active" Yes. 
+ await page.evaluate(async () => EmailListManager.selectEmail('demo-email-invite-001')); + const inviteCard = page.getByRole('region', { name: 'Calendar invitation' }); + await expect(inviteCard).toBeVisible({ timeout: 3000 }); + + await page.evaluate(() => { + window.__resolveFetch = null; + window.__originalFetch = window.fetch; + window.fetch = (url) => { + if (typeof url === 'string' && url.includes('/rsvp')) { + return new Promise((resolve) => { + window.__resolveFetch = () => resolve(new Response( + JSON.stringify({ status: 'yes', event_id: 'evt-1', calendar_id: 'cal-1' }), + { status: 200, headers: { 'Content-Type': 'application/json' } } + )); + }); + } + return window.__originalFetch(url); + }; + }); + + try { + const yesButton = inviteCard.getByRole('button', { name: 'Yes' }); + // Don't await — we need the request to be in flight when we + // navigate away. + void yesButton.click(); + + // Wait for the loading class to confirm the request is in flight. + await expect(yesButton).toHaveClass(/is-loading/); + + // Navigate to a different email while the RSVP is hanging. + const otherEmail = await page.evaluate(() => { + const other = EmailListManager.emails.find((e) => e.id !== 'demo-email-invite-001'); + return other ? other.id : null; + }); + expect(otherEmail, 'demo dataset must contain at least one non-invite email').not.toBeNull(); + await page.evaluate((id) => EmailListManager.selectEmail(id), otherEmail); + + // Now resolve the fetch. The guard must prevent the active + // class from being applied to the original invite card. + await page.evaluate(() => window.__resolveFetch && window.__resolveFetch()); + + // Switch back to the invite email and verify Yes is NOT active. 
+ await page.evaluate(async () => EmailListManager.selectEmail('demo-email-invite-001')); + const reselected = page.getByRole('region', { name: 'Calendar invitation' }); + await expect(reselected).toBeVisible({ timeout: 3000 }); + const reselectedYes = reselected.getByRole('button', { name: 'Yes' }); + await expect(reselectedYes, 'navigated-away guard must NOT mark Yes active on the original card').not.toHaveClass(/active/); + } finally { + await page.evaluate(() => { window.fetch = window.__originalFetch; }); + } + }); + + test('archive Undo: "Restore unavailable" toast when original had no folders', async ({ page }) => { + // Pin the archive-Undo defensive branch: if the email started + // with folders:[] (rare — drafts/sent paths), we cannot restore + // it via PUT folders:[...] without a meaningful target. The Undo + // callback must short-circuit with an error toast rather than + // making a no-op or, worse, an empty-folders PUT that would land + // the email unfiled. + const result = await page.evaluate(async () => { + // Manufacture a fake email with empty folders so we control the + // archive-state shape without depending on the demo dataset. + const fake = { + id: 'fake-empty-folders-1', + folders: [], + unread: false, + starred: false, + subject: 'Empty folders', + from: [], + to: [], + }; + EmailListManager.emails = [fake, ...EmailListManager.emails]; + // Provide a system Archive folder so Strategy 2 succeeds during + // the initial archive (the regression we're testing is the + // Undo path, not the archive path). 
+ EmailListManager.folders = [{ id: 'arch-id-1', system_folder: 'archive', name: 'Archive' }]; + + const captured = { toasts: [], updateCalls: 0 }; + const originalUpdate = AirAPI.updateEmail; + const originalToast = window.showToast; + AirAPI.updateEmail = async () => { + captured.updateCalls++; + return { status: 'success' }; + }; + window.showToast = (type, title, msg, opts) => { + captured.toasts.push({ type, title, msg }); + // Synchronously fire the Undo action so the test can observe + // the resulting toast without timing dance. We deliberately + // capture only the toast events; the action itself goes + // through the real callback. + if (opts && typeof opts.onAction === 'function' && title === 'Archived') { + // Fire the undo action — this is what the user clicking + // "Undo" would trigger. + opts.onAction(); + } + }; + try { + await EmailListManager.archiveEmail(fake.id); + // Wait one microtask tick so the Undo's async toast lands. + await new Promise((r) => setTimeout(r, 0)); + return captured; + } finally { + AirAPI.updateEmail = originalUpdate; + window.showToast = originalToast; + } + }); + + // First call is the archive PUT; no Undo PUT should fire because + // the original folders array was empty. + expect(result.updateCalls, 'only the initial archive should call updateEmail; Undo must short-circuit').toBe(1); + const restoreToast = result.toasts.find((t) => t.title === 'Restore unavailable'); + expect(restoreToast, 'Undo with empty original folders must surface "Restore unavailable"').toBeDefined(); + expect(restoreToast.type).toBe('error'); + }); + + test('archive Undo: "Restore failed" toast when restore PUT throws', async ({ page }) => { + // Pin the archive-Undo error path: the initial archive PUT + // succeeds, but the user clicks Undo and the restore PUT fails. + // Without surfacing "Restore failed", the user thinks the email + // is back in the inbox while it is still archived on the server. 
+ const result = await page.evaluate(async () => { + const inbox = EmailListManager.emails.find((e) => + (e.folders || []).includes('inbox') && + !(e.folders || []).includes('archive') + ); + if (!inbox) return { error: 'no inbox-only demo email available' }; + + const captured = { toasts: [], updateCalls: 0, restoreThrew: false }; + const originalUpdate = AirAPI.updateEmail; + const originalToast = window.showToast; + AirAPI.updateEmail = async (id, payload) => { + captured.updateCalls++; + // First call (archive) succeeds; second call (Undo restore) + // must throw to exercise the error branch. + if (captured.updateCalls === 1) return { status: 'success' }; + captured.restoreThrew = true; + throw new Error('simulated 503 on restore'); + }; + window.showToast = (type, title, msg, opts) => { + captured.toasts.push({ type, title, msg }); + if (opts && typeof opts.onAction === 'function' && title === 'Archived') { + opts.onAction(); + } + }; + try { + await EmailListManager.archiveEmail(inbox.id); + await new Promise((r) => setTimeout(r, 0)); + return captured; + } finally { + AirAPI.updateEmail = originalUpdate; + window.showToast = originalToast; + } + }); + + expect(result.error, `setup failed: ${result.error}`).toBeUndefined(); + expect(result.updateCalls, 'archive + restore = 2 PUTs').toBe(2); + expect(result.restoreThrew, 'restore branch must have been invoked').toBe(true); + const failed = result.toasts.find((t) => t.title === 'Restore failed'); + expect(failed, 'Undo restore failure must surface "Restore failed"').toBeDefined(); + expect(failed.type).toBe('error'); + // Crucially, the success toast for "Restored" must NOT fire on + // a failed restore — that would lie to the user. 
+ const restoredOK = result.toasts.find((t) => t.title === 'Restored'); + expect(restoredOK, 'must NOT announce "Restored" when the PUT failed').toBeUndefined(); + }); + + test('archive success: detail pane transitions to empty state when archived email was selected', async ({ page }) => { + // Pin the success-path counterpart to "archive failure restores + // selection AND repopulates the detail pane". When archiveEmail + // succeeds AND the email being archived was the currently + // selected one, the detail pane MUST flip to the empty state — + // otherwise the user sees stale content for an email that no + // longer exists in the list. The failure case has its own test + // (line ~306); this is the missing positive. + await expect.poll( + () => page.evaluate(() => typeof EmailListManager !== 'undefined' && Array.isArray(EmailListManager.emails)), + { timeout: 5000, message: 'EmailListManager bundle did not initialise' } + ).toBe(true); + + // Select an inbox-only email and confirm the detail pane is + // populated (i.e., NOT showing the empty state) before we + // archive. Anchoring on the empty-state copy directly is more + // robust than checking class names that change between layouts. + const setup = await page.evaluate(async () => { + const email = EmailListManager.emails.find((e) => + (e.folders || []).includes('inbox') && + !(e.folders || []).includes('archive') + ); + if (!email) return { error: 'no inbox-only demo email available' }; + await EmailListManager.selectEmail(email.id); + return { emailId: email.id }; + }); + expect(setup.error, `setup failed: ${setup.error}`).toBeUndefined(); + + // Pre-condition: detail pane shows the email, NOT the empty state. 
+ await expect( + page.getByText('Select an email to view'), + 'detail pane must be populated before we archive', + ).not.toBeVisible(); + + // Stub a successful archive so we exercise the success branch + // deterministically (the demo backend would also succeed, but + // we want to also assert the AirAPI payload along the way). + const archive = await page.evaluate(async (emailId) => { + const originalUpdate = AirAPI.updateEmail; + let capturedPayload = null; + AirAPI.updateEmail = async (_id, payload) => { + capturedPayload = payload; + return { status: 'success' }; + }; + try { + const ok = await EmailListManager.archiveEmail(emailId); + return { + ok, + payload: capturedPayload, + stillInList: !!EmailListManager.emails.find((e) => e.id === emailId), + // Read selectedEmailId AFTER archive — the success path + // sets it to null when the archived email was selected. + selectionCleared: EmailListManager.selectedEmailId === null, + }; + } finally { + AirAPI.updateEmail = originalUpdate; + } + }, setup.emailId); + + expect(archive.ok, 'archiveEmail success-path return value').toBe(true); + expect(archive.payload, 'archive PUT must include the new folders array').toBeDefined(); + expect(archive.stillInList, 'archived email must be removed from the list').toBe(false); + expect(archive.selectionCleared, 'selectedEmailId must be cleared so the pane transitions out').toBe(true); + + // Detail pane MUST flip to the empty state. Using getByText keeps + // us in semantic-locator territory and benefits from Playwright's + // auto-retry, replacing any class-based assertion that could + // shift with a future layout refactor. 
+ await expect( + page.getByText('Select an email to view'), + 'detail pane MUST show the empty state after archive succeeds on the selected email', + ).toBeVisible(); + }); + + test('loadEmails returns 3-state outcome (loaded / in-progress / failed)', async ({ page }) => { + // Pin the contract change behind the pull-to-refresh "Refresh failed" + // false-alarm fix: the previous boolean conflated "skipped because + // already loading" with "fetch failed", so a second pull during an + // in-flight load showed an error toast even though no fetch failed. + await expect.poll( + () => page.evaluate(() => typeof EmailListManager !== 'undefined' && typeof EmailListManager.loadEmails === 'function'), + { timeout: 5000, message: 'EmailListManager bundle did not initialise' } + ).toBe(true); + + const result = await page.evaluate(async () => { + const outcomes = {}; + const originalGet = AirAPI.getEmails; + + // 1) in-progress: another load is already running. + EmailListManager.isLoading = true; + outcomes.duringConcurrent = await EmailListManager.loadEmails(); + EmailListManager.isLoading = false; + + // 2) failed: the API throws. + AirAPI.getEmails = async () => { throw new Error('simulated network failure'); }; + outcomes.onError = await EmailListManager.loadEmails(); + + // 3) loaded: the API returns data. 
+ AirAPI.getEmails = async () => ({ emails: [], next_cursor: null, has_more: false }); + outcomes.onSuccess = await EmailListManager.loadEmails(); + + AirAPI.getEmails = originalGet; + return outcomes; + }); + + expect(result.duringConcurrent, 'concurrent load must NOT be reported as a failure').toBe('in-progress'); + expect(result.onError).toBe('failed'); + expect(result.onSuccess).toBe('loaded'); + }); + + test('archive failure restores selection AND repopulates the detail pane', async ({ page }) => { + // Pin the regression where archive rollback restored `this.emails` + // but left `selectedEmailId === null` and the detail pane showing + // the empty state — list and preview drifted out of sync. + await expect.poll( + () => page.evaluate(() => typeof EmailListManager !== 'undefined' && Array.isArray(EmailListManager.emails)), + { timeout: 5000 } + ).toBe(true); + + const setup = await page.evaluate(async () => { + const email = EmailListManager.emails.find(e => + (e.folders || []).includes('inbox') && + !(e.folders || []).includes('archive') + ); + if (!email) return { error: 'no inbox-only demo email available' }; + + // Select the email so the detail pane is populated. + await EmailListManager.selectEmail(email.id); + return { emailId: email.id }; + }); + expect(setup.error, `setup failed: ${setup.error}`).toBeUndefined(); + + // Stub AirAPI.updateEmail to fail; trigger the rollback path. Use + // evaluate so the spy lives in the page context where the click + // handler runs. We return only application state from the spy and + // assert on the user-visible UI via Playwright locators below. 
+ const rollback = await page.evaluate(async (emailId) => { + const originalUpdate = AirAPI.updateEmail; + AirAPI.updateEmail = async () => { throw new Error('simulated 503'); }; + try { + await EmailListManager.archiveEmail(emailId); + return { + stillInList: !!EmailListManager.emails.find(e => e.id === emailId), + isSelected: EmailListManager.selectedEmailId === emailId, + }; + } finally { + AirAPI.updateEmail = originalUpdate; + } + }, setup.emailId); + + expect(rollback.stillInList, 'rollback must put the email back in the list').toBe(true); + expect(rollback.isSelected, 'selectedEmailId must be restored after rollback').toBe(true); + + // Detail pane must NOT show the "Select an email to view" empty + // state. Using getByText keeps us in semantic-locator territory and + // benefits from Playwright's auto-retry, replacing the + // detail.querySelector('.empty-state') CSS lookup. + await expect( + page.getByText('Select an email to view'), + 'detail pane must NOT be left at the empty state', + ).not.toBeVisible(); + }); + + test('keyboard Delete announces "Email deleted" only on success, "Delete failed" on rollback', async ({ page }) => { + // Pin the screen-reader regression: the previous code fired + // announce('Email deleted') synchronously, even when the underlying + // fetch failed and the email was rolled back into the list. + await expect.poll( + () => page.evaluate(() => typeof EmailListManager !== 'undefined' && Array.isArray(EmailListManager.emails)), + { timeout: 5000 } + ).toBe(true); + + // The screen-reader announcer is a single live region near the top + // of base.gohtml with role="status" and id="announcer". We address + // it via the role; falling back to the id-based locator only if a + // future refactor moves it. Both forms are stable — class names are + // not. + // The announcer is a single live region near the top of base.gohtml. 
+ // Use getByTestId rather than role='status' because the toast + // container also has role='status' (so screen readers pick toasts up + // too) — testid keeps us pinned to the announcer specifically. + const announcer = page.getByTestId('announcer'); + + // dispatchKeydown fires a synthetic Delete on the first row's + // containing list. We use evaluate for the actual dispatch because + // KeyboardEvent isn't reachable through Playwright's keyboard API + // when the focus target is inside an SPA-managed widget, but every + // assertion that follows is on the test side. + async function pressDeleteOnFirstEmail() { + return page.evaluate(() => { + const email = EmailListManager.emails[0]; + if (!email) return { error: 'no demo emails available' }; + const row = document.querySelector(`[data-email-id="${email.id}"]`); + if (!row) return { error: 'no list row for first email' }; + row.classList.add('focused'); + row.focus(); + const list = row.closest('[data-testid="email-list"]') || row.parentElement; + list.dispatchEvent(new KeyboardEvent('keydown', { key: 'Delete', bubbles: true })); + return { ok: true }; + }); + } + + // ============== Success path ============== + await page.evaluate(() => { + window.__originalDel = AirAPI.deleteEmail; + AirAPI.deleteEmail = async () => ({ success: true }); + }); + try { + const success = await pressDeleteOnFirstEmail(); + expect(success.error).toBeUndefined(); + // expect.poll auto-retries up to the default timeout — replaces + // the brittle 250 ms setTimeout while still waiting for both the + // deleteEmail await AND the announce setTimeout to fire. + await expect.poll( + () => announcer.textContent(), + { timeout: 2000, message: 'announcer must say "Email deleted" on success' } + ).toContain('Email deleted'); + } finally { + await page.evaluate(() => { AirAPI.deleteEmail = window.__originalDel; }); + } + + // Reload to reset list state cleanly between phases. 
+ await page.reload(); + await expect(page.getByRole('option').first()).toBeVisible({ timeout: 5000 }); + + // ============== Failure path ============== + await page.evaluate(() => { + window.__originalDel = AirAPI.deleteEmail; + AirAPI.deleteEmail = async () => { throw new Error('simulated 5xx'); }; + }); + try { + const failure = await pressDeleteOnFirstEmail(); + expect(failure.error).toBeUndefined(); + const announcer2 = page.getByTestId('announcer'); + await expect.poll( + () => announcer2.textContent(), + { timeout: 2000, message: 'announcer must say "Delete failed" on rollback' } + ).toContain('Delete failed'); + expect(await announcer2.textContent(), 'must NOT announce success on rollback') + .not.toContain('Email deleted'); + } finally { + await page.evaluate(() => { AirAPI.deleteEmail = window.__originalDel; }); + } + }); +}); diff --git a/tests/air/e2e/folders-and-invite.spec.js b/tests/air/e2e/folders-and-invite.spec.js index 9e5732d..0211558 100644 --- a/tests/air/e2e/folders-and-invite.spec.js +++ b/tests/air/e2e/folders-and-invite.spec.js @@ -229,7 +229,10 @@ test.describe('Folders + calendar invite — Air', () => { window.__inviteXss = false; // Try injecting via a synthetic invite render. const slot = document.getElementById('inviteSlot-demo-email-invite-001'); - const html = EmailListManager.renderCalendarInviteCard( + // buildCalendarInviteCard returns a DOM node (HTMLElement) — see + // email-selection.js. We mount it directly via replaceChildren so + // there is no HTML-string surface for hostile fields to land in. + const cardNode = EmailListManager.buildCalendarInviteCard( { has_invite: true, title: 'Hi', @@ -242,8 +245,7 @@ test.describe('Folders + calendar invite — Air', () => { }, 'demo-email-invite-001', ); - slot.replaceChildren(); - slot.insertAdjacentHTML('beforeend', html); + slot.replaceChildren(cardNode); // Click Yes. 
const yesBtn = slot.querySelector('[data-rsvp="yes"]'); @@ -325,12 +327,15 @@ test.describe('Folders + calendar invite — Air', () => { // cancellation banner. Test renders a synthetic card directly so we // exercise the conditional UI without needing a real cancelled demo // email. - test('renderCalendarInviteCard: METHOD=CANCEL replaces RSVP buttons with banner', async ({ page }) => { + test('buildCalendarInviteCard: METHOD=CANCEL replaces RSVP buttons with banner', async ({ page }) => { const result = await page.evaluate(() => { if (typeof EmailListManager === 'undefined' || !EmailListManager) { return { skipped: true }; } - const html = EmailListManager.renderCalendarInviteCard( + // buildCalendarInviteCard returns a DOM node — query it directly. + // No DOMParser dance needed: the node is detached but fully + // queryable, and it's never serialised to/from HTML. + const node = EmailListManager.buildCalendarInviteCard( { has_invite: true, title: 'Quarterly Sync', @@ -341,15 +346,12 @@ test.describe('Folders + calendar invite — Air', () => { 'cancelled-test-id', ); - // Parse via DOMParser so we never assign untrusted strings to - // innerHTML — even in test code, that's the codebase rule. 
- const doc = new DOMParser().parseFromString(html, 'text/html'); return { skipped: false, - hasBanner: !!doc.querySelector('.calendar-invite-banner-cancel'), - bannerText: doc.querySelector('.calendar-invite-banner-cancel')?.textContent?.trim(), - rsvpButtonCount: doc.querySelectorAll('.calendar-invite-btn').length, - cardHasCancelClass: doc.querySelector('.calendar-invite-card.is-cancelled') !== null, + hasBanner: !!node.querySelector('.calendar-invite-banner-cancel'), + bannerText: node.querySelector('.calendar-invite-banner-cancel')?.textContent?.trim(), + rsvpButtonCount: node.querySelectorAll('.calendar-invite-btn').length, + cardHasCancelClass: node.classList.contains('is-cancelled'), }; }); diff --git a/tests/shared/helpers/ui-selectors.js b/tests/shared/helpers/ui-selectors.js index 1c70c2d..d7cc867 100644 --- a/tests/shared/helpers/ui-selectors.js +++ b/tests/shared/helpers/ui-selectors.js @@ -74,6 +74,9 @@ exports.nav = { scheduler: '[data-page="scheduler"]', timezone: '[data-page="timezone"]', webhook: '[data-page="webhook"]', + agent: '[data-page="agent"]', + audit: '[data-page="audit"]', + dashboardCmd: '[data-page="dashboard"]', }; // Pages @@ -89,6 +92,9 @@ exports.pages = { scheduler: '#page-scheduler', timezone: '#page-timezone', webhook: '#page-webhook', + agent: '#page-agent', + audit: '#page-audit', + dashboardCmd: '#page-dashboard', activePage: '.page.active', }; diff --git a/tests/ui/e2e/keyboard.spec.js b/tests/ui/e2e/keyboard.spec.js index 05bcbf8..02c97a1 100644 --- a/tests/ui/e2e/keyboard.spec.js +++ b/tests/ui/e2e/keyboard.spec.js @@ -376,11 +376,17 @@ test.describe('Form Keyboard Navigation', () => { await expect(apiKeyInput).toBeFocused(); - // Tab to region select - await page.keyboard.press('Tab'); - await page.waitForTimeout(100); - + // The setup form has a "show password" toggle button between the API key + // input and the region select, so tab past it to reach the region select. 
const regionSelect = page.locator(selectors.setup.regionSelect); + for (let i = 0; i < 5; i++) { + if (await regionSelect.evaluate((el) => el === document.activeElement)) { + break; + } + await page.keyboard.press('Tab'); + await page.waitForTimeout(50); + } + await expect(regionSelect).toBeFocused(); }); }); diff --git a/tests/ui/e2e/overview-page.spec.js b/tests/ui/e2e/overview-page.spec.js index b0da6ee..7f7d243 100644 --- a/tests/ui/e2e/overview-page.spec.js +++ b/tests/ui/e2e/overview-page.spec.js @@ -197,15 +197,15 @@ test.describe('Resources Card', () => { const docLink = page.getByRole('link', { name: /Documentation/i }); await expect(docLink).toBeVisible(); - await expect(docLink).toHaveAttribute('href', 'https://developer.nylas.com/docs'); + await expect(docLink).toHaveAttribute('href', 'https://developer.nylas.com/'); }); - test('API v3 Reference link is present', async ({ page }, testInfo) => { + test('CLI Command Reference link is present', async ({ page }, testInfo) => { await skipIfNotConfigured(page, testInfo); - const apiLink = page.getByRole('link', { name: /API v3 Reference/i }); + const apiLink = page.getByRole('link', { name: /CLI Command Reference/i }); await expect(apiLink).toBeVisible(); - await expect(apiLink).toHaveAttribute('href', 'https://developer.nylas.com/docs/v3'); + await expect(apiLink).toHaveAttribute('href', 'https://cli.nylas.com/docs/commands'); }); test('GitHub link is present', async ({ page }, testInfo) => { @@ -229,7 +229,7 @@ test.describe('Resources Card', () => { // Check for descriptions await expect(page.getByText('API reference & guides')).toBeVisible(); - await expect(page.getByText('Endpoints & schemas')).toBeVisible(); + await expect(page.getByText('Commands & flags')).toBeVisible(); await expect(page.getByText('SDKs & examples')).toBeVisible(); await expect(page.getByText('Get help from our team')).toBeVisible(); }); @@ -348,7 +348,7 @@ test.describe('Overview Page Responsiveness', () => { // Overview should 
still be visible await expect(page.locator(selectors.pages.overview)).toBeVisible(); - await expect(page.getByText('Dashboard')).toBeVisible(); + await expect(page.getByRole('heading', { name: 'Dashboard' })).toBeVisible(); }); test('overview page is visible on tablet viewport', async ({ page }, testInfo) => { @@ -360,6 +360,6 @@ test.describe('Overview Page Responsiveness', () => { // Overview should still be visible await expect(page.locator(selectors.pages.overview)).toBeVisible(); - await expect(page.getByText('Dashboard')).toBeVisible(); + await expect(page.getByRole('heading', { name: 'Dashboard' })).toBeVisible(); }); });