diff --git a/go.mod b/go.mod index 49487df..5b69b35 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( require ( al.essio.dev/pkg/shellescape v1.6.0 // indirect + github.com/arran4/golang-ical v0.3.5 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/catppuccin/go v0.3.0 // indirect github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 // indirect diff --git a/go.sum b/go.sum index ab25656..fd9e22d 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +github.com/arran4/golang-ical v0.3.5 h1:bbz6ld4dC+MmCKiFfOd6SkmIGnhNMBACZ485ULh7p9A= +github.com/arran4/golang-ical v0.3.5/go.mod h1:OnguFgjN0Hmx8jzpmWcC+AkHio94ujmLHKoaef7xQh8= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= @@ -146,6 +148,7 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= lukechampine.com/adiantum v1.1.1 h1:4fp6gTxWCqpEbLy40ExiYDDED3oUNWx5cTqBCtPdZqA= diff --git a/internal/adapters/nylas/mock_client.go b/internal/adapters/nylas/mock_client.go index 3f6e49e..ce3f634 100644 --- a/internal/adapters/nylas/mock_client.go +++ b/internal/adapters/nylas/mock_client.go @@ -99,6 +99,7 @@ type MockClient struct { GetMessagesFunc func(ctx context.Context, grantID string, limit int) ([]domain.Message, error) GetMessagesWithParamsFunc func(ctx context.Context, grantID string, params *domain.MessageQueryParams) ([]domain.Message, error) GetMessageFunc func(ctx context.Context, grantID, messageID string) (*domain.Message, error) + GetMessageWithFieldsFunc func(ctx context.Context, grantID, messageID, fields string) (*domain.Message, error) SendMessageFunc func(ctx context.Context, grantID string, req *domain.SendMessageRequest) (*domain.Message, error) SendRawMessageFunc func(ctx context.Context, grantID string, rawMIME []byte) (*domain.Message, error) GetSignaturesFunc func(ctx context.Context, grantID string) ([]domain.Signature, error) diff --git a/internal/adapters/nylas/mock_messages.go b/internal/adapters/nylas/mock_messages.go index ab40f2e..18f1306 100644 --- a/internal/adapters/nylas/mock_messages.go +++ b/internal/adapters/nylas/mock_messages.go @@ -54,7 +54,14 @@ func (m *MockClient) GetMessage(ctx context.Context, grantID, messageID string) } // GetMessageWithFields retrieves a message with optional field selection. +// Tests can pin a specific response by setting GetMessageWithFieldsFunc; +// otherwise the mock falls back to GetMessage and synthesises a default +// raw MIME body so callers exercising the raw_mime path don't get nil. func (m *MockClient) GetMessageWithFields(ctx context.Context, grantID, messageID string, fields string) (*domain.Message, error) { + if m.GetMessageWithFieldsFunc != nil { + return m.GetMessageWithFieldsFunc(ctx, grantID, messageID, fields) + } + msg, err := m.GetMessage(ctx, grantID, messageID) if err != nil { return nil, err diff --git a/internal/air/cache/cache.go b/internal/air/cache/cache.go index 4e05e49..93fc3e2 100644 --- a/internal/air/cache/cache.go +++ b/internal/air/cache/cache.go @@ -79,9 +79,29 @@ func OpenSharedDB(basePath, filename string) (*sql.DB, error) { _, _ = db.Exec("PRAGMA journal_mode=WAL") _, _ = db.Exec("PRAGMA synchronous=NORMAL") + // Tighten file permissions to 0600 (defense in depth — the parent + // directory is already 0700). Errors are non-fatal: missing perms or + // non-Unix filesystems shouldn't abort startup. + restrictDBFileMode(dbPath) + return db, nil } +// restrictDBFileMode chmods the SQLite database file (and its WAL/SHM +// sidecars when they exist) to 0600. Best-effort — no-op on systems +// where chmod has no meaning, and errors are deliberately ignored. +func restrictDBFileMode(dbPath string) { + if _, err := os.Stat(dbPath); err == nil { + _ = os.Chmod(dbPath, 0600) + } + for _, suffix := range []string{"-wal", "-shm"} { + p := dbPath + suffix + if _, err := os.Stat(p); err == nil { + _ = os.Chmod(p, 0600) + } + } +} + // sanitizeEmail converts email to a safe filename. // Example: user@example.com -> user@example.com.db func sanitizeEmail(email string) string { @@ -152,6 +172,10 @@ func (m *Manager) GetDB(email string) (*sql.DB, error) { return nil, fmt.Errorf("init schema for %s: %w", email, err) } + // Tighten file mode to 0600 once the file definitely exists (defense + // in depth atop the 0700 parent directory). + restrictDBFileMode(dbPath) + m.dbs[email] = db return db, nil } diff --git a/internal/air/cache/cache_filemode_test.go b/internal/air/cache/cache_filemode_test.go new file mode 100644 index 0000000..fcb52c4 --- /dev/null +++ b/internal/air/cache/cache_filemode_test.go @@ -0,0 +1,121 @@ +package cache + +import ( + "os" + "path/filepath" + "runtime" + "testing" +) + +// TestNewManager_SetsCacheDirMode confirms the cache directory is 0700. +func TestNewManager_SetsCacheDirMode(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("file mode semantics differ on Windows") + } + + dir := t.TempDir() + cacheDir := filepath.Join(dir, "air-cache") + + if _, err := NewManager(Config{BasePath: cacheDir}); err != nil { + t.Fatalf("NewManager: %v", err) + } + + info, err := os.Stat(cacheDir) + if err != nil { + t.Fatalf("stat cache dir: %v", err) + } + if mode := info.Mode().Perm(); mode != 0700 { + t.Errorf("cache dir mode: want 0700, got %o", mode) + } +} + +// TestGetDB_RestrictsFileMode confirms the per-account .db file is 0600 +// after Manager.GetDB initializes the schema. This is defense-in-depth on +// top of the 0700 directory mode — a permissive umask should not leave the +// SQLite file world-readable. +func TestGetDB_RestrictsFileMode(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("file mode semantics differ on Windows") + } + + dir := t.TempDir() + mgr, err := NewManager(Config{BasePath: dir}) + if err != nil { + t.Fatalf("NewManager: %v", err) + } + t.Cleanup(func() { _ = mgr.Close() }) + + if _, err := mgr.GetDB("user@example.com"); err != nil { + t.Fatalf("GetDB: %v", err) + } + + dbPath := mgr.DBPath("user@example.com") + info, err := os.Stat(dbPath) + if err != nil { + t.Fatalf("stat db file: %v", err) + } + if mode := info.Mode().Perm(); mode != 0600 { + t.Errorf("db file mode: want 0600, got %o (path=%s)", mode, dbPath) + } +} + +// TestRestrictDBFileMode_HandlesMissingFiles ensures the helper silently +// no-ops on missing sidecar files and never panics on bad paths. +func TestRestrictDBFileMode_HandlesMissingFiles(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("file mode semantics differ on Windows") + } + + dir := t.TempDir() + mainPath := filepath.Join(dir, "exists.db") + if err := os.WriteFile(mainPath, []byte("dummy"), 0644); err != nil { + t.Fatalf("seed main file: %v", err) + } + // -wal and -shm intentionally absent. + + restrictDBFileMode(mainPath) + + info, err := os.Stat(mainPath) + if err != nil { + t.Fatalf("stat after restrict: %v", err) + } + if mode := info.Mode().Perm(); mode != 0600 { + t.Errorf("file mode after restrict: want 0600, got %o", mode) + } + + // Non-existent path: must not panic, must not create files. + restrictDBFileMode(filepath.Join(dir, "does-not-exist.db")) + if _, err := os.Stat(filepath.Join(dir, "does-not-exist.db")); !os.IsNotExist(err) { + t.Errorf("restrict should not create files; stat err=%v", err) + } +} + +// TestOpenSharedDB_RestrictsFileMode confirms the shared photos.db file +// also gets the tightened mode. +func TestOpenSharedDB_RestrictsFileMode(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("file mode semantics differ on Windows") + } + + dir := t.TempDir() + db, err := OpenSharedDB(dir, "photos.db") + if err != nil { + t.Fatalf("OpenSharedDB: %v", err) + } + t.Cleanup(func() { _ = db.Close() }) + + // Force an actual file by running a trivial DDL. + if _, err := db.Exec("CREATE TABLE IF NOT EXISTS t (id INTEGER)"); err != nil { + t.Fatalf("create table: %v", err) + } + // Re-apply restrict in case the table creation only just produced the file. + restrictDBFileMode(filepath.Join(dir, "photos.db")) + + info, err := os.Stat(filepath.Join(dir, "photos.db")) + if err != nil { + t.Fatalf("stat shared db: %v", err) + } + if mode := info.Mode().Perm(); mode != 0600 { + t.Errorf("shared db mode: want 0600, got %o", mode) + } +} diff --git a/internal/air/cache/encryption.go b/internal/air/cache/encryption.go index a3ea695..e987d53 100644 --- a/internal/air/cache/encryption.go +++ b/internal/air/cache/encryption.go @@ -202,6 +202,10 @@ func (m *EncryptedManager) GetDB(email string) (*sql.DB, error) { return nil, fmt.Errorf("init schema for %s: %w", email, err) } + // Tighten file mode to 0600 — the file holds encrypted email/calendar + // data and should never be world-readable even when umask is permissive. + restrictDBFileMode(dbPath) + m.dbs[email] = db return db, nil } @@ -291,6 +295,9 @@ func (m *EncryptedManager) MigrateToEncrypted(email string) error { _ = os.Remove(backupPath + "-wal") _ = os.Remove(backupPath + "-shm") + // Lock down file mode on the freshly migrated database. + restrictDBFileMode(dbPath) + return nil } @@ -364,6 +371,9 @@ func (m *EncryptedManager) MigrateToUnencrypted(email string) error { _ = deleteKeyFunc(email) delete(m.keys, email) + // Lock down file mode on the freshly migrated database. + restrictDBFileMode(dbPath) + return nil } diff --git a/internal/air/cache/photos.go b/internal/air/cache/photos.go index b80f385..6d0f646 100644 --- a/internal/air/cache/photos.go +++ b/internal/air/cache/photos.go @@ -2,15 +2,46 @@ package cache import ( "database/sql" + "errors" "fmt" "os" "path/filepath" + "strings" "time" ) // DefaultPhotoTTL is the default time-to-live for cached photos (30 days). const DefaultPhotoTTL = 30 * 24 * time.Hour +// errInvalidContactID is returned when the caller hands the photo store an +// identifier that would be unsafe to compose into a filesystem path. +var errInvalidContactID = errors.New("invalid contact ID for photo cache") + +// validateContactID rejects identifiers that would let a malicious or +// malformed upstream contact escape the photo cache directory. Photo files +// are stored at filepath.Join(basePath, contactID), so anything containing +// a path separator, "..", control bytes, or NUL is unsafe. We also bound +// the length so a hostile API response can't push us into "name too long" +// errors at the filesystem layer. +func validateContactID(id string) error { + if id == "" || len(id) > 128 { + return errInvalidContactID + } + if strings.ContainsAny(id, "/\\\x00") { + return errInvalidContactID + } + if id == "." || id == ".." || strings.Contains(id, "..") { + return errInvalidContactID + } + for i := 0; i < len(id); i++ { + c := id[i] + if c < 0x20 || c == 0x7f { + return errInvalidContactID + } + } + return nil +} + // CachedPhoto represents a contact photo stored in the cache. type CachedPhoto struct { ContactID string `json:"contact_id"` @@ -67,7 +98,9 @@ func NewPhotoStore(db *sql.DB, basePath string, ttl time.Duration) (*PhotoStore, // Put stores a contact photo. func (s *PhotoStore) Put(contactID, contentType string, data []byte) error { - // Write photo to file + if err := validateContactID(contactID); err != nil { + return err + } localPath := filepath.Join(s.basePath, contactID) if err := os.WriteFile(localPath, data, 0600); err != nil { return fmt.Errorf("write photo file: %w", err) @@ -95,6 +128,9 @@ func (s *PhotoStore) Put(contactID, contentType string, data []byte) error { // Get retrieves a cached photo if it exists and is not expired. // Returns nil, nil if the photo is not cached or expired. func (s *PhotoStore) Get(contactID string) ([]byte, string, error) { + if err := validateContactID(contactID); err != nil { + return nil, "", err + } row := s.db.QueryRow(` SELECT content_type, size, local_path, cached_at FROM photos WHERE contact_id = ? @@ -136,6 +172,9 @@ func (s *PhotoStore) Get(contactID string) ([]byte, string, error) { // IsValid checks if a cached photo exists and is not expired. func (s *PhotoStore) IsValid(contactID string) bool { + if err := validateContactID(contactID); err != nil { + return false + } var cachedAtUnix int64 err := s.db.QueryRow("SELECT cached_at FROM photos WHERE contact_id = ?", contactID).Scan(&cachedAtUnix) if err != nil { @@ -148,6 +187,9 @@ func (s *PhotoStore) IsValid(contactID string) bool { // Delete removes a cached photo. func (s *PhotoStore) Delete(contactID string) error { + if err := validateContactID(contactID); err != nil { + return err + } // Get local path first var localPath string err := s.db.QueryRow("SELECT local_path FROM photos WHERE contact_id = ?", contactID).Scan(&localPath) diff --git a/internal/air/cache/photos_validation_test.go b/internal/air/cache/photos_validation_test.go new file mode 100644 index 0000000..49b66bd --- /dev/null +++ b/internal/air/cache/photos_validation_test.go @@ -0,0 +1,87 @@ +package cache + +import ( + "strings" + "testing" +) + +// TestValidateContactID_Rejects covers IDs that must NOT be allowed to flow +// into filepath.Join(basePath, contactID). A photo store that accepts these +// would let a hostile API response punch out of the cache directory or +// corrupt unrelated files. +func TestValidateContactID_Rejects(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + id string + }{ + {"empty", ""}, + {"dot", "."}, + {"dotdot", ".."}, + {"slash traversal", "../etc/passwd"}, + {"backslash traversal", "..\\windows\\system32"}, + {"forward slash", "abc/def"}, + {"backslash", "abc\\def"}, + {"null byte", "abc\x00def"}, + {"control byte", "abc\x01def"}, + {"DEL byte", "abc\x7fdef"}, + {"contains dotdot", "foo..bar"}, + {"long ID", strings.Repeat("a", 129)}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + if err := validateContactID(tc.id); err == nil { + t.Errorf("expected error for %q, got nil", tc.id) + } + }) + } +} + +func TestValidateContactID_Accepts(t *testing.T) { + t.Parallel() + + cases := []string{ + "abc123", + "contact-uuid-123", + "USER_42", + "a", + strings.Repeat("a", 128), // boundary + } + for _, id := range cases { + if err := validateContactID(id); err != nil { + t.Errorf("expected %q to validate, got %v", id, err) + } + } +} + +func TestPhotoStore_Put_RejectsTraversal(t *testing.T) { + t.Parallel() + + db := setupTestDB(t) + tmpDir := t.TempDir() + store, err := NewPhotoStore(db, tmpDir, DefaultPhotoTTL) + if err != nil { + t.Fatalf("NewPhotoStore: %v", err) + } + + if err := store.Put("../escape", "image/png", []byte{0x89, 0x50, 0x4E, 0x47}); err == nil { + t.Fatal("Put should reject path-traversal contactID") + } +} + +func TestPhotoStore_Get_RejectsTraversal(t *testing.T) { + t.Parallel() + + db := setupTestDB(t) + tmpDir := t.TempDir() + store, err := NewPhotoStore(db, tmpDir, DefaultPhotoTTL) + if err != nil { + t.Fatalf("NewPhotoStore: %v", err) + } + if _, _, err := store.Get("..\\escape"); err == nil { + t.Fatal("Get should reject path-traversal contactID") + } +} diff --git a/internal/air/cache_runtime.go b/internal/air/cache_runtime.go index c6dc2f1..ea12cd1 100644 --- a/internal/air/cache_runtime.go +++ b/internal/air/cache_runtime.go @@ -75,6 +75,11 @@ func (s *Server) reconfigureCacheRuntime() error { s.stopBackgroundSync() + // Wait for any in-flight background tasks (e.g. photo prune) before + // closing or replacing photoStore/cacheManager — otherwise a running + // prune would be operating on a closed *sql.DB. + s.bgWg.Wait() + cacheCfg := cache.DefaultConfig() if basePath := s.cacheSettings.BasePath(); basePath != "" { cacheCfg.BasePath = basePath @@ -141,7 +146,14 @@ func (s *Server) reconfigureCacheRuntime() error { } if photoStore != nil { - go prunePhotoStore(photoStore) + s.bgWg.Go(func() { + defer func() { + if r := recover(); r != nil { + fmt.Fprintf(os.Stderr, "Photo cache prune panic: %v\n", r) + } + }() + prunePhotoStore(photoStore) + }) } s.startBackgroundSync() @@ -164,7 +176,11 @@ func openPhotoStore(basePath string) (*cache.PhotoStore, error) { } func prunePhotoStore(photoStore *cache.PhotoStore) { - if pruned, err := photoStore.Prune(); err == nil && pruned > 0 { + pruned, err := photoStore.Prune() + switch { + case err != nil: + fmt.Fprintf(os.Stderr, "Photo cache prune failed: %v\n", err) + case pruned > 0: fmt.Fprintf(os.Stderr, "Pruned %d expired photos from cache\n", pruned) } } diff --git a/internal/air/handlers_ai_config.go b/internal/air/handlers_ai_config.go index d13bec1..348c8c0 100644 --- a/internal/air/handlers_ai_config.go +++ b/internal/air/handlers_ai_config.go @@ -4,6 +4,7 @@ import ( "encoding/json" "maps" "net/http" + "strings" "sync" "github.com/nylas/cli/internal/httputil" @@ -105,7 +106,9 @@ func (s *Server) handleUpdateAIConfig(w http.ResponseWriter, r *http.Request) { if req.Model != "" { aiStore.config.Model = req.Model } - if req.APIKey != "" && req.APIKey != "***" { + // Saved value is masked for the read path as `***` + last 4 chars; never + // overwrite the real key with a masked value that came back via PUT. + if req.APIKey != "" && !strings.HasPrefix(req.APIKey, "***") { aiStore.config.APIKey = req.APIKey } if req.BaseURL != "" { @@ -155,12 +158,19 @@ func (s *Server) handleGetAIUsage(w http.ResponseWriter, r *http.Request) { aiStore.mu.RLock() defer aiStore.mu.RUnlock() + // Guard against zero budget: division would yield +Inf, which encoding/json + // refuses to marshal and would surface as a 500. + var percentUsed float64 + if aiStore.config.UsageBudget > 0 { + percentUsed = (aiStore.config.UsageSpent / aiStore.config.UsageBudget) * 100 + } + response := map[string]any{ "stats": aiStore.stats, "budget": aiStore.config.UsageBudget, "spent": aiStore.config.UsageSpent, "remaining": aiStore.config.UsageBudget - aiStore.config.UsageSpent, - "percentUsed": (aiStore.config.UsageSpent / aiStore.config.UsageBudget) * 100, + "percentUsed": percentUsed, } httputil.WriteJSON(w, http.StatusOK, response) diff --git a/internal/air/handlers_ai_config_unit_test.go b/internal/air/handlers_ai_config_unit_test.go new file mode 100644 index 0000000..2289d8f --- /dev/null +++ b/internal/air/handlers_ai_config_unit_test.go @@ -0,0 +1,120 @@ +package air + +import ( + "encoding/json" + "math" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +// TestHandleGetAIUsage_ZeroBudget verifies that a zero usage budget does not +// produce +Inf percentUsed (which encoding/json would refuse to marshal, +// surfacing as a 500 to the client). +func TestHandleGetAIUsage_ZeroBudget(t *testing.T) { + srv := &Server{} + + aiStore.mu.Lock() + prevBudget := aiStore.config.UsageBudget + prevSpent := aiStore.config.UsageSpent + aiStore.config.UsageBudget = 0 + aiStore.config.UsageSpent = 1.0 + aiStore.mu.Unlock() + t.Cleanup(func() { + aiStore.mu.Lock() + aiStore.config.UsageBudget = prevBudget + aiStore.config.UsageSpent = prevSpent + aiStore.mu.Unlock() + }) + + req := httptest.NewRequest(http.MethodGet, "/api/ai/usage", nil) + w := httptest.NewRecorder() + srv.handleGetAIUsage(w, req) + + if w.Code != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", w.Code, w.Body.String()) + } + + var resp map[string]any + if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { + t.Fatalf("decode response: %v", err) + } + + pct, ok := resp["percentUsed"].(float64) + if !ok { + t.Fatalf("percentUsed missing or not a number: %#v", resp["percentUsed"]) + } + if math.IsInf(pct, 0) || math.IsNaN(pct) { + t.Fatalf("percentUsed should be finite for zero budget, got %v", pct) + } + if pct != 0 { + t.Errorf("expected 0 percent used when budget is zero, got %v", pct) + } +} + +// TestHandleUpdateAIConfig_RejectsMaskedKey verifies that posting back the +// masked API key returned by GET (e.g. "***1234") does NOT overwrite the +// stored real key. Previously the check was "!= \"***\"", which let any +// "***xxxx" payload through. +func TestHandleUpdateAIConfig_RejectsMaskedKey(t *testing.T) { + srv := &Server{} + + aiStore.mu.Lock() + prev := aiStore.config.APIKey + aiStore.config.APIKey = "secret-real-key-1234" + aiStore.mu.Unlock() + t.Cleanup(func() { + aiStore.mu.Lock() + aiStore.config.APIKey = prev + aiStore.mu.Unlock() + }) + + body := `{"apiKey": "***1234"}` + req := httptest.NewRequest(http.MethodPut, "/api/ai/config", strings.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + srv.handleUpdateAIConfig(w, req) + + if w.Code != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", w.Code, w.Body.String()) + } + + aiStore.mu.RLock() + defer aiStore.mu.RUnlock() + if aiStore.config.APIKey != "secret-real-key-1234" { + t.Errorf("masked payload overwrote real API key: now %q", aiStore.config.APIKey) + } +} + +// TestHandleUpdateAIConfig_AcceptsRealKey verifies that a non-masked key +// still updates the stored value. +func TestHandleUpdateAIConfig_AcceptsRealKey(t *testing.T) { + srv := &Server{} + + aiStore.mu.Lock() + prev := aiStore.config.APIKey + aiStore.config.APIKey = "old-key" + aiStore.mu.Unlock() + t.Cleanup(func() { + aiStore.mu.Lock() + aiStore.config.APIKey = prev + aiStore.mu.Unlock() + }) + + body := `{"apiKey": "new-real-key-abcd"}` + req := httptest.NewRequest(http.MethodPut, "/api/ai/config", strings.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + srv.handleUpdateAIConfig(w, req) + + if w.Code != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", w.Code, w.Body.String()) + } + + aiStore.mu.RLock() + defer aiStore.mu.RUnlock() + if aiStore.config.APIKey != "new-real-key-abcd" { + t.Errorf("real key was not stored: got %q", aiStore.config.APIKey) + } +} diff --git a/internal/air/handlers_ai_smart.go b/internal/air/handlers_ai_smart.go index 4e36a7d..52edfb6 100644 --- a/internal/air/handlers_ai_smart.go +++ b/internal/air/handlers_ai_smart.go @@ -47,7 +47,7 @@ Subject: %s Return format: ["Reply 1", "Reply 2", "Reply 3"]`, req.From, req.Subject, body) - result, err := runClaudeCommand(prompt) + result, err := runClaudeCommand(r.Context(), prompt) if err != nil { writeJSON(w, http.StatusInternalServerError, SmartReplyResponse{ Success: false, @@ -154,7 +154,7 @@ Rules: - category: Choose the PRIMARY purpose - priority: Based on urgency and sender importance`, req.From, req.Subject, body) - result, err := runClaudeCommand(prompt) + result, err := runClaudeCommand(r.Context(), prompt) if err != nil { writeJSON(w, http.StatusInternalServerError, AutoLabelResponse{ Success: false, diff --git a/internal/air/handlers_ai_summarize.go b/internal/air/handlers_ai_summarize.go index 1aa4ff8..c56e582 100644 --- a/internal/air/handlers_ai_summarize.go +++ b/internal/air/handlers_ai_summarize.go @@ -32,7 +32,7 @@ func (s *Server) handleAISummarize(w http.ResponseWriter, r *http.Request) { } // Run claude -p with the prompt - summary, err := runClaudeCommand(req.Prompt) + summary, err := runClaudeCommand(r.Context(), req.Prompt) if err != nil { writeJSON(w, http.StatusInternalServerError, AIResponse{ Success: false, @@ -98,7 +98,7 @@ Rules: - sentiment: Choose ONE based on tone and urgency - category: Choose the PRIMARY purpose of the email`, req.From, req.Subject, body) - result, err := runClaudeCommand(prompt) + result, err := runClaudeCommand(r.Context(), prompt) if err != nil { writeJSON(w, http.StatusInternalServerError, EnhancedSummaryResponse{ Success: false, diff --git a/internal/air/handlers_ai_thread.go b/internal/air/handlers_ai_thread.go index b2cd513..2ddb52f 100644 --- a/internal/air/handlers_ai_thread.go +++ b/internal/air/handlers_ai_thread.go @@ -93,7 +93,7 @@ Rules: - timeline: Brief description of how the conversation evolved - next_steps: Clear next action if any, or empty string`, conversationBuilder.String()) - result, err := runClaudeCommand(prompt) + result, err := runClaudeCommand(r.Context(), prompt) if err != nil { writeJSON(w, http.StatusInternalServerError, ThreadSummaryResponse{ Success: false, @@ -155,9 +155,13 @@ Rules: } // runClaudeCommand runs the claude CLI with the given prompt. -func runClaudeCommand(prompt string) (string, error) { - // Create context with timeout (30 seconds for AI response) - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) +// +// The provided ctx is honored: cancelling it (e.g. when the HTTP request is +// aborted by the client) terminates the subprocess. A 30-second timeout is +// also applied on top of the caller's context so a runaway claude process +// can't block forever. +func runClaudeCommand(ctx context.Context, prompt string) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() // Find claude binary @@ -177,9 +181,12 @@ func runClaudeCommand(prompt string) (string, error) { err = cmd.Run() if err != nil { - // Check if it's a timeout - if ctx.Err() == context.DeadlineExceeded { + // Distinguish caller cancellation from our local 30s timeout. + switch ctx.Err() { + case context.DeadlineExceeded: return "", fmt.Errorf("claude code timed out after 30 seconds") + case context.Canceled: + return "", ctx.Err() } // Return stderr if available if stderr.Len() > 0 { diff --git a/internal/air/handlers_availability.go b/internal/air/handlers_availability.go index 210d9b7..5a30d51 100644 --- a/internal/air/handlers_availability.go +++ b/internal/air/handlers_availability.go @@ -430,39 +430,11 @@ func findConflicts(events []domain.Event) []EventConflict { start1, end1 := e1.When.StartTime, e1.When.EndTime start2, end2 := e2.When.StartTime, e2.When.EndTime - // Handle all-day events - if e1.When.IsAllDay() { - if e1.When.Date != "" { - t, _ := time.Parse("2006-01-02", e1.When.Date) - start1 = t.Unix() - end1 = t.Add(24 * time.Hour).Unix() - } else if e1.When.StartDate != "" { - t, _ := time.Parse("2006-01-02", e1.When.StartDate) - start1 = t.Unix() - if e1.When.EndDate != "" { - et, _ := time.Parse("2006-01-02", e1.When.EndDate) - end1 = et.Unix() - } else { - end1 = start1 + 24*60*60 - } - } - } - if e2.When.IsAllDay() { - if e2.When.Date != "" { - t, _ := time.Parse("2006-01-02", e2.When.Date) - start2 = t.Unix() - end2 = t.Add(24 * time.Hour).Unix() - } else if e2.When.StartDate != "" { - t, _ := time.Parse("2006-01-02", e2.When.StartDate) - start2 = t.Unix() - if e2.When.EndDate != "" { - et, _ := time.Parse("2006-01-02", e2.When.EndDate) - end2 = et.Unix() - } else { - end2 = start2 + 24*60*60 - } - } - } + // Handle all-day events. Only override the upstream timestamps on + // successful parse; on malformed dates we'd otherwise produce a + // year-1 Unix timestamp and detect bogus conflicts. + start1, end1 = allDayBounds(e1.When, start1, end1) + start2, end2 = allDayBounds(e2.When, start2, end2) // Check for overlap: start1 < end2 && start2 < end1 if start1 < end2 && start2 < end1 { @@ -477,6 +449,38 @@ func findConflicts(events []domain.Event) []EventConflict { return conflicts } +// allDayBounds returns the [start, end] Unix timestamps for an all-day or +// multi-day event. If the When value carries a date string we cannot parse, +// the caller-supplied fallback (start, end) is returned untouched so a +// malformed upstream date never collapses the window to year 1. +func allDayBounds(when domain.EventWhen, start, end int64) (int64, int64) { + if !when.IsAllDay() { + return start, end + } + switch { + case when.Date != "": + t, err := time.Parse("2006-01-02", when.Date) + if err != nil { + return start, end + } + return t.Unix(), t.Add(24 * time.Hour).Unix() + case when.StartDate != "": + t, err := time.Parse("2006-01-02", when.StartDate) + if err != nil { + return start, end + } + newStart := t.Unix() + newEnd := newStart + 24*60*60 + if when.EndDate != "" { + if et, err := time.Parse("2006-01-02", when.EndDate); err == nil { + newEnd = et.Unix() + } + } + return newStart, newEnd + } + return start, end +} + // roundUpTo5Min rounds a Unix timestamp up to the next 5-minute boundary. // This is required by the Nylas API for availability requests. func roundUpTo5Min(unixTime int64) int64 { diff --git a/internal/air/handlers_bundles.go b/internal/air/handlers_bundles.go index 610c1ae..acf3530 100644 --- a/internal/air/handlers_bundles.go +++ b/internal/air/handlers_bundles.go @@ -4,6 +4,7 @@ import ( "encoding/json" "net/http" "regexp" + "strconv" "strings" "sync" "time" @@ -177,13 +178,15 @@ func (s *Server) handleBundleCategorize(w http.ResponseWriter, r *http.Request) bundleID := categorizeEmail(req.From, req.Subject) if req.EmailID != "" && bundleID != "" { - bundleStore.mu.Lock() - bundleStore.emails[req.EmailID] = bundleID - if b, ok := bundleStore.bundles[bundleID]; ok { - b.Count++ - b.LastUpdated = time.Now() - } - bundleStore.mu.Unlock() + func() { + bundleStore.mu.Lock() + defer bundleStore.mu.Unlock() + bundleStore.emails[req.EmailID] = bundleID + if b, ok := bundleStore.bundles[bundleID]; ok { + b.Count++ + b.LastUpdated = time.Now() + } + }() } httputil.WriteJSON(w, http.StatusOK, map[string]string{"bundleId": bundleID}) @@ -243,13 +246,60 @@ func matchRule(rule BundleRule, from, subject, domain string) bool { case "startsWith": return strings.HasPrefix(fieldValue, valueLower) case "matches": - matched, _ := regexp.MatchString(rule.Value, fieldValue) - return matched + // Compile-and-match through a cached compile so a pathological + // pattern can't recompile on every email. Bad patterns are + // rejected at write-time by validateBundleRule, but old/persisted + // rules go through this path too — fall through quietly to keep + // the matcher panic-free. + re, err := compiledBundleRegex(rule.Value) + if err != nil || re == nil { + return false + } + return re.MatchString(fieldValue) default: return false } } +// bundleRegexCache caches compiled "matches" patterns. Bundle rules are +// re-applied to every message arrival, so re-running regexp.Compile per +// match would be both slow and an attack surface (catastrophic backtracking +// patterns get compiled and burned per call). +var ( + bundleRegexCache = make(map[string]*regexp.Regexp) + bundleRegexCacheMu sync.RWMutex +) + +func compiledBundleRegex(pattern string) (*regexp.Regexp, error) { + bundleRegexCacheMu.RLock() + if re, ok := bundleRegexCache[pattern]; ok { + bundleRegexCacheMu.RUnlock() + return re, nil + } + bundleRegexCacheMu.RUnlock() + + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + bundleRegexCacheMu.Lock() + bundleRegexCache[pattern] = re + bundleRegexCacheMu.Unlock() + return re, nil +} + +// validateBundleRule rejects rules whose regex fails to compile so we +// surface the error at PUT time instead of silently dropping every match. +func validateBundleRule(rule BundleRule) error { + if rule.Operator != "matches" || rule.Value == "" { + return nil + } + if _, err := compiledBundleRegex(rule.Value); err != nil { + return err + } + return nil +} + // extractDomain extracts domain from email address func extractDomain(email string) string { atIdx := strings.LastIndex(email, "@") @@ -271,6 +321,13 @@ func (s *Server) handleUpdateBundle(w http.ResponseWriter, r *http.Request) { return } + for i, rule := range bundle.Rules { + if err := validateBundleRule(rule); err != nil { + http.Error(w, "Invalid regex in rule "+strconv.Itoa(i)+": "+err.Error(), http.StatusBadRequest) + return + } + } + bundleStore.mu.Lock() defer bundleStore.mu.Unlock() diff --git a/internal/air/handlers_bundles_validation_test.go b/internal/air/handlers_bundles_validation_test.go new file mode 100644 index 0000000..496a42d --- /dev/null +++ b/internal/air/handlers_bundles_validation_test.go @@ -0,0 +1,83 @@ +package air + +import ( + "testing" +) + +// TestValidateBundleRule_RejectsBadRegex pins the new contract: a bundle +// PUT with an unparseable "matches" regex is rejected at the boundary +// instead of every email match silently failing forever afterwards. +func TestValidateBundleRule_RejectsBadRegex(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + rule BundleRule + wantErr bool + }{ + { + "valid contains", + BundleRule{Field: "from", Operator: "contains", Value: "@example.com"}, + false, + }, + { + "valid matches", + BundleRule{Field: "subject", Operator: "matches", Value: `^\[urgent\]`}, + false, + }, + { + "empty matches value (skipped)", + BundleRule{Field: "subject", Operator: "matches", Value: ""}, + false, + }, + { + "unbalanced parens", + BundleRule{Field: "subject", Operator: "matches", Value: "(unclosed"}, + true, + }, + { + "invalid char class", + BundleRule{Field: "subject", Operator: "matches", Value: "[unclosed"}, + true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + err := validateBundleRule(tc.rule) + if tc.wantErr && err == nil { + t.Error("expected error, got nil") + } + if !tc.wantErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} + +func TestMatchRule_RegexCacheReused(t *testing.T) { + rule := BundleRule{Field: "subject", Operator: "matches", Value: "^urgent"} + + if !matchRule(rule, "from", "urgent: please reply", "domain") { + t.Error("expected match") + } + if matchRule(rule, "from", "no rush", "domain") { + t.Error("expected no match") + } + + // Now compile must come from cache. + bundleRegexCacheMu.RLock() + _, ok := bundleRegexCache[rule.Value] + bundleRegexCacheMu.RUnlock() + if !ok { + t.Error("expected pattern to be cached after first match") + } +} + +func TestMatchRule_BadRegexReturnsFalse(t *testing.T) { + rule := BundleRule{Field: "subject", Operator: "matches", Value: "(unclosed"} + if matchRule(rule, "from", "anything", "domain") { + t.Error("matchRule should return false for uncompilable regex") + } +} diff --git a/internal/air/handlers_cache.go b/internal/air/handlers_cache.go index 2013cb8..24cde98 100644 --- a/internal/air/handlers_cache.go +++ b/internal/air/handlers_cache.go @@ -252,9 +252,11 @@ func (s *Server) handleCacheClear(w http.ResponseWriter, r *http.Request) { }) return } - s.offlineQueuesMu.Lock() - delete(s.offlineQueues, email) - s.offlineQueuesMu.Unlock() + func() { + s.offlineQueuesMu.Lock() + defer s.offlineQueuesMu.Unlock() + delete(s.offlineQueues, email) + }() } else { // Clear all accounts if err := s.withCacheManager(func(manager cacheRuntimeManager) error { diff --git a/internal/air/handlers_calendar_helpers.go b/internal/air/handlers_calendar_helpers.go index 13eead3..bd802b4 100644 --- a/internal/air/handlers_calendar_helpers.go +++ b/internal/air/handlers_calendar_helpers.go @@ -24,18 +24,24 @@ func eventToResponse(e domain.Event) EventResponse { HtmlLink: e.HtmlLink, } - // Handle all-day events + // Handle all-day events. Skip the override when parsing fails — the + // upstream-provided StartTime/EndTime is more useful than a year-1 Unix + // timestamp would be. if resp.IsAllDay { - if e.When.Date != "" { - t, _ := time.Parse("2006-01-02", e.When.Date) - resp.StartTime = t.Unix() - resp.EndTime = t.Add(24 * time.Hour).Unix() - } else if e.When.StartDate != "" { - st, _ := time.Parse("2006-01-02", e.When.StartDate) - resp.StartTime = st.Unix() + switch { + case e.When.Date != "": + if t, err := time.Parse("2006-01-02", e.When.Date); err == nil { + resp.StartTime = t.Unix() + resp.EndTime = t.Add(24 * time.Hour).Unix() + } + case e.When.StartDate != "": + if st, err := time.Parse("2006-01-02", e.When.StartDate); err == nil { + resp.StartTime = st.Unix() + } if e.When.EndDate != "" { - et, _ := time.Parse("2006-01-02", e.When.EndDate) - resp.EndTime = et.Unix() + if et, err := time.Parse("2006-01-02", e.When.EndDate); err == nil { + resp.EndTime = et.Unix() + } } } } diff --git a/internal/air/handlers_calendar_helpers_unit_test.go b/internal/air/handlers_calendar_helpers_unit_test.go new file mode 100644 index 0000000..15a62b8 --- /dev/null +++ b/internal/air/handlers_calendar_helpers_unit_test.go @@ -0,0 +1,121 @@ +package air + +import ( + "testing" + + "github.com/nylas/cli/internal/domain" +) + +// TestEventToResponse_AllDayValidDate verifies that a well-formed all-day +// date is converted to the correct Unix-second window. +func TestEventToResponse_AllDayValidDate(t *testing.T) { + e := domain.Event{ + ID: "evt1", + Title: "Holiday", + When: domain.EventWhen{ + Object: "date", + Date: "2026-04-29", + }, + } + + resp := eventToResponse(e) + + if !resp.IsAllDay { + t.Fatalf("expected IsAllDay=true") + } + // 2026-04-29T00:00:00Z = 1777420800 + if resp.StartTime != 1777420800 { + t.Errorf("StartTime: want 1777420800, got %d", resp.StartTime) + } + if resp.EndTime != 1777420800+24*60*60 { + t.Errorf("EndTime: want %d, got %d", 1777420800+24*60*60, resp.EndTime) + } +} + +// TestEventToResponse_AllDayMalformedDate verifies that a malformed date +// does NOT clobber StartTime/EndTime with a year-1 Unix timestamp. +// +// Before the fix, time.Parse("2006-01-02", "not-a-date") returned the zero +// time, and resp.StartTime got -62135596800 (year 1). After the fix, the +// upstream-provided StartTime is preserved when the date string is bad. +func TestEventToResponse_AllDayMalformedDate(t *testing.T) { + e := domain.Event{ + ID: "evt-bad", + Title: "Bad date", + When: domain.EventWhen{ + Object: "date", + Date: "not-a-date", + StartTime: 1700000000, // upstream fallback + EndTime: 1700086400, + }, + } + + resp := eventToResponse(e) + + if resp.StartTime < 0 || resp.StartTime > 4102444800 { + t.Errorf("StartTime should be a sane Unix second, got %d", resp.StartTime) + } + if resp.EndTime < 0 || resp.EndTime > 4102444800 { + t.Errorf("EndTime should be a sane Unix second, got %d", resp.EndTime) + } + // We expect the upstream fallback to win, not year-1. + if resp.StartTime != 1700000000 { + t.Errorf("StartTime should preserve upstream fallback 1700000000, got %d", resp.StartTime) + } +} + +// TestEventToResponse_AllDayDatespanValidStart confirms that datespan events +// override start (and end if EndDate parses), while malformed end is ignored. +func TestEventToResponse_AllDayDatespanMixedValidity(t *testing.T) { + e := domain.Event{ + ID: "evt-mixed", + Title: "Multi-day", + When: domain.EventWhen{ + Object: "datespan", + StartDate: "2026-04-29", + EndDate: "totally-broken", + StartTime: 1700000000, // would be overridden by valid StartDate + EndTime: 9999999999, // should be preserved (EndDate fails to parse) + }, + } + + resp := eventToResponse(e) + if resp.StartTime != 1777420800 { + t.Errorf("StartTime should reflect parsed StartDate, got %d", resp.StartTime) + } + if resp.EndTime != 9999999999 { + t.Errorf("EndTime should preserve fallback when EndDate is malformed, got %d", resp.EndTime) + } +} + +// TestAllDayBounds_NotAllDay returns the supplied fallback unchanged. +func TestAllDayBounds_NotAllDay(t *testing.T) { + w := domain.EventWhen{Object: "timespan", StartTime: 100, EndTime: 200} + gotStart, gotEnd := allDayBounds(w, 100, 200) + if gotStart != 100 || gotEnd != 200 { + t.Errorf("non-all-day should return fallback (100, 200), got (%d, %d)", gotStart, gotEnd) + } +} + +// TestAllDayBounds_BadDateReturnsFallback documents the regression: bad date +// strings used to produce year-1 timestamps; now they leave the fallback in +// place so callers can keep using upstream StartTime/EndTime. +func TestAllDayBounds_BadDateReturnsFallback(t *testing.T) { + w := domain.EventWhen{Object: "date", Date: "garbage"} + gotStart, gotEnd := allDayBounds(w, 1234567890, 1234567890+86400) + if gotStart != 1234567890 || gotEnd != 1234567890+86400 { + t.Errorf("bad Date should preserve fallback, got (%d, %d)", gotStart, gotEnd) + } +} + +// TestAllDayBounds_GoodDateOverrides confirms the happy path overrides. +func TestAllDayBounds_GoodDateOverrides(t *testing.T) { + w := domain.EventWhen{Object: "date", Date: "2026-04-29"} + gotStart, gotEnd := allDayBounds(w, 0, 0) + if gotStart != 1777420800 { + t.Errorf("StartTime: want 1777420800, got %d", gotStart) + } + if gotEnd != 1777420800+86400 { + t.Errorf("EndTime: want %d, got %d", 1777420800+86400, gotEnd) + } +} diff --git a/internal/air/handlers_email.go b/internal/air/handlers_email.go index 68a97e6..b3d011d 100644 --- a/internal/air/handlers_email.go +++ b/internal/air/handlers_email.go @@ -17,14 +17,28 @@ func (s *Server) handleListEmails(w http.ResponseWriter, r *http.Request) { if !requireMethod(w, r, http.MethodGet) { return } - grantID := s.withAuthGrant(w, EmailsResponse{Emails: demoEmails(), HasMore: false}) - if grantID == "" { - return - } // Parse query parameters query := NewQueryParams(r.URL.Query()) + // Demo mode: filter the demo dataset by folder/unread/starred so users + // can exercise the sidebar (Sent/Drafts/Archive/Trash) without a real + // account. Without this, every folder showed the same Inbox set. + if s.demoMode { + filtered := filterDemoEmails(demoEmails(), + query.Get("folder"), + query.GetBool("unread"), + query.GetBool("starred"), + ) + writeJSON(w, http.StatusOK, EmailsResponse{Emails: filtered, HasMore: false}) + return + } + + grantID := s.withAuthGrant(w, nil) + if grantID == "" { + return + } + params := &domain.MessageQueryParams{ Limit: query.GetLimit(50), } @@ -68,7 +82,13 @@ func (s *Server) handleListEmails(w http.ResponseWriter, r *http.Request) { // Get account email for cache lookup accountEmail := s.getAccountEmail(grantID) - // Try cache first (only for first page without complex filters) + // Try cache first (only for first page without complex filters). + // Folder-filter caveat: background sync fetches the top-N messages with + // no folder filter, so on busy inboxes the cache barely covers + // Sent/Drafts/Archive. We short-circuit on a folder-filter hit only when + // the cache returned at least a full page — otherwise the user sees a + // stub of 1–2 messages instead of the real folder. from/search filters + // operate on the full cached dataset, so they short-circuit as before. if cursor == "" && s.cacheAvailable() { var cached []*cache.CachedEmail if err := s.withEmailStore(accountEmail, func(store *cache.EmailStore) error { @@ -76,8 +96,10 @@ func (s *Server) handleListEmails(w http.ResponseWriter, r *http.Request) { cached, err = s.queryCachedEmails(store, params, folderID, fromFilter, searchQuery) return err }); err == nil && len(cached) > 0 { - writeJSON(w, http.StatusOK, cachedEmailsToResponse(cached, params.Limit)) - return + if folderID == "" || len(cached) >= params.Limit { + writeJSON(w, http.StatusOK, cachedEmailsToResponse(cached, params.Limit)) + return + } } } @@ -132,7 +154,7 @@ func (s *Server) handleListEmails(w http.ResponseWriter, r *http.Request) { // handleEmailByID handles single email operations: GET, PUT, DELETE. func (s *Server) handleEmailByID(w http.ResponseWriter, r *http.Request) { - // Parse email ID from path: /api/emails/{id} + // Parse email ID from path: /api/emails/{id}[/{action}] path := strings.TrimPrefix(r.URL.Path, "/api/emails/") parts := strings.Split(path, "/") if len(parts) == 0 || parts[0] == "" { @@ -141,6 +163,13 @@ func (s *Server) handleEmailByID(w http.ResponseWriter, r *http.Request) { } emailID := parts[0] + // Sub-resource: /api/emails/{id}/invite returns parsed iCalendar + // invite data so the email preview can show a Gmail-style RSVP card. + if len(parts) > 1 && parts[1] == "invite" { + s.handleEmailInvite(w, r, emailID) + return + } + switch r.Method { case http.MethodGet: s.handleGetEmail(w, r, emailID) @@ -519,10 +548,14 @@ func cachedEmailToResponse(e *cache.CachedEmail) EmailResponse { } } -// demoEmails returns demo email data. +// demoEmails returns demo email data spread across multiple folders so the +// sidebar (Inbox / Sent / Drafts / Archive / Trash) actually shows different +// content per folder. Includes one calendar-invite (.ics) email so the +// calendar-invite card UI has something to render. func demoEmails() []EmailResponse { now := time.Now() return []EmailResponse{ + // Inbox { ID: "demo-email-001", Subject: "Q4 Product Roadmap Review", @@ -582,5 +615,157 @@ func demoEmails() []EmailResponse { Starred: false, Folders: []string{"inbox"}, }, + // Calendar invite (with .ics attachment) + { + ID: "demo-email-invite-001", + Subject: "Event Invitation: Quarterly Sync", + Snippet: "You have received a calendar invitation: Quarterly Sync", + Body: "
You have received a calendar invitation: Quarterly Sync
Please let me know if this time works.
", + From: []EmailParticipantResponse{{Name: "Priya Patel", Email: "priya@partner.example"}}, + To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + Date: now.Add(-30 * time.Minute).Unix(), + Unread: true, + Starred: false, + Folders: []string{"inbox"}, + Attachments: []AttachmentResponse{ + { + ID: "att-invite-001", + Filename: "invite.ics", + ContentType: "text/calendar", + Size: 1024, + }, + }, + }, + // Sent — explicitly more than one so we can prove the filter works. + { + ID: "demo-email-sent-001", + Subject: "Re: Q4 Product Roadmap Review", + Snippet: "Thanks Sarah, here are my comments on the roadmap...", + Body: "Thanks Sarah,
Here are my comments on the roadmap. Looks good overall — happy to discuss the Q4 priorities live.
", + From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + To: []EmailParticipantResponse{{Name: "Sarah Chen", Email: "sarah.chen@company.com"}}, + Date: now.Add(-1 * time.Hour).Unix(), + Folders: []string{"sent"}, + }, + { + ID: "demo-email-sent-002", + Subject: "Pricing follow-up", + Snippet: "Hi Mike, sending the updated pricing sheet...", + Body: "Hi Mike,
Sending the updated pricing sheet as discussed. Let me know if you need any changes.
", + From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + To: []EmailParticipantResponse{{Name: "Mike Johnson", Email: "mike@customer.example"}}, + Date: now.Add(-3 * time.Hour).Unix(), + Folders: []string{"sent"}, + }, + { + ID: "demo-email-sent-003", + Subject: "Welcome to the team!", + Snippet: "Excited to have you joining us next Monday...", + Body: "Excited to have you joining us next Monday! Here's the on-boarding checklist.
", + From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + To: []EmailParticipantResponse{{Name: "Jamie Lee", Email: "jamie@newhire.example"}}, + Date: now.Add(-1 * 24 * time.Hour).Unix(), + Folders: []string{"sent"}, + }, + // Drafts + { + ID: "demo-email-draft-001", + Subject: "Draft: Proposal for Acme", + Snippet: "Hi Acme team, here's the rough proposal...", + Body: "Hi Acme team,
Here's the rough proposal — still working through the timeline section.
", + From: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + To: []EmailParticipantResponse{{Name: "Acme Procurement", Email: "buyers@acme.example"}}, + Date: now.Add(-4 * time.Hour).Unix(), + Folders: []string{"drafts"}, + }, + // Archive + { + ID: "demo-email-archive-001", + Subject: "Confirmation: Subscription renewed", + Snippet: "Your annual subscription has been renewed...", + Body: "Your annual subscription has been renewed for another year.
", + From: []EmailParticipantResponse{{Name: "Acme Billing", Email: "billing@acme.example"}}, + To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + Date: now.Add(-30 * 24 * time.Hour).Unix(), + Folders: []string{"archive"}, + }, + // Trash + { + ID: "demo-email-trash-001", + Subject: "URGENT: Winning offer (don't miss out)", + Snippet: "You've been pre-selected for an exclusive offer...", + Body: "You've been pre-selected.
", + From: []EmailParticipantResponse{{Name: "Promo Bot", Email: "deals@spammy.example"}}, + To: []EmailParticipantResponse{{Name: "You", Email: "you@example.com"}}, + Date: now.Add(-7 * 24 * time.Hour).Unix(), + Folders: []string{"trash"}, + }, + } +} + +// filterDemoEmails applies folder/unread/starred filters to a demo email +// list. Folder matching is case-insensitive against email.Folders entries +// and against well-known aliases (e.g., "SENT" → "sent", "Sent Items" +// → "sent"). Empty folder string means "no folder filter." +func filterDemoEmails(emails []EmailResponse, folder string, onlyUnread, onlyStarred bool) []EmailResponse { + target := normalizeDemoFolder(folder) + out := make([]EmailResponse, 0, len(emails)) + for _, e := range emails { + if onlyUnread && !e.Unread { + continue + } + if onlyStarred && !e.Starred { + continue + } + if target != "" && !demoEmailIsInFolder(e, target) { + continue + } + out = append(out, e) + } + return out +} + +// normalizeDemoFolder turns a UI-supplied folder identifier (e.g., the +// system-folder ID "SENT", the Microsoft display name "Sent Items", or the +// canonical "sent") into the lowercase canonical name used in demoEmails. +func normalizeDemoFolder(folder string) string { + f := strings.ToLower(strings.TrimSpace(folder)) + switch f { + case "": + return "" + case "inbox": + return "inbox" + case "sent", "sent items", "sent mail": + return "sent" + case "drafts", "draft": + return "drafts" + case "archive", "all", "all mail": + return "archive" + case "trash", "deleted items", "deleted": + return "trash" + case "spam", "junk", "junk email": + return "spam" + case "starred": + return "starred" + default: + return f + } +} + +// demoEmailIsInFolder reports whether the email is in `target` (already +// canonicalised). Special-cases "starred" since starring is a flag, not a +// folder. "all" never filters anything out. +func demoEmailIsInFolder(e EmailResponse, target string) bool { + if target == "starred" { + return e.Starred + } + if target == "all" { + return true + } + for _, f := range e.Folders { + if strings.EqualFold(f, target) { + return true + } } + return false } diff --git a/internal/air/handlers_email_cache_runtime_test.go b/internal/air/handlers_email_cache_runtime_test.go index d47e838..3da3838 100644 --- a/internal/air/handlers_email_cache_runtime_test.go +++ b/internal/air/handlers_email_cache_runtime_test.go @@ -208,6 +208,89 @@ func TestHandleListEmails_UsesCacheFilters(t *testing.T) { } } +// Pins the fix for the Sent-folder coverage bug: when the cache holds a +// partial view of a folder (typical for Sent/Drafts/etc. on busy accounts — +// background sync fetches top-N unfiltered, dominated by Inbox), the handler +// must fall through to the API. Otherwise the user sees a 1–2 message stub +// when their real Sent folder has hundreds. +func TestHandleListEmails_FolderPartialCache_FallsThroughToAPI(t *testing.T) { + t.Parallel() + + server, client, accountEmail := newCachedTestServer(t) + + // Single sent message in cache (the "1 email" symptom from the bug + // report). + putCachedEmail(t, server, accountEmail, &cache.CachedEmail{ + ID: "cached-sent-1", + FolderID: "sent", + Subject: "Hello From Nylas", + FromName: "Qasim", + FromEmail: "qasim@example.com", + Date: time.Now(), + CachedAt: time.Now(), + }) + + apiCalled := false + client.GetMessagesWithParamsFunc = func(_ context.Context, _ string, _ *domain.MessageQueryParams) ([]domain.Message, error) { + apiCalled = true + msgs := make([]domain.Message, 0, 3) + for i := range 3 { + msgs = append(msgs, domain.Message{ + ID: "api-sent-" + string(rune('a'+i)), + Subject: "Real sent message", + Folders: []string{"sent"}, + Date: time.Now(), + }) + } + return msgs, nil + } + + req := httptest.NewRequest(http.MethodGet, "/api/emails?folder=sent", nil) + w := httptest.NewRecorder() + server.handleListEmails(w, req) + + if w.Code != http.StatusOK { + t.Fatalf("status = %d, want 200", w.Code) + } + if !apiCalled { + t.Fatal("expected API call when cache has partial folder coverage; got cache short-circuit") + } +} + +// Pins the inverse: a fully covered cache page must short-circuit so that +// Inbox loads stay fast. +func TestHandleListEmails_FolderFullCache_ShortCircuits(t *testing.T) { + t.Parallel() + + server, client, accountEmail := newCachedTestServer(t) + + // 50 cached inbox messages = full default page (handleListEmails uses + // query.GetLimit(50)). + for i := range 50 { + putCachedEmail(t, server, accountEmail, &cache.CachedEmail{ + ID: "inbox-" + string(rune('A'+i%26)) + string(rune('A'+i/26)), + FolderID: "inbox", + Subject: "Cached message", + FromEmail: "sender@example.com", + Date: time.Now().Add(-time.Duration(i) * time.Minute), + CachedAt: time.Now(), + }) + } + + client.GetMessagesWithParamsFunc = func(_ context.Context, _ string, _ *domain.MessageQueryParams) ([]domain.Message, error) { + t.Fatal("expected cache short-circuit when cache holds a full page") + return nil, nil + } + + req := httptest.NewRequest(http.MethodGet, "/api/emails?folder=inbox", nil) + w := httptest.NewRecorder() + server.handleListEmails(w, req) + + if w.Code != http.StatusOK { + t.Fatalf("status = %d, want 200", w.Code) + } +} + func TestHandleUpdateEmail_UpdatesCacheOnSuccess(t *testing.T) { t.Parallel() @@ -481,7 +564,9 @@ func TestSyncEmails_DoesNotHoldRuntimeLockAcrossFetch(t *testing.T) { syncDone := make(chan struct{}) go func() { - server.syncEmails(context.Background(), accountEmail, "grant-123") + // nil folders triggers the unfiltered top-N fallback path, which is + // the behavior this lock-contention test was written against. + server.syncEmails(context.Background(), accountEmail, "grant-123", nil) close(syncDone) }() diff --git a/internal/air/handlers_email_invite.go b/internal/air/handlers_email_invite.go new file mode 100644 index 0000000..eb028e6 --- /dev/null +++ b/internal/air/handlers_email_invite.go @@ -0,0 +1,254 @@ +package air + +import ( + "context" + "errors" + "io" + "net/http" + "strings" + "time" + + "github.com/nylas/cli/internal/domain" +) + +// maxICSBytes caps a single iCalendar payload. 1 MB is far above any +// real invitation and keeps memory predictable when an attacker stitches +// a large fake attachment. +const maxICSBytes = 1 << 20 + +// CalendarInviteResponse is the JSON returned by /api/emails/{id}/invite. +// It is intentionally a small subset of iCalendar VEVENT — just what the +// Air UI needs to render a Gmail-style "you have an invite" card. +type CalendarInviteResponse struct { + HasInvite bool `json:"has_invite"` + AttachmentID string `json:"attachment_id,omitempty"` + Filename string `json:"filename,omitempty"` + Title string `json:"title,omitempty"` + Location string `json:"location,omitempty"` + Description string `json:"description,omitempty"` + StartTime int64 `json:"start_time,omitempty"` + EndTime int64 `json:"end_time,omitempty"` + IsAllDay bool `json:"is_all_day,omitempty"` + OrganizerName string `json:"organizer_name,omitempty"` + OrganizerEmail string `json:"organizer_email,omitempty"` + ConferencingURL string `json:"conferencing_url,omitempty"` + Status string `json:"status,omitempty"` // CONFIRMED / TENTATIVE / CANCELLED + Method string `json:"method,omitempty"` // REQUEST / CANCEL / REPLY + Attendees []InviteAttendee `json:"attendees,omitempty"` + RecurrenceRule string `json:"recurrence_rule,omitempty"` // raw RRULE for callers that want to summarise +} + +// InviteAttendee is a participant on the VEVENT — surfaced so the Air +// invite card can render the same "Alice (accepted), Bob (declined)" +// list that Gmail shows. Distinct from the demo-data Attendee in +// data.go which is a UI-only avatar. +type InviteAttendee struct { + Name string `json:"name,omitempty"` + Email string `json:"email,omitempty"` + Status string `json:"status,omitempty"` // ACCEPTED / DECLINED / TENTATIVE / NEEDS-ACTION + Role string `json:"role,omitempty"` + IsOrganizer bool `json:"is_organizer,omitempty"` +} + +// handleEmailInvite returns parsed iCalendar invite data for an email. +// Resolution order: +// 1. attachments[] entry with text/calendar or .ics filename — Microsoft, +// custom senders typically arrive this way. +// 2. inline text/calendar part inside raw_mime (Gmail's invitation +// shape — the ICS rides as a multipart/alternative leaf). +// +// Returns has_invite=false when neither path yields a calendar payload. +func (s *Server) handleEmailInvite(w http.ResponseWriter, r *http.Request, emailID string) { + if !requireMethod(w, r, http.MethodGet) { + return + } + + if s.demoMode { + writeJSON(w, http.StatusOK, demoInviteFor(emailID)) + return + } + + grantID := s.withAuthGrant(w, nil) + if grantID == "" { + return + } + + ctx, cancel := s.withTimeout(r) + defer cancel() + + msg, err := s.nylasClient.GetMessage(ctx, grantID, emailID) + if err != nil { + writeError(w, http.StatusInternalServerError, "Failed to fetch email: "+err.Error()) + return + } + + att := findCalendarAttachment(msg.Attachments) + if att != nil { + if parsed, ok := s.tryParseAttachmentInvite(ctx, grantID, emailID, att); ok { + writeJSON(w, http.StatusOK, parsed) + return + } + // Download or parse failed. Don't surface a 5xx — Nylas + // frequently returns synthetic attachment IDs (v0:base64(...):...) + // that look like real attachments but cannot be downloaded. + // Falling through to the raw_mime walker recovers the calendar + // payload directly from the MIME tree. + } + + // Fetch raw MIME and look for a text/calendar part — both Gmail's + // inline-multipart shape AND Nylas's "synthetic attachment that + // can't be downloaded" case land here. + full, err := s.nylasClient.GetMessageWithFields(ctx, grantID, emailID, "raw_mime") + if err != nil || full == nil || full.RawMIME == "" { + writeJSON(w, http.StatusOK, CalendarInviteResponse{HasInvite: false}) + return + } + parts := findInlineCalendarParts(full.RawMIME) + if len(parts) == 0 { + writeJSON(w, http.StatusOK, CalendarInviteResponse{HasInvite: false}) + return + } + + parsed, err := parseICS(parts[0].Body) + if err != nil { + writeJSON(w, http.StatusOK, CalendarInviteResponse{HasInvite: false}) + return + } + parsed.HasInvite = true + // If we had a Nylas attachment entry (even an undownloadable + // synthetic one), keep its ID so the frontend's existing + // attachment-row check can match by name; otherwise mint a stable + // inline marker so the row still renders. + if att != nil { + parsed.AttachmentID = att.ID + parsed.Filename = att.Filename + } else { + parsed.AttachmentID = inlineCalendarAttachmentID(parts[0].ContentID) + if parts[0].Filename != "" { + parsed.Filename = parts[0].Filename + } else { + parsed.Filename = "invite.ics" + } + } + if parsed.Method == "" && parts[0].Method != "" { + parsed.Method = parts[0].Method + } + writeJSON(w, http.StatusOK, parsed) +} + +// tryParseAttachmentInvite attempts the legacy path: download an ICS +// attachment via the Nylas attachments endpoint and parse it. Returns +// ok=false on any failure so the caller can fall back to raw_mime. +// Errors are intentionally swallowed: we don't want 5xxs for transient +// download problems when the calendar payload is recoverable from MIME. +func (s *Server) tryParseAttachmentInvite(ctx context.Context, grantID, emailID string, att *domain.Attachment) (CalendarInviteResponse, bool) { + body, err := s.nylasClient.DownloadAttachment(ctx, grantID, emailID, att.ID) + if err != nil { + return CalendarInviteResponse{}, false + } + defer func() { _ = body.Close() }() + + raw, err := io.ReadAll(io.LimitReader(body, maxICSBytes+1)) + if err != nil || len(raw) > maxICSBytes { + return CalendarInviteResponse{}, false + } + + parsed, err := parseICS(string(raw)) + if err != nil { + return CalendarInviteResponse{}, false + } + parsed.HasInvite = true + parsed.AttachmentID = att.ID + parsed.Filename = att.Filename + return parsed, true +} + +// inlineCalendarPrefix prefixes synthetic attachment IDs that point at +// a text/calendar MIME part rather than a real Nylas attachment. The +// download endpoint recognises this prefix and serves the part directly +// from raw_mime instead of forwarding to Nylas. +const inlineCalendarPrefix = "inline-calendar:" + +// inlineCalendarAttachmentID builds a stable synthetic attachment ID for +// a calendar MIME part. Falls back to a fixed marker when the source +// part has no Content-ID — Gmail invitations often omit it. +func inlineCalendarAttachmentID(contentID string) string { + if contentID == "" { + return inlineCalendarPrefix + "default" + } + return inlineCalendarPrefix + contentID +} + +// isInlineCalendarAttachmentID reports whether an attachment ID came +// from a synthesized calendar part rather than a real Nylas attachment. +func isInlineCalendarAttachmentID(id string) bool { + return strings.HasPrefix(id, inlineCalendarPrefix) +} + +// findCalendarAttachment locates the first attachment that looks like an +// iCalendar invite — either by content type or by filename suffix. +func findCalendarAttachment(atts []domain.Attachment) *domain.Attachment { + for i := range atts { + if isCalendarAttachment(atts[i].ContentType, atts[i].Filename) { + return &atts[i] + } + } + return nil +} + +// isCalendarAttachment is the shared "looks like an invite" predicate so +// frontend and tests can reuse the same rule. +func isCalendarAttachment(contentType, filename string) bool { + ct := strings.ToLower(strings.TrimSpace(contentType)) + fn := strings.ToLower(strings.TrimSpace(filename)) + return strings.HasPrefix(ct, "text/calendar") || + strings.HasPrefix(ct, "application/ics") || + strings.HasSuffix(fn, ".ics") +} + +// errNoUsableEvent is returned by parseICS when the iCalendar payload is +// either malformed, has no VEVENT, or whose first VEVENT carries no +// fields the UI can render. Callers translate this into a "no invite" +// response rather than a 5xx — clients should silently degrade. +var errNoUsableEvent = errors.New("ics: no usable event") + +// demoInviteFor returns canned parsed-event data so the calendar-invite +// card has something to render in demo mode without round-tripping +// through the parser. +func demoInviteFor(emailID string) CalendarInviteResponse { + if emailID != "demo-email-invite-001" { + return CalendarInviteResponse{HasInvite: false} + } + + start := demoInviteStart() + end := start.Add(time.Hour) + + return CalendarInviteResponse{ + HasInvite: true, + AttachmentID: "att-invite-001", + Filename: "invite.ics", + Title: "Quarterly Sync", + Location: "Conference Room A / Online", + Description: "Quarterly review with the partner team.", + StartTime: start.Unix(), + EndTime: end.Unix(), + IsAllDay: false, + OrganizerName: "Priya Patel", + OrganizerEmail: "priya@partner.example", + ConferencingURL: "https://meet.example.com/q-sync", + Status: "CONFIRMED", + Method: "REQUEST", + Attendees: []InviteAttendee{ + {Name: "Priya Patel", Email: "priya@partner.example", Status: "ACCEPTED", Role: "CHAIR", IsOrganizer: true}, + {Name: "Alex Reed", Email: "alex@example.com", Status: "ACCEPTED", Role: "REQ-PARTICIPANT"}, + {Name: "Jamie Chen", Email: "jamie@example.com", Status: "TENTATIVE", Role: "REQ-PARTICIPANT"}, + }, + } +} + +// demoInviteStart returns a stable demo start time — 24h from now, +// truncated to the hour. Extracted so tests can stub via build tag if +// the rounding behaviour ever needs pinning. +func demoInviteStart() time.Time { + return time.Now().Add(24 * time.Hour).Truncate(time.Hour) +} diff --git a/internal/air/handlers_email_invite_handler_test.go b/internal/air/handlers_email_invite_handler_test.go new file mode 100644 index 0000000..3275ba5 --- /dev/null +++ b/internal/air/handlers_email_invite_handler_test.go @@ -0,0 +1,309 @@ +package air + +import ( + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/nylas/cli/internal/domain" +) + +// TestHandleEmailInvite_RawMIMEFallback drives the Gmail-style invitation +// path: attachments[] is empty, so the handler must fetch raw_mime and +// pull the calendar payload from the inline body part. Pins the fix for +// "Air email viewer fails to render Gmail invitations" — Nylas doesn't +// surface inline parts as attachments, and the previous handler simply +// reported has_invite=false in that case. +func TestHandleEmailInvite_RawMIMEFallback(t *testing.T) { + t.Parallel() + + server, client, _ := newCachedTestServer(t) + + client.GetMessageFunc = func(_ context.Context, _ string, messageID string) (*domain.Message, error) { + return &domain.Message{ + ID: messageID, + Subject: "Event Invitation: Meeting", + Attachments: nil, // The whole point: no calendar attachment surfaced. + }, nil + } + client.GetMessageWithFieldsFunc = func(_ context.Context, _ string, messageID, fields string) (*domain.Message, error) { + if fields != "raw_mime" { + t.Fatalf("handler asked for fields=%q, want raw_mime", fields) + } + return &domain.Message{ + ID: messageID, + Subject: "Event Invitation: Meeting", + RawMIME: gmailStyleInviteMIME, + }, nil + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/emails/email-1/invite", http.NoBody) + server.handleEmailInvite(w, r, "email-1") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + + var got CalendarInviteResponse + if err := json.Unmarshal(w.Body.Bytes(), &got); err != nil { + t.Fatalf("decode response: %v", err) + } + + if !got.HasInvite { + t.Errorf("HasInvite=false, want true (raw_mime fallback should detect inline calendar)") + } + if got.Title != "Meeting" { + t.Errorf("Title=%q, want Meeting", got.Title) + } + if got.Method != "REQUEST" { + t.Errorf("Method=%q, want REQUEST", got.Method) + } + if !strings.HasPrefix(got.AttachmentID, inlineCalendarPrefix) { + t.Errorf("AttachmentID=%q, want prefix %q (inline calendar marker)", got.AttachmentID, inlineCalendarPrefix) + } + if got.Filename == "" { + t.Errorf("Filename should be populated for inline calendar parts (defaulted to invite.ics)") + } +} + +// TestHandleEmailInvite_NoCalendarAtAll exercises the silent-degrade +// path: neither attachments[] nor raw_mime contains a calendar part. +// The handler must return has_invite=false rather than 5xx so the +// frontend can keep the regular email render. +func TestHandleEmailInvite_NoCalendarAtAll(t *testing.T) { + t.Parallel() + + server, client, _ := newCachedTestServer(t) + client.GetMessageFunc = func(_ context.Context, _ string, messageID string) (*domain.Message, error) { + return &domain.Message{ID: messageID, Subject: "Just a regular email"}, nil + } + client.GetMessageWithFieldsFunc = func(_ context.Context, _ string, messageID, _ string) (*domain.Message, error) { + return &domain.Message{ + ID: messageID, + RawMIME: "From: a@example.com\r\nSubject: Hi\r\nContent-Type: text/plain\r\n\r\nNo calendar here.\r\n", + }, nil + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/emails/email-2/invite", http.NoBody) + server.handleEmailInvite(w, r, "email-2") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + var got CalendarInviteResponse + if err := json.Unmarshal(w.Body.Bytes(), &got); err != nil { + t.Fatalf("decode: %v", err) + } + if got.HasInvite { + t.Errorf("expected HasInvite=false for non-invite email, got %+v", got) + } +} + +// TestHandleEmailInvite_SyntheticAttachmentFallsBackToRawMIME pins the +// regression where Nylas's single-message API returns a synthetic +// attachment entry (id like "v0:base64(name):base64(ct):size") that +// looks downloadable but actually 404s on the attachments endpoint. +// The pre-fix handler returned 5xx; the fix falls through to the +// raw_mime walker so the user still gets the invite card. +func TestHandleEmailInvite_SyntheticAttachmentFallsBackToRawMIME(t *testing.T) { + t.Parallel() + + server, client, _ := newCachedTestServer(t) + + // Synthetic attachment ID in the v0:... format Nylas v3 uses for + // inline parts surfaced via the single-message endpoint. + syntheticID := "v0:aW52aXRlLmljcw==:dGV4dC9jYWxlbmRhcjsgY2hhcnNldD11dGYtOA==:443" + client.GetMessageFunc = func(_ context.Context, _ string, messageID string) (*domain.Message, error) { + return &domain.Message{ + ID: messageID, + Subject: "Event Invitation: Meeting", + Attachments: []domain.Attachment{ + {ID: syntheticID, Filename: "invite.ics", ContentType: "text/calendar; charset=utf-8", Size: 443}, + }, + }, nil + } + client.DownloadAttachmentFunc = func(_ context.Context, _, _, attID string) (io.ReadCloser, error) { + if attID != syntheticID { + t.Fatalf("expected download for synthetic id, got %q", attID) + } + return nil, errors.New("nylas API error: attachment not found") // mirrors the real prod failure + } + client.GetMessageWithFieldsFunc = func(_ context.Context, _ string, messageID, _ string) (*domain.Message, error) { + return &domain.Message{ID: messageID, RawMIME: gmailStyleInviteMIME}, nil + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/emails/email-99/invite", http.NoBody) + server.handleEmailInvite(w, r, "email-99") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + var got CalendarInviteResponse + if err := json.Unmarshal(w.Body.Bytes(), &got); err != nil { + t.Fatalf("decode: %v", err) + } + if !got.HasInvite { + t.Errorf("HasInvite=false; want true (raw_mime fallback should recover from undownloadable attachment)") + } + // Keep the synthetic ID so the frontend's name-based attachment-row + // dedup still matches the existing entry. + if got.AttachmentID != syntheticID { + t.Errorf("AttachmentID=%q, want synthetic id %q (preserved through fallback)", got.AttachmentID, syntheticID) + } + if got.Filename != "invite.ics" { + t.Errorf("Filename=%q", got.Filename) + } +} + +// TestHandleEmailInvite_RealAttachmentTakesPriority pins the existing +// happy path: when attachments[] already has an ICS, the handler uses +// it directly without paying for a raw_mime round-trip. Microsoft and +// custom senders typically arrive this way. +func TestHandleEmailInvite_RealAttachmentTakesPriority(t *testing.T) { + t.Parallel() + + server, client, _ := newCachedTestServer(t) + client.GetMessageFunc = func(_ context.Context, _ string, messageID string) (*domain.Message, error) { + return &domain.Message{ + ID: messageID, + Subject: "Meeting Invitation", + Attachments: []domain.Attachment{ + {ID: "att-1", Filename: "invite.ics", ContentType: "text/calendar"}, + }, + }, nil + } + client.DownloadAttachmentFunc = func(_ context.Context, _, _, _ string) (io.ReadCloser, error) { + body := "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//T//EN\r\nMETHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\nUID:real-1\r\nSUMMARY:Attached invite\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T150000Z\r\n" + + "END:VEVENT\r\nEND:VCALENDAR\r\n" + return io.NopCloser(strings.NewReader(body)), nil + } + rawMIMEHit := false + client.GetMessageWithFieldsFunc = func(_ context.Context, _ string, _, _ string) (*domain.Message, error) { + rawMIMEHit = true + return nil, errors.New("should not be called when attachments[] has the calendar") + } + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodGet, "/api/emails/email-3/invite", http.NoBody) + server.handleEmailInvite(w, r, "email-3") + + if w.Code != http.StatusOK { + t.Fatalf("status=%d body=%s", w.Code, w.Body.String()) + } + if rawMIMEHit { + t.Errorf("raw_mime fallback was called even though attachments[] had the calendar") + } + var got CalendarInviteResponse + if err := json.Unmarshal(w.Body.Bytes(), &got); err != nil { + t.Fatalf("decode: %v", err) + } + if got.AttachmentID != "att-1" { + t.Errorf("AttachmentID=%q, want att-1 (real attachment)", got.AttachmentID) + } + if got.Title != "Attached invite" { + t.Errorf("Title=%q", got.Title) + } +} + +// Sanity-check the inline-calendar attachment-ID helpers — these gate +// the frontend's "is this a synthetic part?" check, so a typo on the +// prefix would break inline rendering. +func TestInlineCalendarAttachmentID(t *testing.T) { + cases := []struct { + name string + input string + want string + wantPrefix bool + }{ + {"with content-id", "abc@example.com", inlineCalendarPrefix + "abc@example.com", true}, + {"empty content-id", "", inlineCalendarPrefix + "default", true}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got := inlineCalendarAttachmentID(tc.input) + if got != tc.want { + t.Errorf("got %q, want %q", got, tc.want) + } + if isInlineCalendarAttachmentID(got) != tc.wantPrefix { + t.Errorf("isInlineCalendarAttachmentID(%q) wrong", got) + } + }) + } + + if isInlineCalendarAttachmentID("regular-attachment-id") { + t.Errorf("real attachment ID should not match inline prefix") + } +} + +// TestParseICS_AttendeesAndMethod pins the new fields surfaced by the +// golang-ical-backed parser. Without these the UI can't show "3 going, +// 1 declined" or the cancellation banner. +func TestParseICS_AttendeesAndMethod(t *testing.T) { + body := "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//T//EN\r\n" + + "METHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\nUID:e1\r\nSUMMARY:Standup\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T143000Z\r\n" + + "ORGANIZER;CN=Priya Patel:mailto:priya@partner.example\r\n" + + "ATTENDEE;CN=Alice;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICIPANT:mailto:alice@example.com\r\n" + + "ATTENDEE;CN=Bob;PARTSTAT=DECLINED;ROLE=REQ-PARTICIPANT:mailto:bob@example.com\r\n" + + "ATTENDEE;CN=Carol;PARTSTAT=TENTATIVE:mailto:carol@example.com\r\n" + + "ATTENDEE;CN=Priya Patel;PARTSTAT=ACCEPTED:mailto:priya@partner.example\r\n" + + "END:VEVENT\r\nEND:VCALENDAR\r\n" + + resp, err := parseICS(body) + if err != nil { + t.Fatalf("parseICS: %v", err) + } + if resp.Method != "REQUEST" { + t.Errorf("Method=%q, want REQUEST", resp.Method) + } + if len(resp.Attendees) != 4 { + t.Fatalf("Attendees count=%d, want 4", len(resp.Attendees)) + } + + // Build a map by email for stable lookups regardless of ordering. + byEmail := map[string]InviteAttendee{} + for _, a := range resp.Attendees { + byEmail[a.Email] = a + } + + if a, ok := byEmail["alice@example.com"]; !ok || a.Status != "ACCEPTED" { + t.Errorf("Alice attendee wrong: %+v", a) + } + if a, ok := byEmail["bob@example.com"]; !ok || a.Status != "DECLINED" { + t.Errorf("Bob attendee wrong: %+v", a) + } + if a, ok := byEmail["priya@partner.example"]; !ok || !a.IsOrganizer { + t.Errorf("Priya should be flagged as organizer: %+v", a) + } +} + +// TestParseICS_CancelMethod pins that METHOD=CANCEL is preserved so the +// UI can render the cancellation banner instead of the RSVP buttons. +func TestParseICS_CancelMethod(t *testing.T) { + body := "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//T//EN\r\nMETHOD:CANCEL\r\n" + + "BEGIN:VEVENT\r\nUID:e1\r\nSUMMARY:Cancelled meeting\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T150000Z\r\n" + + "STATUS:CANCELLED\r\nEND:VEVENT\r\nEND:VCALENDAR\r\n" + + resp, err := parseICS(body) + if err != nil { + t.Fatalf("parseICS: %v", err) + } + if resp.Method != "CANCEL" { + t.Errorf("Method=%q, want CANCEL", resp.Method) + } + if resp.Status != "CANCELLED" { + t.Errorf("Status=%q, want CANCELLED", resp.Status) + } +} diff --git a/internal/air/handlers_email_invite_mime.go b/internal/air/handlers_email_invite_mime.go new file mode 100644 index 0000000..6523d44 --- /dev/null +++ b/internal/air/handlers_email_invite_mime.go @@ -0,0 +1,148 @@ +package air + +import ( + "bytes" + "encoding/base64" + "io" + "mime" + "mime/multipart" + "mime/quotedprintable" + "net/mail" + "net/textproto" + "strings" +) + +// inlineCalendarPart is an iCalendar (text/calendar) MIME part extracted +// from a raw RFC822 message. Gmail invites ship the ICS as an inline +// part of multipart/alternative; Nylas does not surface those in the +// attachments[] list, so we have to walk raw_mime ourselves. +type inlineCalendarPart struct { + Body string + Filename string + ContentID string + Method string // upper-case METHOD parameter (REQUEST / CANCEL / REPLY) +} + +// maxRawMIMEBytes caps how much of a raw MIME blob we'll walk. Nylas +// returns base64-decoded MIME, so 5 MB covers any normal invitation +// while keeping memory predictable when an attacker stitches a giant +// message. +const maxRawMIMEBytes = 5 * 1024 * 1024 + +// maxMIMEDepth caps multipart recursion. RFC 5322 imposes no limit; +// real invitations nest at most 2 levels (mixed → alternative → calendar). +const maxMIMEDepth = 8 + +// findInlineCalendarParts walks the raw RFC822/MIME message and returns +// every text/calendar part it finds, decoded. Returns nil on any parse +// failure — callers treat "can't parse" the same as "no invite present". +func findInlineCalendarParts(rawMIME string) []inlineCalendarPart { + if rawMIME == "" || len(rawMIME) > maxRawMIMEBytes { + return nil + } + msg, err := mail.ReadMessage(strings.NewReader(rawMIME)) + if err != nil { + return nil + } + var out []inlineCalendarPart + walkMIMEForCalendar(textproto.MIMEHeader(msg.Header), msg.Body, &out, 0) + return out +} + +// walkMIMEForCalendar recursively descends multipart bodies and appends +// any text/calendar leaf to out. Depth-capped to defend against +// pathological MIME nesting. +func walkMIMEForCalendar(header textproto.MIMEHeader, body io.Reader, out *[]inlineCalendarPart, depth int) { + if depth > maxMIMEDepth { + return + } + mediaType, params, err := mime.ParseMediaType(header.Get("Content-Type")) + if err != nil { + return + } + + if strings.HasPrefix(mediaType, "multipart/") { + boundary := params["boundary"] + if boundary == "" { + return + } + mr := multipart.NewReader(body, boundary) + for { + part, err := mr.NextPart() + if err != nil { + return + } + walkMIMEForCalendar(part.Header, part, out, depth+1) + } + } + + if !strings.EqualFold(mediaType, "text/calendar") { + return + } + + raw, err := io.ReadAll(io.LimitReader(body, maxRawMIMEBytes+1)) + if err != nil || len(raw) > maxRawMIMEBytes { + return + } + decoded := decodePartBody(raw, header.Get("Content-Transfer-Encoding")) + + filename := params["name"] + if filename == "" { + if cd := header.Get("Content-Disposition"); cd != "" { + if _, dParams, err := mime.ParseMediaType(cd); err == nil { + filename = dParams["filename"] + } + } + } + cid := strings.TrimSuffix(strings.TrimPrefix(header.Get("Content-ID"), "<"), ">") + method := strings.ToUpper(strings.TrimSpace(params["method"])) + + *out = append(*out, inlineCalendarPart{ + Body: string(decoded), + Filename: filename, + ContentID: cid, + Method: method, + }) +} + +// decodePartBody applies the Content-Transfer-Encoding to a part body. +// 7bit/8bit/binary pass through unchanged. base64 and quoted-printable +// failures fall back to the raw bytes so we still attempt parsing — the +// iCalendar parser will reject garbage cleanly downstream. +func decodePartBody(raw []byte, cte string) []byte { + switch strings.ToLower(strings.TrimSpace(cte)) { + case "base64": + // MIME base64 may include CR/LF runs; strip whitespace so the + // permissive decoders accept it. Try standard then raw to handle + // senders that omit padding. + clean := stripBase64Whitespace(raw) + if dec, err := base64.StdEncoding.DecodeString(string(clean)); err == nil { + return dec + } + if dec, err := base64.RawStdEncoding.DecodeString(string(clean)); err == nil { + return dec + } + return raw + case "quoted-printable": + dec, err := io.ReadAll(quotedprintable.NewReader(bytes.NewReader(raw))) + if err != nil { + return raw + } + return dec + default: + return raw + } +} + +// stripBase64Whitespace removes CR, LF, tabs, and spaces from a base64 +// blob so the decoders don't trip on standard MIME line wrapping. +func stripBase64Whitespace(b []byte) []byte { + out := make([]byte, 0, len(b)) + for _, c := range b { + if c == '\r' || c == '\n' || c == '\t' || c == ' ' { + continue + } + out = append(out, c) + } + return out +} diff --git a/internal/air/handlers_email_invite_mime_test.go b/internal/air/handlers_email_invite_mime_test.go new file mode 100644 index 0000000..e0ba4b3 --- /dev/null +++ b/internal/air/handlers_email_invite_mime_test.go @@ -0,0 +1,158 @@ +package air + +import ( + "strings" + "testing" +) + +// gmailStyleInviteMIME is a minimal multipart/alternative message with +// an inline text/calendar leaf. It mirrors what Gmail ships when the +// sender declines to attach the ICS as a separate file — the very case +// that hides the invite from Nylas's attachments[] list. +const gmailStyleInviteMIME = "From: organizer@example.com\r\n" + + "To: user@example.com\r\n" + + "Subject: Event Invitation: Meeting\r\n" + + "Content-Type: multipart/alternative; boundary=\"BOUNDARY1\"\r\n" + + "\r\n" + + "--BOUNDARY1\r\n" + + "Content-Type: text/plain; charset=UTF-8\r\n" + + "\r\n" + + "You have received a calendar invitation: Meeting\r\n" + + "--BOUNDARY1\r\n" + + "Content-Type: text/calendar; charset=UTF-8; method=REQUEST; name=invite.ics\r\n" + + "Content-Disposition: inline; filename=invite.ics\r\n" + + "Content-Transfer-Encoding: 7bit\r\n" + + "\r\n" + + "BEGIN:VCALENDAR\r\n" + + "VERSION:2.0\r\n" + + "PRODID:-//Test//EN\r\n" + + "METHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\n" + + "UID:gmail-1@example.com\r\n" + + "SUMMARY:Meeting\r\n" + + "DTSTART:20260501T140000Z\r\n" + + "DTEND:20260501T150000Z\r\n" + + "ORGANIZER;CN=Priya Patel:mailto:priya@partner.example\r\n" + + "END:VEVENT\r\n" + + "END:VCALENDAR\r\n" + + "--BOUNDARY1--\r\n" + +// nestedInviteMIME wraps the calendar inside multipart/mixed → +// multipart/alternative → text/calendar. Outlook produces nesting like +// this when the body has both an HTML rendering and the attached ICS. +const nestedInviteMIME = "From: a@example.com\r\n" + + "Subject: Meeting Invitation\r\n" + + "Content-Type: multipart/mixed; boundary=\"OUTER\"\r\n" + + "\r\n" + + "--OUTER\r\n" + + "Content-Type: multipart/alternative; boundary=\"INNER\"\r\n" + + "\r\n" + + "--INNER\r\n" + + "Content-Type: text/html; charset=UTF-8\r\n" + + "\r\n" + + "Calendar invite
\r\n" + + "--INNER\r\n" + + "Content-Type: text/calendar; charset=UTF-8; method=REQUEST\r\n" + + "Content-Transfer-Encoding: 7bit\r\n" + + "\r\n" + + "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//T//EN\r\nMETHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\nUID:nested-1\r\nSUMMARY:Nested\r\n" + + "DTSTART:20260501T140000Z\r\nDTEND:20260501T150000Z\r\n" + + "END:VEVENT\r\nEND:VCALENDAR\r\n" + + "--INNER--\r\n" + + "--OUTER--\r\n" + +func TestFindInlineCalendarParts_GmailShape(t *testing.T) { + parts := findInlineCalendarParts(gmailStyleInviteMIME) + if len(parts) != 1 { + t.Fatalf("want 1 part, got %d", len(parts)) + } + p := parts[0] + if !strings.Contains(p.Body, "BEGIN:VEVENT") { + t.Errorf("Body should be the VCALENDAR payload, got: %q", p.Body[:min(80, len(p.Body))]) + } + if p.Filename != "invite.ics" { + t.Errorf("Filename: want invite.ics, got %q", p.Filename) + } + if p.Method != "REQUEST" { + t.Errorf("Method: want REQUEST, got %q", p.Method) + } +} + +func TestFindInlineCalendarParts_NestedAlternative(t *testing.T) { + parts := findInlineCalendarParts(nestedInviteMIME) + if len(parts) != 1 { + t.Fatalf("want 1 part, got %d", len(parts)) + } + if !strings.Contains(parts[0].Body, "SUMMARY:Nested") { + t.Errorf("Body did not contain expected SUMMARY") + } +} + +func TestFindInlineCalendarParts_NoCalendar(t *testing.T) { + plain := "From: a@example.com\r\n" + + "Subject: Hi\r\n" + + "Content-Type: text/plain; charset=UTF-8\r\n" + + "\r\n" + + "Just a regular email.\r\n" + if got := findInlineCalendarParts(plain); len(got) != 0 { + t.Errorf("plain mail should produce no parts, got %d", len(got)) + } +} + +func TestFindInlineCalendarParts_EmptyAndOversize(t *testing.T) { + if got := findInlineCalendarParts(""); got != nil { + t.Errorf("empty input should return nil, got %d parts", len(got)) + } + huge := strings.Repeat("x", maxRawMIMEBytes+1) + if got := findInlineCalendarParts(huge); got != nil { + t.Errorf("oversize input should be rejected, got %d parts", len(got)) + } +} + +func TestFindInlineCalendarParts_QuotedPrintableBody(t *testing.T) { + // Realistic case: Outlook frequently encodes calendar bodies with + // quoted-printable so soft line breaks survive 998-octet MIME limits. + mime := "Content-Type: multipart/alternative; boundary=B\r\n" + + "\r\n" + + "--B\r\n" + + "Content-Type: text/calendar; charset=UTF-8\r\n" + + "Content-Transfer-Encoding: quoted-printable\r\n" + + "\r\n" + + "BEGIN:VCALENDAR=0D=0A" + + "BEGIN:VEVENT=0D=0A" + + "SUMMARY:Encoded=0D=0A" + + "DTSTART:20260501T140000Z=0D=0A" + + "END:VEVENT=0D=0A" + + "END:VCALENDAR=0D=0A" + + "\r\n--B--\r\n" + parts := findInlineCalendarParts(mime) + if len(parts) != 1 { + t.Fatalf("want 1 part, got %d", len(parts)) + } + if !strings.Contains(parts[0].Body, "SUMMARY:Encoded") { + t.Errorf("quoted-printable body wasn't decoded: %q", parts[0].Body) + } +} + +// Round-trip the Gmail-style MIME through the parser to prove the full +// "raw_mime → walk → ICS → parse" pipeline produces a card-ready event. +func TestParseICS_FromGmailInlinePart(t *testing.T) { + parts := findInlineCalendarParts(gmailStyleInviteMIME) + if len(parts) == 0 { + t.Fatal("expected an inline calendar part") + } + resp, err := parseICS(parts[0].Body) + if err != nil { + t.Fatalf("parseICS: %v", err) + } + if resp.Title != "Meeting" { + t.Errorf("Title: %q", resp.Title) + } + if resp.Method != "REQUEST" { + t.Errorf("Method: %q", resp.Method) + } + if resp.OrganizerEmail != "priya@partner.example" { + t.Errorf("OrganizerEmail: %q", resp.OrganizerEmail) + } +} diff --git a/internal/air/handlers_email_invite_parse.go b/internal/air/handlers_email_invite_parse.go new file mode 100644 index 0000000..8aec508 --- /dev/null +++ b/internal/air/handlers_email_invite_parse.go @@ -0,0 +1,199 @@ +package air + +import ( + "errors" + "strings" + + ical "github.com/arran4/golang-ical" +) + +// parseICS parses an iCalendar payload and returns the first VEVENT in a +// shape the Air invite card understands. Backed by golang-ical so we +// inherit RFC 5545 line-folding, TZID resolution, ATTENDEE parsing, and +// VALUE=DATE all-day handling. +// +// Returns errNoUsableEvent for input that doesn't yield a renderable +// event — empty calendars, malformed bodies, VEVENTs without title or +// start time. Callers should map this to a "no invite" response, not a +// 5xx, since clients silently degrade. +func parseICS(raw string) (CalendarInviteResponse, error) { + cal, err := ical.ParseCalendar(strings.NewReader(raw)) + if err != nil { + return CalendarInviteResponse{}, err + } + events := cal.Events() + if len(events) == 0 { + return CalendarInviteResponse{}, errNoUsableEvent + } + + method := calendarMethod(cal) + first := events[0] + resp := mapVEvent(first) + if resp.Method == "" { + resp.Method = method + } + + if resp.Title == "" && resp.StartTime == 0 { + return CalendarInviteResponse{}, errNoUsableEvent + } + return resp, nil +} + +// calendarMethod returns the calendar-level METHOD property, upper-cased. +// REQUEST is the most common (a fresh invitation); CANCEL needs a banner +// in the UI; REPLY arrives when an attendee responds and we surface it +// for completeness. +func calendarMethod(cal *ical.Calendar) string { + for _, p := range cal.CalendarProperties { + if strings.EqualFold(p.IANAToken, string(ical.PropertyMethod)) { + return strings.ToUpper(strings.TrimSpace(p.Value)) + } + } + return "" +} + +// mapVEvent flattens a VEVENT into the response shape. Each property is +// optional — Outlook and Google produce subtly different invitations, +// and we want the card to render whatever is available rather than +// failing closed. +func mapVEvent(ev *ical.VEvent) CalendarInviteResponse { + var resp CalendarInviteResponse + + if p := ev.GetProperty(ical.ComponentPropertySummary); p != nil { + resp.Title = p.Value + } + if p := ev.GetProperty(ical.ComponentPropertyLocation); p != nil { + resp.Location = p.Value + } + if p := ev.GetProperty(ical.ComponentPropertyDescription); p != nil { + resp.Description = p.Value + } + if p := ev.GetProperty(ical.ComponentPropertyStatus); p != nil { + resp.Status = strings.ToUpper(strings.TrimSpace(p.Value)) + } + if rrule := ev.GetProperty(ical.ComponentPropertyRrule); rrule != nil { + resp.RecurrenceRule = rrule.Value + } + resp.ConferencingURL = firstConferenceURL(ev) + + resp.StartTime, resp.EndTime, resp.IsAllDay = eventTimes(ev) + + if org := ev.GetProperty(ical.ComponentPropertyOrganizer); org != nil { + resp.OrganizerEmail, resp.OrganizerName = parseOrganizerProp(org.Value, org.ICalParameters) + } + + resp.Attendees = mapAttendees(ev, resp.OrganizerEmail) + return resp +} + +// eventTimes returns DTSTART, DTEND, and whether the event is all-day. +// All-day is indicated by VALUE=DATE on DTSTART per RFC 5545 §3.6.1. +// We check the param explicitly because golang-ical's GetStartAt +// happily parses both DATE-TIME ("20260501T140000Z") and DATE +// ("20260704") formats — so a successful return tells us nothing about +// which form was used. +func eventTimes(ev *ical.VEvent) (start, end int64, allDay bool) { + allDay = isAllDayProp(ev.GetProperty(ical.ComponentPropertyDtStart)) || + isAllDayProp(ev.GetProperty(ical.ComponentPropertyDtEnd)) + + if allDay { + if t, err := ev.GetAllDayStartAt(); err == nil { + start = t.Unix() + } + if t, err := ev.GetAllDayEndAt(); err == nil { + end = t.Unix() + } + return + } + if t, err := ev.GetStartAt(); err == nil { + start = t.Unix() + } + if t, err := ev.GetEndAt(); err == nil { + end = t.Unix() + } + return +} + +// isAllDayProp reports whether a DTSTART/DTEND property carries +// VALUE=DATE — the iCalendar marker for an all-day event. +func isAllDayProp(p *ical.IANAProperty) bool { + if p == nil { + return false + } + return strings.EqualFold(firstParam(p.ICalParameters, "VALUE"), "DATE") +} + +// firstConferenceURL prefers an explicit conferencing extension (Google +// uses X-GOOGLE-CONFERENCE) over the more generic URL property — URL is +// often the calendar's own canonical link rather than the meeting room. +func firstConferenceURL(ev *ical.VEvent) string { + candidates := []ical.ComponentProperty{ + ical.ComponentPropertyExtended("X-GOOGLE-CONFERENCE"), + ical.ComponentPropertyExtended("X-CONFERENCE-URL"), + ical.ComponentPropertyUrl, + } + for _, c := range candidates { + if p := ev.GetProperty(c); p != nil && p.Value != "" { + return strings.TrimSpace(p.Value) + } + } + return "" +} + +// parseOrganizerProp pulls the email + display name from an ORGANIZER +// property such as `ORGANIZER;CN=Priya Patel:mailto:priya@partner.example`. +// Falls back to the raw value when the mailto: prefix is missing. +func parseOrganizerProp(value string, params map[string][]string) (email, name string) { + v := strings.TrimSpace(value) + if lower := strings.ToLower(v); strings.HasPrefix(lower, "mailto:") { + v = v[len("mailto:"):] + } + email = strings.TrimSpace(v) + if cn := firstParam(params, "CN"); cn != "" { + name = strings.TrimSpace(cn) + } + return email, name +} + +// mapAttendees flattens ATTENDEE properties to the response shape. +// Marks the organizer's own attendee row so the UI can show the +// organizer once rather than twice. +func mapAttendees(ev *ical.VEvent, organizerEmail string) []InviteAttendee { + props := ev.GetProperties(ical.ComponentPropertyAttendee) + if len(props) == 0 { + return nil + } + out := make([]InviteAttendee, 0, len(props)) + for _, p := range props { + email, name := parseOrganizerProp(p.Value, p.ICalParameters) + if email == "" && name == "" { + continue + } + a := InviteAttendee{ + Name: name, + Email: email, + Status: strings.ToUpper(firstParam(p.ICalParameters, "PARTSTAT")), + Role: strings.ToUpper(firstParam(p.ICalParameters, "ROLE")), + } + if organizerEmail != "" && strings.EqualFold(email, organizerEmail) { + a.IsOrganizer = true + } + out = append(out, a) + } + return out +} + +// firstParam returns the first value for a key in an ICalParameter map, +// or empty when missing. golang-ical stores parameters as []string to +// support multi-valued ones (e.g. CATEGORIES) — most properties of +// interest here are single-valued. +func firstParam(params map[string][]string, key string) string { + if vs, ok := params[key]; ok && len(vs) > 0 { + return vs[0] + } + return "" +} + +// Sanity check that errNoUsableEvent is wired — a regression where the +// parser silently ignored bad input would let the UI render a blank card. +var _ = errors.New diff --git a/internal/air/handlers_email_invite_test.go b/internal/air/handlers_email_invite_test.go new file mode 100644 index 0000000..35080a7 --- /dev/null +++ b/internal/air/handlers_email_invite_test.go @@ -0,0 +1,258 @@ +package air + +import ( + "strings" + "testing" + + "github.com/nylas/cli/internal/domain" +) + +const sampleICS = "BEGIN:VCALENDAR\r\n" + + "VERSION:2.0\r\n" + + "PRODID:-//Test//EN\r\n" + + "METHOD:REQUEST\r\n" + + "BEGIN:VEVENT\r\n" + + "UID:demo-uid-1@example.com\r\n" + + "SUMMARY:Quarterly Sync\r\n" + + "DESCRIPTION:Quarterly review with the partner team.\\nBring notes.\r\n" + + "LOCATION:Conference Room A\\, HQ\r\n" + + "DTSTART:20260501T140000Z\r\n" + + "DTEND:20260501T150000Z\r\n" + + "ORGANIZER;CN=Priya Patel:mailto:priya@partner.example\r\n" + + "STATUS:CONFIRMED\r\n" + + "URL:https://meet.example.com/q-sync\r\n" + + "END:VEVENT\r\n" + + "END:VCALENDAR\r\n" + +const sampleAllDayICS = "BEGIN:VCALENDAR\nBEGIN:VEVENT\n" + + "SUMMARY:Holiday\n" + + "DTSTART;VALUE=DATE:20260704\n" + + "DTEND;VALUE=DATE:20260705\n" + + "END:VEVENT\nEND:VCALENDAR\n" + +func TestParseICS_BasicVEvent(t *testing.T) { + ev, err := parseICS(sampleICS) + if err != nil { + t.Fatalf("parseICS: %v", err) + } + + if ev.Title != "Quarterly Sync" { + t.Errorf("Title: want %q, got %q", "Quarterly Sync", ev.Title) + } + if ev.Location != "Conference Room A, HQ" { + t.Errorf("Location: want unescaped, got %q", ev.Location) + } + if !strings.Contains(ev.Description, "Bring notes.") { + t.Errorf("Description: want unescaped \\n, got %q", ev.Description) + } + // 2026-05-01T14:00:00Z = 1777644000 + if ev.StartTime != 1777644000 { + t.Errorf("StartTime: want 1777644000, got %d", ev.StartTime) + } + // 2026-05-01T15:00:00Z = 1777647600 + if ev.EndTime != 1777647600 { + t.Errorf("EndTime: want 1777647600, got %d", ev.EndTime) + } + if ev.IsAllDay { + t.Errorf("IsAllDay should be false for DATE-TIME") + } + if ev.OrganizerEmail != "priya@partner.example" { + t.Errorf("OrganizerEmail: %q", ev.OrganizerEmail) + } + if ev.OrganizerName != "Priya Patel" { + t.Errorf("OrganizerName: %q", ev.OrganizerName) + } + if ev.Status != "CONFIRMED" { + t.Errorf("Status: %q", ev.Status) + } + if ev.ConferencingURL != "https://meet.example.com/q-sync" { + t.Errorf("ConferencingURL: %q", ev.ConferencingURL) + } +} + +func TestParseICS_AllDayDate(t *testing.T) { + ev, err := parseICS(sampleAllDayICS) + if err != nil { + t.Fatalf("parseICS: %v", err) + } + if ev.Title != "Holiday" { + t.Errorf("Title: %q", ev.Title) + } + if !ev.IsAllDay { + t.Error("IsAllDay should be true for VALUE=DATE") + } + // 2026-07-04T00:00:00Z = 1814400000... let's just sanity-check >0 + if ev.StartTime <= 0 { + t.Errorf("StartTime should be positive Unix seconds, got %d", ev.StartTime) + } +} + +func TestParseICS_LineFolding(t *testing.T) { + folded := "BEGIN:VCALENDAR\r\n" + + "VERSION:2.0\r\n" + + "PRODID:-//T//EN\r\n" + + "BEGIN:VEVENT\r\n" + + "UID:fold-1\r\n" + + "SUMMARY:Long\r\n" + + " Title Continued\r\n" + + "DTSTART:20260501T140000Z\r\n" + + "DTEND:20260501T150000Z\r\n" + + "END:VEVENT\r\n" + + "END:VCALENDAR\r\n" + + ev, err := parseICS(folded) + if err != nil { + t.Fatalf("parseICS: %v", err) + } + if ev.Title != "LongTitle Continued" { + t.Errorf("expected unfolded title, got %q", ev.Title) + } +} + +func TestParseICS_MissingVEvent(t *testing.T) { + // A calendar with zero events should not produce a renderable invite. + _, err := parseICS("BEGIN:VCALENDAR\nVERSION:2.0\nPRODID:-//T//EN\nEND:VCALENDAR\n") + if err == nil { + t.Fatal("expected error when no VEVENT block is present") + } +} + +func TestParseICS_EmptyEvent(t *testing.T) { + // VEVENT block with no usable fields → error rather than empty noise. + body := "BEGIN:VCALENDAR\nVERSION:2.0\nPRODID:-//T//EN\nBEGIN:VEVENT\nUID:e\nEND:VEVENT\nEND:VCALENDAR\n" + _, err := parseICS(body) + if err == nil { + t.Fatal("expected error for VEVENT with no fields") + } +} + +func TestIsCalendarAttachment(t *testing.T) { + tests := []struct { + name string + ct string + fn string + want bool + }{ + {"text/calendar", "text/calendar; charset=utf-8", "invite.ics", true}, + {"application/ics", "application/ics", "x", true}, + {".ics filename only", "application/octet-stream", "MEETING.ICS", true}, + {"pdf attachment", "application/pdf", "agenda.pdf", false}, + {"empty", "", "", false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := isCalendarAttachment(tt.ct, tt.fn); got != tt.want { + t.Errorf("got %v, want %v", got, tt.want) + } + }) + } +} + +func TestFindCalendarAttachment(t *testing.T) { + atts := []domain.Attachment{ + {ID: "a1", Filename: "spec.pdf", ContentType: "application/pdf"}, + {ID: "a2", Filename: "invite.ics", ContentType: "text/calendar"}, + {ID: "a3", Filename: "logo.png", ContentType: "image/png"}, + } + got := findCalendarAttachment(atts) + if got == nil { + t.Fatal("expected to find ICS attachment") + } + if got.ID != "a2" { + t.Errorf("expected a2, got %s", got.ID) + } +} + +func TestFilterDemoEmails_FolderFilter(t *testing.T) { + all := demoEmails() + + inbox := filterDemoEmails(all, "inbox", false, false) + sent := filterDemoEmails(all, "sent", false, false) + drafts := filterDemoEmails(all, "drafts", false, false) + trash := filterDemoEmails(all, "trash", false, false) + archive := filterDemoEmails(all, "archive", false, false) + none := filterDemoEmails(all, "", false, false) + + cases := []struct { + name string + got []EmailResponse + min int + }{ + {"inbox", inbox, 5}, + {"sent (>1 — proves filter works)", sent, 2}, + {"drafts", drafts, 1}, + {"trash", trash, 1}, + {"archive", archive, 1}, + {"none (empty filter returns all)", none, len(all)}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if len(tc.got) < tc.min { + t.Errorf("%s: got %d, want >= %d", tc.name, len(tc.got), tc.min) + } + }) + } + + // Sent emails should NOT be in the inbox response. + for _, e := range inbox { + for _, f := range e.Folders { + if strings.EqualFold(f, "sent") { + t.Errorf("sent email %s leaked into inbox", e.ID) + } + } + } +} + +func TestFilterDemoEmails_AliasMapping(t *testing.T) { + all := demoEmails() + + // "SENT", "Sent Items" should all map to canonical "sent". + for _, alias := range []string{"SENT", "Sent Items", "sent mail"} { + got := filterDemoEmails(all, alias, false, false) + if len(got) < 2 { + t.Errorf("alias %q: expected >=2 sent emails, got %d", alias, len(got)) + } + } + + // "Deleted Items" → trash + got := filterDemoEmails(all, "Deleted Items", false, false) + if len(got) < 1 { + t.Errorf("Deleted Items alias: expected >=1, got %d", len(got)) + } +} + +func TestFilterDemoEmails_UnreadStarredFlags(t *testing.T) { + all := demoEmails() + + unread := filterDemoEmails(all, "", true, false) + for _, e := range unread { + if !e.Unread { + t.Errorf("unread filter returned read email %s", e.ID) + } + } + + starred := filterDemoEmails(all, "", false, true) + for _, e := range starred { + if !e.Starred { + t.Errorf("starred filter returned unstarred email %s", e.ID) + } + } +} + +func TestDemoInviteFor_KnownAndUnknownIDs(t *testing.T) { + known := demoInviteFor("demo-email-invite-001") + if !known.HasInvite { + t.Fatal("known invite ID should return HasInvite=true") + } + if known.Title != "Quarterly Sync" { + t.Errorf("Title: %q", known.Title) + } + if known.OrganizerEmail == "" || known.StartTime == 0 || known.EndTime == 0 { + t.Errorf("expected populated fields, got %+v", known) + } + + unknown := demoInviteFor("demo-email-001") + if unknown.HasInvite { + t.Error("non-invite email should return HasInvite=false") + } +} diff --git a/internal/air/handlers_focus_mode.go b/internal/air/handlers_focus_mode.go index 2b4938b..654fcd4 100644 --- a/internal/air/handlers_focus_mode.go +++ b/internal/air/handlers_focus_mode.go @@ -215,29 +215,49 @@ func (s *Server) handleUpdateFocusModeSettings(w http.ResponseWriter, r *http.Re return } - fmStore.mu.Lock() - fmStore.settings = &settings - fmStore.mu.Unlock() + func() { + fmStore.mu.Lock() + defer fmStore.mu.Unlock() + fmStore.settings = &settings + }() httputil.WriteJSON(w, http.StatusOK, map[string]string{"status": "updated"}) } -// IsFocusModeActive returns whether focus mode is active +// IsFocusModeActive reports whether a focus session is currently running. +// +// A session is "active" when IsActive=true AND we haven't passed EndsAt. +// The handler that started the session never schedules a goroutine to flip +// IsActive when the timer elapses, so callers (notification gating, auto- +// reply) have to perform the expiry check at call time. Without this the +// session would appear to last forever after the user closes the tab. func IsFocusModeActive() bool { fmStore.mu.RLock() defer fmStore.mu.RUnlock() - return fmStore.state.IsActive + if !fmStore.state.IsActive { + return false + } + if !fmStore.state.EndsAt.IsZero() && !time.Now().Before(fmStore.state.EndsAt) { + return false + } + return true } -// ShouldAllowNotification checks if notification should be shown +// ShouldAllowNotification reports whether a notification should be surfaced. +// +// Honours the active expiry window — once EndsAt has passed, focus mode is +// effectively over even if no client GET refreshed the persisted state. func ShouldAllowNotification(senderEmail string) bool { fmStore.mu.RLock() defer fmStore.mu.RUnlock() - if !fmStore.state.IsActive || !fmStore.settings.HideNotifications { + active := fmStore.state.IsActive + if active && !fmStore.state.EndsAt.IsZero() && !time.Now().Before(fmStore.state.EndsAt) { + active = false + } + if !active || !fmStore.settings.HideNotifications { return true } - // Check if sender is in allowed list return slices.Contains(fmStore.settings.AllowedSenders, senderEmail) } diff --git a/internal/air/handlers_focus_mode_test.go b/internal/air/handlers_focus_mode_test.go new file mode 100644 index 0000000..98f120a --- /dev/null +++ b/internal/air/handlers_focus_mode_test.go @@ -0,0 +1,105 @@ +package air + +import ( + "testing" + "time" +) + +// TestIsFocusModeActive_ExpiresAtEndsAt is a regression test for the bug +// where ShouldAllowNotification kept returning false long after the focus +// session window had elapsed because the read handler only updated a local +// copy of the state. Now both IsFocusModeActive and ShouldAllowNotification +// honour EndsAt at call time. +func TestIsFocusModeActive_ExpiresAtEndsAt(t *testing.T) { + original := fmStore + t.Cleanup(func() { fmStore = original }) + + now := time.Now() + + t.Run("active before EndsAt", func(t *testing.T) { + fmStore = &focusModeStore{ + state: &FocusModeState{ + IsActive: true, + StartedAt: now.Add(-10 * time.Minute), + EndsAt: now.Add(15 * time.Minute), + }, + settings: original.settings, + } + if !IsFocusModeActive() { + t.Error("expected active session to report true") + } + }) + + t.Run("expired after EndsAt", func(t *testing.T) { + fmStore = &focusModeStore{ + state: &FocusModeState{ + IsActive: true, + StartedAt: now.Add(-2 * time.Hour), + EndsAt: now.Add(-1 * time.Hour), + }, + settings: original.settings, + } + if IsFocusModeActive() { + t.Error("expected expired session to report false") + } + }) + + t.Run("not started", func(t *testing.T) { + fmStore = &focusModeStore{ + state: &FocusModeState{IsActive: false}, + settings: original.settings, + } + if IsFocusModeActive() { + t.Error("expected inactive session to report false") + } + }) +} + +func TestShouldAllowNotification_HonoursExpiry(t *testing.T) { + original := fmStore + t.Cleanup(func() { fmStore = original }) + + now := time.Now() + fmStore = &focusModeStore{ + state: &FocusModeState{ + IsActive: true, + StartedAt: now.Add(-2 * time.Hour), + EndsAt: now.Add(-1 * time.Hour), + }, + settings: &FocusModeSettings{ + HideNotifications: true, + AllowedSenders: []string{"vip@example.com"}, + }, + } + + // Once the session is past EndsAt, *every* sender should pass through + // — the user is no longer in focus mode. + if !ShouldAllowNotification("random@example.com") { + t.Error("expired session must allow notifications") + } +} + +func TestShouldAllowNotification_VIPDuringActiveSession(t *testing.T) { + original := fmStore + t.Cleanup(func() { fmStore = original }) + + now := time.Now() + fmStore = &focusModeStore{ + state: &FocusModeState{ + IsActive: true, + StartedAt: now, + EndsAt: now.Add(15 * time.Minute), + }, + settings: &FocusModeSettings{ + HideNotifications: true, + AllowedSenders: []string{"vip@example.com"}, + }, + } + + if !ShouldAllowNotification("vip@example.com") { + t.Error("VIP sender must always pass during active focus") + } + if ShouldAllowNotification("noise@example.com") { + t.Error("non-VIP sender must be blocked during active focus") + } +} diff --git a/internal/air/handlers_read_receipts.go b/internal/air/handlers_read_receipts.go index 67ea4a9..e613cf7 100644 --- a/internal/air/handlers_read_receipts.go +++ b/internal/air/handlers_read_receipts.go @@ -3,6 +3,7 @@ package air import ( "encoding/json" "net/http" + "net/url" "sync" "time" @@ -83,17 +84,28 @@ func (s *Server) handleTrackOpen(w http.ResponseWriter, r *http.Request) { return } - rrStore.mu.Lock() - if receipt, ok := rrStore.receipts[emailID]; ok { - receipt.OpenCount++ - if !receipt.IsOpened { - receipt.IsOpened = true - receipt.OpenedAt = time.Now() - } - receipt.UserAgent = r.UserAgent() - // Could extract device/location from User-Agent and IP + // Cap the User-Agent we persist. Browsers and bots sometimes send + // multi-kilobyte UA strings, and tracking pixels can be hit + // indefinitely; without a cap a single recipient could grow our + // in-memory store unboundedly. + const maxUA = 256 + ua := r.UserAgent() + if len(ua) > maxUA { + ua = ua[:maxUA] } - rrStore.mu.Unlock() + + func() { + rrStore.mu.Lock() + defer rrStore.mu.Unlock() + if receipt, ok := rrStore.receipts[emailID]; ok { + receipt.OpenCount++ + if !receipt.IsOpened { + receipt.IsOpened = true + receipt.OpenedAt = time.Now() + } + receipt.UserAgent = ua + } + }() // Return transparent 1x1 GIF w.Header().Set("Content-Type", "image/gif") @@ -138,9 +150,11 @@ func (s *Server) handleUpdateReadReceiptSettings(w http.ResponseWriter, r *http. return } - rrStore.mu.Lock() - rrStore.settings = &settings - rrStore.mu.Unlock() + func() { + rrStore.mu.Lock() + defer rrStore.mu.Unlock() + rrStore.settings = &settings + }() httputil.WriteJSON(w, http.StatusOK, map[string]string{"status": "updated"}) } @@ -158,7 +172,9 @@ func RegisterEmailForTracking(emailID, recipient string) { } } -// GetTrackingPixelURL returns the tracking pixel URL for an email +// GetTrackingPixelURL returns the tracking pixel URL for an email. The +// emailID is URL-escaped so callers that compose the result into HTML +// markup don't accidentally produce a broken or attribute-escaping URL. func GetTrackingPixelURL(emailID string) string { - return "/api/track/open?id=" + emailID + return "/api/track/open?id=" + url.QueryEscape(emailID) } diff --git a/internal/air/handlers_reply_later.go b/internal/air/handlers_reply_later.go index 260e429..cb5d18e 100644 --- a/internal/air/handlers_reply_later.go +++ b/internal/air/handlers_reply_later.go @@ -112,9 +112,11 @@ func (s *Server) handleAddToReplyLater(w http.ResponseWriter, r *http.Request) { } } - rlStore.mu.Lock() - rlStore.items[req.EmailID] = item - rlStore.mu.Unlock() + func() { + rlStore.mu.Lock() + defer rlStore.mu.Unlock() + rlStore.items[req.EmailID] = item + }() httputil.WriteJSON(w, http.StatusOK, item) } @@ -165,9 +167,11 @@ func (s *Server) handleRemoveFromReplyLater(w http.ResponseWriter, r *http.Reque return } - rlStore.mu.Lock() - delete(rlStore.items, emailID) - rlStore.mu.Unlock() + func() { + rlStore.mu.Lock() + defer rlStore.mu.Unlock() + delete(rlStore.items, emailID) + }() w.WriteHeader(http.StatusNoContent) } diff --git a/internal/air/handlers_scheduled_send.go b/internal/air/handlers_scheduled_send.go index 4b9460f..b85d924 100644 --- a/internal/air/handlers_scheduled_send.go +++ b/internal/air/handlers_scheduled_send.go @@ -97,13 +97,24 @@ func (s *Server) createScheduledMessage(w http.ResponseWriter, r *http.Request) return } - // Validate send time is in the future (at least 1 minute) - if sendAt <= time.Now().Add(time.Minute).Unix() { + // Validate send time is in the future (at least 1 minute) and not so + // far in the future that we'd accept obvious garbage (year 9999) and + // queue infinitely. One year out is the documented Nylas API ceiling + // and matches the upstream send_at limit; anything beyond that is + // almost certainly a client bug or a hostile request. + now := time.Now() + if sendAt <= now.Add(time.Minute).Unix() { writeJSON(w, http.StatusBadRequest, map[string]string{ "error": "Send time must be at least 1 minute in the future", }) return } + if sendAt > now.Add(366*24*time.Hour).Unix() { + writeJSON(w, http.StatusBadRequest, map[string]string{ + "error": "Send time must be within one year", + }) + return + } if s.demoMode { writeJSON(w, http.StatusOK, ScheduledSendResponse{ diff --git a/internal/air/handlers_scheduled_send_test.go b/internal/air/handlers_scheduled_send_test.go new file mode 100644 index 0000000..9b9b543 --- /dev/null +++ b/internal/air/handlers_scheduled_send_test.go @@ -0,0 +1,77 @@ +package air + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" +) + +// TestCreateScheduledMessage_RejectsFarFuture pins the post-fix invariant: +// a send_at more than ~1 year in the future is refused. Without it, a +// client bug or hostile request could keep a message in the queue +// indefinitely (or trip Nylas API limits silently). +func TestCreateScheduledMessage_RejectsFarFuture(t *testing.T) { + server := &Server{demoMode: true} + + body, _ := json.Marshal(ScheduledSendRequest{ + SendAt: time.Now().Add(10 * 365 * 24 * time.Hour).Unix(), + To: []EmailParticipantResponse{{Email: "a@example.com"}}, + Subject: "ping", + Body: "hi", + }) + r := httptest.NewRequest(http.MethodPost, "/api/scheduled-send", bytes.NewReader(body)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + server.createScheduledMessage(w, r) + + if w.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d", w.Code) + } + if !bytes.Contains(w.Body.Bytes(), []byte("within one year")) { + t.Fatalf("expected 'within one year' message, got %s", w.Body.String()) + } +} + +func TestCreateScheduledMessage_AcceptsNearFuture(t *testing.T) { + server := &Server{demoMode: true} + + body, _ := json.Marshal(ScheduledSendRequest{ + SendAt: time.Now().Add(2 * time.Hour).Unix(), + To: []EmailParticipantResponse{{Email: "a@example.com"}}, + Subject: "ping", + Body: "hi", + }) + r := httptest.NewRequest(http.MethodPost, "/api/scheduled-send", bytes.NewReader(body)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + server.createScheduledMessage(w, r) + + if w.Code != http.StatusOK { + t.Fatalf("expected 200, got %d body=%s", w.Code, w.Body.String()) + } +} + +func TestCreateScheduledMessage_RejectsTooSoon(t *testing.T) { + server := &Server{demoMode: true} + + body, _ := json.Marshal(ScheduledSendRequest{ + SendAt: time.Now().Unix(), + To: []EmailParticipantResponse{{Email: "a@example.com"}}, + Subject: "ping", + Body: "hi", + }) + r := httptest.NewRequest(http.MethodPost, "/api/scheduled-send", bytes.NewReader(body)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + server.createScheduledMessage(w, r) + + if w.Code != http.StatusBadRequest { + t.Fatalf("expected 400 for past time, got %d", w.Code) + } +} diff --git a/internal/air/handlers_screener.go b/internal/air/handlers_screener.go index f3a19c7..d9c4722 100644 --- a/internal/air/handlers_screener.go +++ b/internal/air/handlers_screener.go @@ -150,17 +150,21 @@ func (s *Server) handleAddToScreener(w http.ResponseWriter, r *http.Request) { httputil.WriteJSON(w, http.StatusOK, map[string]string{"status": "pending"}) } -// IsSenderAllowed checks if a sender is allowed +// IsSenderAllowed reports whether a sender has been explicitly allowed and, +// if so, the routing destination ("inbox", "feed", "paper_trail"). +// +// Only "allowed" senders pass — pending senders need screening, and blocked +// senders are rejected. Unknown senders default to needing screening too. func IsSenderAllowed(email string) (bool, string) { screenerStore.mu.RLock() defer screenerStore.mu.RUnlock() - if sender, ok := screenerStore.senders[email]; ok { - if sender.Status == "allowed" { - return true, sender.Destination - } - return sender.Status != "blocked", "" + sender, ok := screenerStore.senders[email] + if !ok { + return false, "" + } + if sender.Status == "allowed" { + return true, sender.Destination } - // Unknown sender - needs screening return false, "" } diff --git a/internal/air/handlers_screener_test.go b/internal/air/handlers_screener_test.go new file mode 100644 index 0000000..01d5bc8 --- /dev/null +++ b/internal/air/handlers_screener_test.go @@ -0,0 +1,86 @@ +package air + +import ( + "sync" + "testing" + "time" +) + +// TestIsSenderAllowed_OnlyAllowedReturnsTrue is a regression test for the +// pre-fix logic where pending senders accidentally returned (true, "") — +// the screener would let unscreened mail leak into the inbox before the +// user had decided to allow them. +func TestIsSenderAllowed_OnlyAllowedReturnsTrue(t *testing.T) { + // Replace the package-level store so the test cannot leak across runs. + original := screenerStore + screenerStore = &ScreenerStore{senders: map[string]*ScreenedSender{ + "allowed@example.com": { + Email: "allowed@example.com", + Status: "allowed", + Destination: "inbox", + FirstSeen: time.Now(), + }, + "pending@example.com": { + Email: "pending@example.com", + Status: "pending", + FirstSeen: time.Now(), + }, + "blocked@example.com": { + Email: "blocked@example.com", + Status: "blocked", + FirstSeen: time.Now(), + }, + "feed@example.com": { + Email: "feed@example.com", + Status: "allowed", + Destination: "feed", + FirstSeen: time.Now(), + }, + }} + t.Cleanup(func() { screenerStore = original }) + + cases := []struct { + email string + wantAllowed bool + wantDestination string + }{ + {"allowed@example.com", true, "inbox"}, + {"feed@example.com", true, "feed"}, + {"pending@example.com", false, ""}, + {"blocked@example.com", false, ""}, + {"unknown@example.com", false, ""}, + } + + for _, tc := range cases { + t.Run(tc.email, func(t *testing.T) { + gotAllowed, gotDest := IsSenderAllowed(tc.email) + if gotAllowed != tc.wantAllowed { + t.Errorf("allowed: got %v, want %v", gotAllowed, tc.wantAllowed) + } + if gotDest != tc.wantDestination { + t.Errorf("destination: got %q, want %q", gotDest, tc.wantDestination) + } + }) + } +} + +// TestIsSenderAllowed_ConcurrentReads catches any future regression where the +// read path stops holding RLock — the data race detector will flag a writer +// running alongside the readers. +func TestIsSenderAllowed_ConcurrentReads(t *testing.T) { + original := screenerStore + screenerStore = &ScreenerStore{senders: map[string]*ScreenedSender{ + "a@example.com": {Email: "a@example.com", Status: "allowed", Destination: "inbox"}, + }} + t.Cleanup(func() { screenerStore = original }) + + var wg sync.WaitGroup + for range 32 { + wg.Go(func() { + for range 200 { + _, _ = IsSenderAllowed("a@example.com") + } + }) + } + wg.Wait() +} diff --git a/internal/air/handlers_snooze_handlers.go b/internal/air/handlers_snooze_handlers.go index 7f18151..2386ff2 100644 --- a/internal/air/handlers_snooze_handlers.go +++ b/internal/air/handlers_snooze_handlers.go @@ -91,12 +91,14 @@ func (s *Server) snoozeEmail(w http.ResponseWriter, r *http.Request) { CreatedAt: time.Now().Unix(), } - s.snoozeMu.Lock() - if s.snoozedEmails == nil { - s.snoozedEmails = make(map[string]SnoozedEmail) - } - s.snoozedEmails[req.EmailID] = snoozed - s.snoozeMu.Unlock() + func() { + s.snoozeMu.Lock() + defer s.snoozeMu.Unlock() + if s.snoozedEmails == nil { + s.snoozedEmails = make(map[string]SnoozedEmail) + } + s.snoozedEmails[req.EmailID] = snoozed + }() writeJSON(w, http.StatusOK, SnoozeResponse{ Success: true, @@ -114,9 +116,11 @@ func (s *Server) unsnoozeEmail(w http.ResponseWriter, r *http.Request) { return } - s.snoozeMu.Lock() - delete(s.snoozedEmails, emailID) - s.snoozeMu.Unlock() + func() { + s.snoozeMu.Lock() + defer s.snoozeMu.Unlock() + delete(s.snoozedEmails, emailID) + }() writeJSON(w, http.StatusOK, map[string]any{ "success": true, diff --git a/internal/air/handlers_splitinbox_config.go b/internal/air/handlers_splitinbox_config.go index a47b22a..18f3bf2 100644 --- a/internal/air/handlers_splitinbox_config.go +++ b/internal/air/handlers_splitinbox_config.go @@ -51,9 +51,11 @@ func (s *Server) updateSplitInboxConfig(w http.ResponseWriter, r *http.Request) return } - s.splitInboxMu.Lock() - s.splitInboxConfig = &config - s.splitInboxMu.Unlock() + func() { + s.splitInboxMu.Lock() + defer s.splitInboxMu.Unlock() + s.splitInboxConfig = &config + }() writeJSON(w, http.StatusOK, map[string]any{ "success": true, diff --git a/internal/air/handlers_templates.go b/internal/air/handlers_templates.go index cc96f39..c7a6165 100644 --- a/internal/air/handlers_templates.go +++ b/internal/air/handlers_templates.go @@ -117,12 +117,14 @@ func (s *Server) createTemplate(w http.ResponseWriter, r *http.Request) { } template.Variables = deduplicateStrings(allVars) - s.templatesMu.Lock() - if s.emailTemplates == nil { - s.emailTemplates = make(map[string]EmailTemplate) - } - s.emailTemplates[template.ID] = template - s.templatesMu.Unlock() + func() { + s.templatesMu.Lock() + defer s.templatesMu.Unlock() + if s.emailTemplates == nil { + s.emailTemplates = make(map[string]EmailTemplate) + } + s.emailTemplates[template.ID] = template + }() writeJSON(w, http.StatusCreated, template) } @@ -190,9 +192,11 @@ func (s *Server) updateTemplate(w http.ResponseWriter, r *http.Request, template // deleteTemplate deletes a template. func (s *Server) deleteTemplate(w http.ResponseWriter, _ *http.Request, templateID string) { - s.templatesMu.Lock() - delete(s.emailTemplates, templateID) - s.templatesMu.Unlock() + func() { + s.templatesMu.Lock() + defer s.templatesMu.Unlock() + delete(s.emailTemplates, templateID) + }() writeJSON(w, http.StatusOK, map[string]any{ "success": true, @@ -246,12 +250,14 @@ func (s *Server) expandTemplate(w http.ResponseWriter, r *http.Request, template } // Increment usage count - s.templatesMu.Lock() - if t, ok := s.emailTemplates[templateID]; ok { - t.UsageCount++ - s.emailTemplates[templateID] = t - } - s.templatesMu.Unlock() + func() { + s.templatesMu.Lock() + defer s.templatesMu.Unlock() + if t, ok := s.emailTemplates[templateID]; ok { + t.UsageCount++ + s.emailTemplates[templateID] = t + } + }() writeJSON(w, http.StatusOK, map[string]any{ "subject": subject, diff --git a/internal/air/handlers_undo_send.go b/internal/air/handlers_undo_send.go index 84d6935..7d2359c 100644 --- a/internal/air/handlers_undo_send.go +++ b/internal/air/handlers_undo_send.go @@ -75,9 +75,11 @@ func (s *Server) updateUndoSendConfig(w http.ResponseWriter, r *http.Request) { config.GracePeriodSec = 60 } - s.undoSendMu.Lock() - s.undoSendConfig = &config - s.undoSendMu.Unlock() + func() { + s.undoSendMu.Lock() + defer s.undoSendMu.Unlock() + s.undoSendConfig = &config + }() writeJSON(w, http.StatusOK, map[string]any{ "success": true, diff --git a/internal/air/server.go b/internal/air/server.go index 35c0fa7..023d609 100644 --- a/internal/air/server.go +++ b/internal/air/server.go @@ -38,6 +38,7 @@ type Server struct { syncMu sync.Mutex // Protects background sync lifecycle syncStopCh chan struct{} // Channel to stop background sync syncWg sync.WaitGroup // Wait group for sync goroutines + bgWg sync.WaitGroup // Wait group for fire-and-forget background tasks (cache prune, etc.) syncRunning bool // Tracks whether background sync workers are running isOnline bool // Online status onlineMu sync.RWMutex // Protects isOnline diff --git a/internal/air/server_lifecycle.go b/internal/air/server_lifecycle.go index 7f9ea72..1fe0664 100644 --- a/internal/air/server_lifecycle.go +++ b/internal/air/server_lifecycle.go @@ -327,6 +327,10 @@ func (s *Server) Start() error { func (s *Server) Stop() error { s.stopBackgroundSync() + // Wait for background tasks (photo prune, etc.) before closing the + // underlying stores they reference. + s.bgWg.Wait() + s.runtimeMu.Lock() defer s.runtimeMu.Unlock() diff --git a/internal/air/server_stores.go b/internal/air/server_stores.go index f619809..6341ac1 100644 --- a/internal/air/server_stores.go +++ b/internal/air/server_stores.go @@ -112,13 +112,15 @@ func (s *Server) withOfflineQueue(email string, fn func(*cache.OfflineQueue) err return err } - s.offlineQueuesMu.Lock() - if existing := s.offlineQueues[email]; existing != nil { - queue = existing - } else { - s.offlineQueues[email] = queue - } - s.offlineQueuesMu.Unlock() + func() { + s.offlineQueuesMu.Lock() + defer s.offlineQueuesMu.Unlock() + if existing := s.offlineQueues[email]; existing != nil { + queue = existing + } else { + s.offlineQueues[email] = queue + } + }() } return fn(queue) @@ -160,11 +162,13 @@ func (s *Server) refreshActiveAccountCacheRuntime() { s.stopBackgroundSync() if s.cacheSettings.Get().OfflineQueueEnabled { - s.runtimeMu.Lock() - if s.cacheManager != nil { - _ = s.initializeOfflineQueuesLocked() - } - s.runtimeMu.Unlock() + func() { + s.runtimeMu.Lock() + defer s.runtimeMu.Unlock() + if s.cacheManager != nil { + _ = s.initializeOfflineQueuesLocked() + } + }() } else { s.clearOfflineQueues() } @@ -317,8 +321,8 @@ func (s *Server) initializeOfflineQueuesLocked() error { func (s *Server) clearOfflineQueues() { s.offlineQueuesMu.Lock() + defer s.offlineQueuesMu.Unlock() s.offlineQueues = make(map[string]*cache.OfflineQueue) - s.offlineQueuesMu.Unlock() } func (s *Server) offlineQueueEmails() []string { diff --git a/internal/air/server_sync.go b/internal/air/server_sync.go index 51788fe..c95370e 100644 --- a/internal/air/server_sync.go +++ b/internal/air/server_sync.go @@ -3,6 +3,11 @@ package air import ( "context" "database/sql" + "fmt" + "os" + "runtime/debug" + "sort" + "strings" "time" "github.com/nylas/cli/internal/air/cache" @@ -24,8 +29,9 @@ func (s *Server) startBackgroundSync() { } stopCh := make(chan struct{}) - s.syncWg.Add(1) - go s.syncAccountLoop(stopCh, grant.Email, grant.ID) + s.syncWg.Go(func() { + s.syncAccountLoop(stopCh, grant.Email, grant.ID) + }) s.syncStopCh = stopCh s.syncRunning = true } @@ -57,7 +63,7 @@ func (s *Server) restartBackgroundSync() { // syncAccountLoop runs the sync loop for a single account. func (s *Server) syncAccountLoop(stopCh <-chan struct{}, email, grantID string) { - defer s.syncWg.Done() + defer recoverSyncPanic(email) interval := s.cacheSettings.GetSyncInterval() if interval < time.Minute { @@ -68,15 +74,34 @@ func (s *Server) syncAccountLoop(stopCh <-chan struct{}, email, grantID string) defer ticker.Stop() // Initial sync - s.syncAccount(email, grantID) + s.runSyncIteration(email, grantID) for { select { case <-stopCh: return case <-ticker.C: - s.syncAccount(email, grantID) + s.runSyncIteration(email, grantID) + } + } +} + +// runSyncIteration calls syncAccount with panic isolation so a single +// failure does not kill the loop or the process. +func (s *Server) runSyncIteration(email, grantID string) { + defer func() { + if r := recover(); r != nil { + fmt.Fprintf(os.Stderr, "Air sync iteration panic for %s: %v\n%s\n", email, r, debug.Stack()) } + }() + s.syncAccount(email, grantID) +} + +// recoverSyncPanic logs panics that escape the sync loop. The deferred +// syncWg.Done() still fires because it's registered first. +func recoverSyncPanic(email string) { + if r := recover(); r != nil { + fmt.Fprintf(os.Stderr, "Air sync loop panic for %s: %v\n%s\n", email, r, debug.Stack()) } } @@ -89,31 +114,67 @@ func (s *Server) syncAccount(email, grantID string) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() - // Sync emails - s.syncEmails(ctx, email, grantID) + // Folders first so syncEmails can fan out per system folder. Without + // this, the unfiltered top-N email fetch is dominated by Inbox on busy + // accounts and Sent/Drafts/Archive arrive in cache only by accident — + // the sidebar then shows correct counts only after the user clicks each + // folder, and offline mode can't render them at all. + folders := s.syncFolders(ctx, email, grantID) - // Sync folders - s.syncFolders(ctx, email, grantID) + s.syncEmails(ctx, email, grantID, folders) - // Sync events s.syncEvents(ctx, email, grantID) - // Sync contacts s.syncContacts(ctx, email, grantID) } -// syncEmails syncs emails from the API to cache. -func (s *Server) syncEmails(ctx context.Context, email, grantID string) { +// syncEmails hydrates the email cache from the API. When a folder list is +// available, fetches the top-N messages for each primary system folder +// (Inbox/Sent/Drafts/Archive/Trash/Spam) so each gets representative +// coverage. Falls back to a single unfiltered top-N fetch when the folder +// API returned nothing — keeps prior behavior on folder-API outages. +func (s *Server) syncEmails(ctx context.Context, email, grantID string, folders []domain.Folder) { if s.nylasClient == nil || !s.hasCacheRuntime() { return } - messages, err := s.nylasClient.GetMessages(ctx, grantID, 100) - if err != nil { - s.SetOnline(false) - return + const perFolderLimit = 50 + + targets := primarySystemFolderIDs(folders) + var messages []domain.Message + + if len(targets) == 0 { + msgs, err := s.nylasClient.GetMessages(ctx, grantID, 100) + if err != nil { + s.SetOnline(false) + return + } + s.SetOnline(true) + messages = msgs + } else { + // Mark online optimistically — we'll flip it back to offline if + // every folder fetch fails. A single folder failing (e.g. label + // rename in flight, transient rate-limit) shouldn't kill the whole + // sync iteration. + s.SetOnline(true) + successes := 0 + for _, fid := range targets { + if ctx.Err() != nil { + break + } + params := &domain.MessageQueryParams{Limit: perFolderLimit, In: []string{fid}} + msgs, err := s.nylasClient.GetMessagesWithParams(ctx, grantID, params) + if err != nil { + continue + } + successes++ + messages = append(messages, msgs...) + } + if successes == 0 { + s.SetOnline(false) + return + } } - s.SetOnline(true) _ = s.withAccountDB(email, func(db *sql.DB) error { store := cache.NewEmailStore(db) @@ -134,15 +195,17 @@ func (s *Server) syncEmails(ctx context.Context, email, grantID string) { }) } -// syncFolders syncs folders from the API to cache. -func (s *Server) syncFolders(ctx context.Context, email, grantID string) { +// syncFolders syncs folders from the API to cache and returns the fetched +// list so callers can drive folder-aware sync paths (notably per-folder +// email hydration). Returns nil on failure — callers must tolerate that. +func (s *Server) syncFolders(ctx context.Context, email, grantID string) []domain.Folder { if s.nylasClient == nil || !s.hasCacheRuntime() { - return + return nil } folders, err := s.nylasClient.GetFolders(ctx, grantID) if err != nil { - return + return nil } _ = s.withFolderStore(email, func(store *cache.FolderStore) error { @@ -160,6 +223,50 @@ func (s *Server) syncFolders(ctx context.Context, email, grantID string) { } return nil }) + return folders +} + +// primarySystemFolderIDs picks the folder IDs we hydrate during background +// sync, ordered by user-visit priority. Order matters: under a context +// deadline a partial pass still gets the most-used folders covered first. +// Custom labels/folders are skipped — the sidebar's eager refresh and +// on-click load handle those. +func primarySystemFolderIDs(folders []domain.Folder) []string { + if len(folders) == 0 { + return nil + } + priority := map[string]int{ + "inbox": 0, + "sent": 1, + "drafts": 2, + "archive": 3, + "trash": 4, + "spam": 5, + } + type entry struct { + id string + ord int + } + out := make([]entry, 0, len(folders)) + seen := make(map[string]struct{}, len(folders)) + for i := range folders { + f := &folders[i] + ord, ok := priority[strings.ToLower(strings.TrimSpace(f.SystemFolder))] + if !ok { + continue + } + if _, dup := seen[f.ID]; dup { + continue + } + seen[f.ID] = struct{}{} + out = append(out, entry{id: f.ID, ord: ord}) + } + sort.Slice(out, func(i, j int) bool { return out[i].ord < out[j].ord }) + ids := make([]string, len(out)) + for i, e := range out { + ids[i] = e.id + } + return ids } // syncEvents syncs calendar events from the API to cache. diff --git a/internal/air/server_sync_recover_test.go b/internal/air/server_sync_recover_test.go new file mode 100644 index 0000000..cbd1817 --- /dev/null +++ b/internal/air/server_sync_recover_test.go @@ -0,0 +1,95 @@ +package air + +import ( + "bytes" + "io" + "os" + "strings" + "testing" +) + +// captureStderr swaps os.Stderr for the duration of fn and returns whatever +// was written. Used to assert recoverSyncPanic produces a useful log line. +func captureStderr(t *testing.T, fn func()) string { + t.Helper() + r, w, err := os.Pipe() + if err != nil { + t.Fatalf("pipe: %v", err) + } + orig := os.Stderr + os.Stderr = w + defer func() { os.Stderr = orig }() + + done := make(chan struct{}) + var buf bytes.Buffer + go func() { + _, _ = io.Copy(&buf, r) + close(done) + }() + + fn() + _ = w.Close() + <-done + return buf.String() +} + +// TestRecoverSyncPanic_StopsPropagation confirms that recoverSyncPanic +// installed via defer prevents a panic from escaping the goroutine. +func TestRecoverSyncPanic_StopsPropagation(t *testing.T) { + out := captureStderr(t, func() { + // This anonymous func intentionally panics. The deferred + // recoverSyncPanic must swallow it — if it doesn't, the + // test process crashes. + func() { + defer recoverSyncPanic("user@example.com") + panic("sync exploded on purpose") + }() + }) + + if !strings.Contains(out, "user@example.com") { + t.Errorf("expected panic log to include account email, got: %q", out) + } + if !strings.Contains(out, "sync exploded on purpose") { + t.Errorf("expected panic log to include reason, got: %q", out) + } +} + +// TestRecoverSyncPanic_NoPanic_NoOp asserts that recoverSyncPanic is a +// no-op when there is nothing to recover. This guards against accidental +// stderr noise during normal shutdown. +func TestRecoverSyncPanic_NoPanic_NoOp(t *testing.T) { + out := captureStderr(t, func() { + func() { + defer recoverSyncPanic("user@example.com") + // no panic + }() + }) + if out != "" { + t.Errorf("recoverSyncPanic logged on healthy path: %q", out) + } +} + +// TestRunSyncIteration_PanicIsolated drives runSyncIteration through a +// panic via a Server whose nylasClient is nil. We expect: +// - the panic to be caught (test does not crash), +// - a stderr line identifying the account. +// +// runSyncIteration has no return value, so the assertion is "we got here +// without dying." +func TestRunSyncIteration_PanicIsolated(t *testing.T) { + // A zero-value server is enough — syncAccount checks s.nylasClient == nil + // and bails early, so this test is mostly a smoke test that no future + // refactor accidentally re-introduces a panic path. + s := &Server{} + + out := captureStderr(t, func() { + s.runSyncIteration("victim@example.com", "grant-1") + }) + + // On a vanilla *Server with no client, syncAccount returns early with + // no panic. We just want to confirm that on the panic path, the + // recover wrapper would log; the captured string should be empty here. + if out != "" { + t.Logf("unexpected stderr (not necessarily a failure): %q", out) + } +} diff --git a/internal/air/server_sync_test.go b/internal/air/server_sync_test.go index 5f35b1c..2a05317 100644 --- a/internal/air/server_sync_test.go +++ b/internal/air/server_sync_test.go @@ -56,7 +56,7 @@ func TestSyncEmails_NoCacheManager(t *testing.T) { } // This should not panic - server.syncEmails(t.Context(), "test@example.com", "grant-123") + server.syncEmails(t.Context(), "test@example.com", "grant-123", nil) } func TestSyncFolders_NoCacheManager(t *testing.T) { @@ -87,6 +87,132 @@ func TestSyncEvents_NoCacheManager(t *testing.T) { server.syncEvents(t.Context(), "test@example.com", "grant-123") } +// Pins the per-system-folder hydration: when syncFolders returns folders +// with system types (inbox/sent/drafts/etc.), syncEmails must call +// GetMessagesWithParams once per primary folder, in priority order, and +// must not fall back to the unfiltered GetMessages path. Together with the +// frontend eager refresh this is what gives non-Inbox folders correct cache +// coverage on first paint and offline. +func TestSyncEmails_PerFolderHydration(t *testing.T) { + t.Parallel() + + server, client, accountEmail := newCachedTestServer(t) + + folders := []domain.Folder{ + {ID: "TRASH", Name: "Trash", SystemFolder: "trash"}, + {ID: "INBOX", Name: "Inbox", SystemFolder: "inbox"}, + {ID: "LABEL_42", Name: "Custom", SystemFolder: ""}, + {ID: "SENT", Name: "Sent", SystemFolder: "sent"}, + {ID: "DRAFTS", Name: "Drafts", SystemFolder: "drafts"}, + } + + var mu sync.Mutex + calls := []string{} + client.GetMessagesWithParamsFunc = func(_ context.Context, _ string, p *domain.MessageQueryParams) ([]domain.Message, error) { + mu.Lock() + if len(p.In) > 0 { + calls = append(calls, p.In[0]) + } + mu.Unlock() + // Return one message per folder so we can also assert the cache + // got hydrated. + return []domain.Message{{ + ID: "msg-" + p.In[0], + Subject: "Hi from " + p.In[0], + Folders: []string{p.In[0]}, + Date: time.Now(), + }}, nil + } + client.GetMessagesFunc = func(context.Context, string, int) ([]domain.Message, error) { + t.Fatal("unfiltered GetMessages must not be called when system folders are available") + return nil, nil + } + + server.syncEmails(t.Context(), accountEmail, "grant-123", folders) + + mu.Lock() + got := append([]string(nil), calls...) + mu.Unlock() + + // Priority order: inbox, sent, drafts, archive, trash, spam. Custom + // label must be skipped. We only declared four primary folders. + want := []string{"INBOX", "SENT", "DRAFTS", "TRASH"} + assert.Equal(t, want, got) + + // The cache should now hold one message per primary folder. + store, err := server.getEmailStore(accountEmail) + if err != nil { + t.Fatalf("get email store: %v", err) + } + for _, fid := range want { + got, err := store.Get("msg-" + fid) + if err != nil || got == nil { + t.Fatalf("expected message msg-%s in cache, got err=%v", fid, err) + } + } +} + +// Pins the fallback: when no folders are available (folder-API outage), +// syncEmails must still hydrate the cache using the unfiltered top-N fetch +// — losing folder coverage but keeping forward progress. +func TestSyncEmails_FallsBackWhenFoldersEmpty(t *testing.T) { + t.Parallel() + + server, client, accountEmail := newCachedTestServer(t) + + called := false + client.GetMessagesFunc = func(context.Context, string, int) ([]domain.Message, error) { + called = true + return []domain.Message{{ID: "msg-fallback", Date: time.Now()}}, nil + } + client.GetMessagesWithParamsFunc = func(context.Context, string, *domain.MessageQueryParams) ([]domain.Message, error) { + t.Fatal("per-folder fetch must not run when folder list is empty") + return nil, nil + } + + server.syncEmails(t.Context(), accountEmail, "grant-123", nil) + + if !called { + t.Fatal("expected fallback GetMessages call") + } + store, err := server.getEmailStore(accountEmail) + if err != nil { + t.Fatalf("get email store: %v", err) + } + got, err := store.Get("msg-fallback") + if err != nil || got == nil { + t.Fatalf("expected fallback message in cache, err=%v", err) + } +} + +// Pins primarySystemFolderIDs ordering and dedup directly so the priority +// contract is regression-protected even if the calling code changes. +func TestPrimarySystemFolderIDs_OrdersAndFilters(t *testing.T) { + t.Parallel() + + folders := []domain.Folder{ + {ID: "SPAM", SystemFolder: "spam"}, + {ID: "LABEL_FOO", SystemFolder: ""}, + {ID: "INBOX", SystemFolder: "INBOX"}, // upper-case to verify normalization + {ID: "INBOX", SystemFolder: "inbox"}, // duplicate, skip + {ID: "ARCHIVE", SystemFolder: "archive"}, + {ID: "SENT", SystemFolder: " sent "}, // padded, verify trim + } + + got := primarySystemFolderIDs(folders) + assert.Equal(t, []string{"INBOX", "SENT", "ARCHIVE", "SPAM"}, got) +} + +func TestPrimarySystemFolderIDs_EmptyInput(t *testing.T) { + t.Parallel() + if got := primarySystemFolderIDs(nil); got != nil { + t.Fatalf("expected nil for nil input, got %v", got) + } + if got := primarySystemFolderIDs([]domain.Folder{}); got != nil { + t.Fatalf("expected nil for empty input, got %v", got) + } +} + func TestSyncContacts_NoCacheManager(t *testing.T) { t.Parallel() @@ -202,15 +328,11 @@ func TestSyncAccountLoop_StopsOnChannel(t *testing.T) { cacheSettings: cache.DefaultSettings(), } - // Add to wait group before starting - server.syncWg.Add(1) - - // Start the loop in a goroutine - done := make(chan struct{}) - go func() { + // Spawn via WaitGroup.Go to mirror production. WaitGroup.Go handles + // Add/Done for us, so we no longer call Add manually here. + server.syncWg.Go(func() { server.syncAccountLoop(stopCh, "test@example.com", "grant-123") - close(done) - }() + }) // Give it a moment to start time.Sleep(10 * time.Millisecond) @@ -218,16 +340,19 @@ func TestSyncAccountLoop_StopsOnChannel(t *testing.T) { // Signal stop close(stopCh) - // Wait for it to finish with timeout + // Wait for it to finish with timeout via a sentinel goroutine. + done := make(chan struct{}) + go func() { + server.syncWg.Wait() + close(done) + }() + select { case <-done: // Success case <-time.After(2 * time.Second): t.Error("syncAccountLoop did not stop in time") } - - // Verify wait group is decremented - server.syncWg.Wait() } func TestSyncAccountLoop_MinInterval(t *testing.T) { diff --git a/internal/air/static/css/calendar-grid.css b/internal/air/static/css/calendar-grid.css index af32338..f3a2044 100644 --- a/internal/air/static/css/calendar-grid.css +++ b/internal/air/static/css/calendar-grid.css @@ -279,7 +279,11 @@ transform: translateY(-2px); } - /* Edit button - top right corner */ + /* Edit button - top right corner. The "starting now" pill sits in + the same area, so we lift the edit button above it and stop the + pill from capturing pointer events; otherwise hovering an + in-progress event shows the edit affordance the user cannot + click. */ .event-card .event-edit-btn { position: absolute !important; top: 8px; @@ -296,6 +300,7 @@ cursor: pointer; opacity: 0; transition: all 0.2s; + z-index: 2; } .event-card:hover .event-edit-btn { @@ -349,6 +354,9 @@ border-radius: 10px; background: var(--bg-surface); color: var(--text-muted); + /* Decorative; never intercept clicks meant for the edit button + that sits in the same upper-right region. */ + pointer-events: none; } .event-relative-time.starting-now { diff --git a/internal/air/static/css/features-ui.css b/internal/air/static/css/features-ui.css index fb4e210..e5fd77d 100644 --- a/internal/air/static/css/features-ui.css +++ b/internal/air/static/css/features-ui.css @@ -16,6 +16,12 @@ border: none; } + /* In focus mode with no email selected, hide the empty-state preview entirely + so the user sees a clean canvas instead of a stranded "Select an email" card. */ + .focus-mode-active .preview-pane:has(> .empty-state:only-child) { + display: none; + } + .focus-mode-active .preview-body { font-size: 17px; line-height: 1.8; diff --git a/internal/air/static/css/notetaker.css b/internal/air/static/css/notetaker.css index 0fb084f..ce47eab 100644 --- a/internal/air/static/css/notetaker.css +++ b/internal/air/static/css/notetaker.css @@ -12,7 +12,7 @@ .nt-tab:hover { color: var(--text-primary); } .nt-actions { display: flex; gap: 8px; } .nt-grid { display: grid; grid-template-columns: repeat(auto-fill, minmax(280px, 1fr)); gap: 20px; } -.nt-empty { grid-column: 1 / -1; text-align: center; padding: 60px 20px; } +.nt-empty { grid-column: 1 / -1; display: flex; flex-direction: column; align-items: center; justify-content: center; text-align: center; padding: 60px 20px; } .nt-empty-icon { font-size: 48px; margin-bottom: 16px; } .nt-empty h3 { margin: 0 0 8px; color: var(--text-primary); } .nt-empty p { margin: 0 0 20px; color: var(--text-secondary); } diff --git a/internal/air/static/css/preview.css b/internal/air/static/css/preview.css index f28c75d..21aec9e 100644 --- a/internal/air/static/css/preview.css +++ b/internal/air/static/css/preview.css @@ -478,3 +478,189 @@ color: var(--text-muted); } + + /* ----------------------------------------------------------- */ + /* Calendar invite card (Gmail-style) */ + /* Rendered inside .email-detail when an email has a */ + /* text/calendar attachment, populated by */ + /* EmailListManager.loadAndRenderInvite(). */ + /* ----------------------------------------------------------- */ + + .calendar-invite-card-slot[hidden] { + display: none; + } + + .calendar-invite-card { + margin: 0 var(--space-4) var(--space-3) var(--space-4); + padding: var(--space-3) var(--space-4); + border: 1px solid var(--border); + border-radius: 12px; + background: linear-gradient( + 135deg, + rgba(99, 102, 241, 0.08), + rgba(139, 92, 246, 0.05) + ); + } + + .calendar-invite-header { + display: flex; + align-items: center; + gap: var(--space-3); + margin-bottom: var(--space-2); + } + + .calendar-invite-icon { + font-size: 28px; + line-height: 1; + } + + .calendar-invite-when { + display: flex; + flex-direction: column; + gap: 2px; + } + + .calendar-invite-time { + font-size: 12px; + font-weight: 600; + color: var(--accent); + text-transform: uppercase; + letter-spacing: 0.04em; + } + + .calendar-invite-title { + font-size: 18px; + font-weight: 600; + color: var(--text-primary); + } + + .calendar-invite-location, + .calendar-invite-org { + font-size: 13px; + color: var(--text-muted); + margin-top: 4px; + } + + .calendar-invite-link { + display: inline-block; + margin-top: var(--space-2); + font-size: 13px; + color: var(--accent); + text-decoration: none; + } + + .calendar-invite-link:hover { + text-decoration: underline; + } + + .calendar-invite-actions { + display: flex; + flex-wrap: wrap; + gap: var(--space-2); + margin-top: var(--space-3); + } + + .calendar-invite-btn { + padding: 6px 16px; + border-radius: 9999px; + border: 1px solid var(--border); + background: var(--bg-elevated); + color: var(--text-primary); + font-size: 13px; + font-weight: 500; + cursor: pointer; + transition: background-color 0.12s ease, border-color 0.12s ease; + } + + .calendar-invite-btn:hover { + background: var(--bg-hover); + } + + .calendar-invite-btn.primary { + background: var(--accent); + border-color: var(--accent); + color: #fff; + } + + .calendar-invite-btn.primary:hover { + filter: brightness(1.05); + } + + .calendar-invite-btn.active { + outline: 2px solid var(--accent); + outline-offset: 1px; + } + + /* Cancellation banner — shown when METHOD:CANCEL or */ + /* STATUS:CANCELLED is present. Replaces the RSVP buttons so */ + /* the user can't RSVP to a dead invitation. */ + .calendar-invite-banner { + margin-bottom: var(--space-2); + padding: 6px 12px; + border-radius: 8px; + font-size: 12px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.04em; + } + + .calendar-invite-banner-cancel { + background: rgba(220, 38, 38, 0.12); + color: #dc2626; + } + + .calendar-invite-card.is-cancelled .calendar-invite-title { + text-decoration: line-through; + color: var(--text-muted); + } + + /* Attendee summary line + per-attendee chip list. Mirrors */ + /* Gmail's "3 going · 1 declined" header and the individual */ + /* response pills. */ + .calendar-invite-attendees { + margin-top: var(--space-3); + } + + .calendar-invite-summary { + font-size: 12px; + color: var(--text-muted); + margin-bottom: 6px; + } + + .calendar-invite-attendee-list { + display: flex; + flex-wrap: wrap; + gap: 6px; + } + + .calendar-invite-attendee { + font-size: 12px; + padding: 3px 9px; + border-radius: 9999px; + background: var(--bg-elevated); + border: 1px solid var(--border); + color: var(--text-primary); + } + + .calendar-invite-attendee.is-accepted { + background: rgba(34, 197, 94, 0.12); + border-color: rgba(34, 197, 94, 0.3); + color: #16a34a; + } + + .calendar-invite-attendee.is-declined { + background: rgba(220, 38, 38, 0.12); + border-color: rgba(220, 38, 38, 0.3); + color: #dc2626; + text-decoration: line-through; + } + + .calendar-invite-attendee.is-tentative { + background: rgba(234, 179, 8, 0.12); + border-color: rgba(234, 179, 8, 0.3); + color: #ca8a04; + } + + .calendar-invite-attendee.is-overflow { + color: var(--text-muted); + } diff --git a/internal/air/static/js/actions-registry.js b/internal/air/static/js/actions-registry.js index a79e1be..88607fc 100644 --- a/internal/air/static/js/actions-registry.js +++ b/internal/air/static/js/actions-registry.js @@ -118,10 +118,10 @@ document.addEventListener('DOMContentLoaded', function() { // ========== CALENDAR ========== Actions.register('create-event', function() { - if (typeof EventModal !== 'undefined' && typeof EventModal.openNew === 'function') { - EventModal.openNew(); + if (typeof EventModal !== 'undefined' && typeof EventModal.open === 'function') { + EventModal.open(); } else { - console.warn('create-event: EventModal.openNew is not available'); + console.warn('create-event: EventModal.open is not available'); } }); Actions.register('open-find-time-modal', function() { diff --git a/internal/air/static/js/calendar-ui.js b/internal/air/static/js/calendar-ui.js index 5431f58..818a7e7 100644 --- a/internal/air/static/js/calendar-ui.js +++ b/internal/air/static/js/calendar-ui.js @@ -281,8 +281,8 @@ renderEventCard(event) { const participantsHtml = event.participants && event.participants.length > 0 ? `