From e6ee0d6fe0cf3f8a72d59e90cc5d34d13c633b5e Mon Sep 17 00:00:00 2001 From: Haorui Zhou Date: Wed, 1 Apr 2026 19:59:26 -0400 Subject: [PATCH 1/8] Reformat SQL, use default db and bucket --- .gitignore | 3 + installer/docker-compose.yml | 1 - installer/grafana-bridge/server.js | 88 +++++++++++++++++++++++------- 3 files changed, 72 insertions(+), 20 deletions(-) diff --git a/.gitignore b/.gitignore index 558d34e..e6d1b28 100644 --- a/.gitignore +++ b/.gitignore @@ -223,3 +223,6 @@ generated-days/ # Node dependencies node_modules/ .claude/settings.local.json + +# Superpowers (AI agent specs/plans - not for version control) +docs/superpowers/ diff --git a/installer/docker-compose.yml b/installer/docker-compose.yml index 38e1b1f..2d4250f 100644 --- a/installer/docker-compose.yml +++ b/installer/docker-compose.yml @@ -305,7 +305,6 @@ services: GRAFANA_INTERNAL_URL: "http://grafana:3000" GRAFANA_EXTERNAL_URL: "https://grafana.westernformularacing.org" GRAFANA_API_TOKEN: "${GRAFANA_API_TOKEN}" - GRAFANA_DATASOURCE_UID: "influxdb-wfr-v2" GRAFANA_FOLDER_UID: "${GRAFANA_FOLDER_UID:-}" CORS_ORIGIN: "http://localhost:3000,https://pecan.westernformularacing.org,https://pecan-dev.westernformularacing.org" INFLUX_TABLE: "${INFLUX_DATABASE:-WFR26}" diff --git a/installer/grafana-bridge/server.js b/installer/grafana-bridge/server.js index 82a68b1..7bb6a29 100644 --- a/installer/grafana-bridge/server.js +++ b/installer/grafana-bridge/server.js @@ -31,11 +31,33 @@ const GRAFANA_EXTERNAL_URL = "https://grafana.westernformularacing.org"; const GRAFANA_API_TOKEN = process.env.GRAFANA_API_TOKEN; const GRAFANA_FOLDER_UID = process.env.GRAFANA_FOLDER_UID || ""; -const DATASOURCE_UID = process.env.GRAFANA_DATASOURCE_UID || "influxdb-wfr-v2"; // Only allow CAN signal name characters (alphanumeric, underscore, hyphen, dot) const SIGNAL_NAME_RE = /^[A-Za-z0-9_.\-]+$/; +// Regex to extract season name from InfluxDB datasource database field (e.g., "WFR26" from "WFR26@iox") +const SEASON_RE = /^(WFR\d+)/; + +// Fetch all InfluxDB datasources from Grafana and find the one matching the given season +async function findDatasourceUidForSeason(season) { + const response = await fetch(`${GRAFANA_INTERNAL_URL}/api/datasources`, { + headers: { Authorization: `Bearer ${GRAFANA_API_TOKEN}` }, + }); + if (!response.ok) { + throw new Error(`Grafana datasources API error: ${response.status}`); + } + const datasources = await response.json(); + for (const ds of datasources) { + if (ds.type === "influxdb" && ds.name) { + const match = SEASON_RE.exec(ds.name); + if (match && match[1] === season) { + return ds.uid; + } + } + } + throw new Error(`No InfluxDB datasource found for season: ${season}`); +} + function validateSignalName(name) { if (typeof name !== "string" || name.length === 0 || name.length > 128) { return false; @@ -43,39 +65,38 @@ function validateSignalName(name) { return SIGNAL_NAME_RE.test(name); } -const INFLUX_TABLE = process.env.INFLUX_TABLE || "WFR26"; - -function buildQuery(signalName) { +function buildQuery(signalName, season) { return [ "SELECT", - ' DATE_BIN(INTERVAL \'100 milliseconds\', t."time", TIMESTAMP \'1970-01-01 00:00:00\') AS "time",', - ` AVG(t."${signalName}") AS "value"`, + ' DATE_BIN(INTERVAL \'100 milliseconds\', "time", TIMESTAMP \'1970-01-01 00:00:00\') AS "time",', + ` AVG("${signalName}") AS "${signalName}"`, "FROM", - ` "iox"."${INFLUX_TABLE}" AS t`, + ` "iox"."${season}"`, "WHERE", - ' t."time" >= $__timeFrom()', - ' AND t."time" <= $__timeTo()', + ' "time" >= $__timeFrom()', + ' AND "time" 
<= $__timeTo()', "GROUP BY", - ' 1', + ' DATE_BIN(INTERVAL \'100 milliseconds\', "time", TIMESTAMP \'1970-01-01 00:00:00\')', "ORDER BY", ' "time" ASC', ].join("\n"); } -function buildPanel(signalName, index) { +function buildPanel(signalName, index, dsUid, season) { return { type: "timeseries", title: signalName, - datasource: { - type: "influxdb", - uid: DATASOURCE_UID, - }, targets: [ { refId: "A", - query: buildQuery(signalName), + dataset: "iox", + datasource: { + type: "influxdb", + uid: dsUid, + }, rawQuery: true, - resultFormat: "time_series", + rawSql: buildQuery(signalName, season), + format: "time_series", }, ], gridPos: { @@ -140,9 +161,19 @@ router.post("/api/grafana/create-dashboard", async (req, res) => { const uid = "pecan_" + crypto.randomBytes(4).toString("hex"); const now = new Date(); + const currentSeason = `WFR${now.getFullYear() % 100}`; const title = `PECAN Analysis - ${now.toISOString().replace("T", " ").substring(0, 16)}`; - const panels = signalNames.map((name, i) => buildPanel(name, i)); + // Fetch the real datasource UID for the current season + let dsUid; + try { + dsUid = await findDatasourceUidForSeason(currentSeason); + } catch (err) { + console.error("Failed to find datasource UID:", err.message); + return res.status(500).json({ error: `Failed to resolve datasource for ${currentSeason}: ${err.message}` }); + } + + const panels = signalNames.map((name, i) => buildPanel(name, i, dsUid, currentSeason)); const payload = { dashboard: { @@ -154,7 +185,26 @@ router.post("/api/grafana/create-dashboard", async (req, res) => { schemaVersion: 39, version: 0, panels, - time: { from: "now-1h", to: "now" }, + time: { from: "now-24h", to: "now" }, + variables: { + list: [ + { + kind: "DatasourceVariable", + spec: { + name: "year", + label: "Year", + current: { text: currentSeason, value: dsUid }, + hide: "", + multi: false, + includeAll: false, + pluginId: "influxdb", + regex: "WFR2[0-9]+", + skipUrlSync: false, + refresh: 1, + }, + }, + ], + }, }, overwrite: false, }; From c51e242c999d66a5d1b4a69876d6140669ff03ea Mon Sep 17 00:00:00 2001 From: "WFR DAQ Server (ovh)" Date: Wed, 8 Apr 2026 02:26:55 +0000 Subject: [PATCH 2/8] Add memory limits to all services, disable lap-detector - Set per-service memory limits to prevent OOM crashes - influxdb3: 4096M, file-uploader: 1536M, data-downloader-api: 1024M - sandbox: 1024M, grafana: 512M, lap-detector: 512M (disabled) - slackbot/health-monitor/grafana-bridge/influxdb3-explorer: 256M - data-downloader-frontend: 128M, code-generator: 512M - Disable lap-detector via profiles (not needed currently) --- installer/docker-compose.yml | 58 ++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/installer/docker-compose.yml b/installer/docker-compose.yml index 2d4250f..916f012 100644 --- a/installer/docker-compose.yml +++ b/installer/docker-compose.yml @@ -36,6 +36,10 @@ services: timeout: 5s retries: 5 start_period: 300s + deploy: + resources: + limits: + memory: 4096M influxdb3-explorer: image: influxdata/influxdb3-ui:latest @@ -54,6 +58,10 @@ services: - datalink depends_on: - influxdb3 + deploy: + resources: + limits: + memory: 256M grafana: image: grafana/grafana @@ -88,6 +96,10 @@ services: - ./grafana/dashboards:/var/lib/grafana/dashboards:ro networks: - datalink + deploy: + resources: + limits: + memory: 512M slackbot: build: ./slackbot @@ -122,11 +134,17 @@ services: networks: - datalink - default + deploy: + resources: + limits: + memory: 256M lap-detector: build: ./lap-detector 
container_name: lap-detector - restart: unless-stopped + restart: "no" + profiles: + - disabled ports: - "8050:8050" working_dir: /app @@ -135,6 +153,10 @@ services: command: python lap-detector.py networks: - datalink + deploy: + resources: + limits: + memory: 512M startup-data-loader: build: ./startup-data-loader @@ -161,6 +183,10 @@ services: sh -c "echo 'Waiting additional 5 seconds for InfluxDB 3 to stabilize...' && sleep 5 && python load_data.py" + deploy: + resources: + limits: + memory: 512M file-uploader: build: ./file-uploader @@ -184,7 +210,7 @@ services: resources: limits: cpus: "1" - memory: 1024M + memory: 1536M data-downloader-api: build: @@ -210,6 +236,10 @@ services: depends_on: influxdb3: condition: service_healthy + deploy: + resources: + limits: + memory: 1024M data-downloader-scanner: build: @@ -234,6 +264,10 @@ services: depends_on: data-downloader-api: condition: service_started + deploy: + resources: + limits: + memory: 512M health-monitor: build: ./health-monitor @@ -256,6 +290,10 @@ services: condition: service_healthy data-downloader-api: condition: service_started + deploy: + resources: + limits: + memory: 256M data-downloader-frontend: build: @@ -272,6 +310,10 @@ services: depends_on: data-downloader-api: condition: service_started + deploy: + resources: + limits: + memory: 128M # Custom sandbox execution container (with internet access for InfluxDB queries) sandbox: @@ -293,6 +335,10 @@ services: - influxdb3 networks: - datalink + deploy: + resources: + limits: + memory: 1024M # Grafana bridge - Pecan → Grafana dashboard creation API grafana-bridge: @@ -312,6 +358,10 @@ services: - grafana networks: - datalink + deploy: + resources: + limits: + memory: 256M # Code generator - Cohere integration for AI-powered code generation code-generator: @@ -338,3 +388,7 @@ services: - ./sandbox/prompt-guide.txt:/app/prompt-guide.txt networks: - datalink + deploy: + resources: + limits: + memory: 512M From d4c3e73538aaf876e5e5365bee4f93c7fea73a32 Mon Sep 17 00:00:00 2001 From: "WFR DAQ Server (ovh)" Date: Wed, 8 Apr 2026 02:28:47 +0000 Subject: [PATCH 3/8] Add VPS recovery guide for OOM crash scenarios --- installer/VPS_RECOVERY.md | 197 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 197 insertions(+) create mode 100644 installer/VPS_RECOVERY.md diff --git a/installer/VPS_RECOVERY.md b/installer/VPS_RECOVERY.md new file mode 100644 index 0000000..db01915 --- /dev/null +++ b/installer/VPS_RECOVERY.md @@ -0,0 +1,197 @@ +# VPS Recovery Guide — OVH vps-1969c8c2 + +## What happened + +The server ran out of memory (OOM). The Linux kernel started killing processes to survive, including: +- `containerd` (Docker runtime) — taking down all containers +- `cloudflared` binary — deleted from disk +- `tailscaled` binary — deleted from disk +- `containerd-shim-runc-v2` binary — deleted from disk + +OVH detected the unresponsive server and rebooted it into **rescue mode**. + +--- + +## Step 1 — Exit rescue mode + +OVH boots into rescue mode automatically when the server crashes hard. You need to manually switch it back. + +1. Go to [OVH control panel](https://www.ovh.com/manager/) → Bare Metal Cloud → VPS → `vps-1969c8c2.vps.ovh.ca` +2. Find the **Boot** field (shows `RESCUE`) — click the pencil/edit icon +3. Change to **Hard disk** (normal mode) +4. Click **Reboot** + +> The server will come up clean — no Docker containers will auto-start (all are `restart=no` or `restart=unless-stopped` but Docker itself won't be running until the daemon starts). 
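A quick way to confirm Docker itself is healthy after the reboot (a minimal sketch; assumes the stock systemd unit names on Ubuntu):

```bash
# Docker should be enabled so it starts on boot
systemctl is-enabled docker        # expect: enabled
sudo systemctl start docker        # start the daemon now if it is not running
docker ps                          # previously-running unless-stopped containers come back
```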
+ +--- + +## Step 2 — Fix SSH known_hosts + +The rescue OS has a different host key, so SSH will warn you. After rebooting to normal mode, clear the old key: + +```bash +ssh-keygen -R 148.113.191.22 +``` + +Then connect: +```bash +ssh ubuntu@148.113.191.22 +# or via Tailscale: +ssh ubuntu@ovh-daq-server +``` + +--- + +## Step 3 — Restore missing binaries + +The OOM killer can delete binaries from disk. Check and fix each one: + +### cloudflared +```bash +sudo systemctl status cloudflared +# If "status=203/EXEC" — binary is missing + +curl -L https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64 \ + -o /tmp/cloudflared +sudo mv /tmp/cloudflared /usr/bin/cloudflared +sudo chmod +x /usr/bin/cloudflared +sudo systemctl restart cloudflared +sudo systemctl status cloudflared +``` + +### tailscale +```bash +sudo systemctl status tailscaled +# If "status=203/EXEC" — binary is missing + +sudo apt-get install --reinstall tailscale -y +sudo systemctl restart tailscaled +tailscale status +``` + +### containerd (Docker runtime) +```bash +# If Docker containers fail to start with: +# "containerd-shim-runc-v2: file does not exist" + +sudo apt-get install --reinstall containerd.io -y +sudo systemctl daemon-reload +sudo systemctl start docker +docker info | grep "Server Version" +``` + +--- + +## Step 4 — Start the Docker stack + +```bash +cd /home/ubuntu/projects/daq-server-components/installer +docker compose up -d +``` + +Wait ~30 seconds for InfluxDB to become healthy, then verify: + +```bash +docker ps --format 'table {{.Names}}\t{{.Status}}' +``` + +Expected running containers: +| Container | Notes | +|---|---| +| influxdb3 | Should show `(healthy)` | +| influxdb3-explorer | InfluxDB UI | +| grafana | Dashboard | +| grafana-bridge | Grafana API bridge | +| file-uploader | | +| data-downloader-api | Waits for influxdb3 healthy | +| data-downloader-scanner | | +| data-downloader-frontend | | +| health-monitor | | +| sandbox | | +| code-generator | | +| slackbot | Exits cleanly if `ENABLE_SLACK=false` | +| startup-data-loader | Runs once then exits — normal | + +> `lap-detector` is intentionally disabled. To run it: `docker compose --profile disabled up lap-detector -d` + +--- + +## Step 5 — Verify Cloudflare tunnel + +```bash +sudo systemctl status cloudflared +# Should show "Registered tunnel connection" in logs +``` + +Check that https://grafana.westernformularacing.org loads. + +--- + +## Investigating an OOM crash + +If the server crashed again and you're in rescue mode: + +```bash +# Mount original disk +mkdir -p /mnt/vps +mount /dev/sdb1 /mnt/vps + +# Check what got OOM-killed and when +journalctl --directory=/mnt/vps/var/log/journal \ + --since="2 hours ago" --no-pager \ + | grep -iE "oom|killed|memory" | head -50 + +# Check disk usage +df -h /mnt/vps +du -sh /mnt/vps/var/lib/docker /mnt/vps/var/lib/containerd /mnt/vps/var/log/journal + +# Vacuum logs if journal is large (>500MB) +journalctl --vacuum-size=200M +``` + +--- + +## Preventing OOM crashes + +Memory limits are now set in `docker-compose.yml`. Key limits: + +| Service | Limit | +|---|---| +| influxdb3 | 4096M | +| file-uploader | 1536M | +| data-downloader-api | 1024M | +| sandbox | 1024M | +| grafana | 512M | +| others | 128–512M | + +**Total ceiling: ~9GB** across all services (server has 8GB RAM + 8GB swap). 
+ +If OOM happens again, check which container hit its limit: +```bash +docker stats --no-stream +# or check logs: +sudo journalctl -u docker --since="1 hour ago" | grep -i "oom\|killed" +``` + +Daily restart is scheduled at 4 AM to clear any memory accumulation: +```bash +crontab -l # shows: 0 4 * * * docker compose restart +# Logs at: /var/log/docker-restart.log +``` + +--- + +## Quick reference + +| Service | Port | +|---|---| +| InfluxDB | 9000 | +| InfluxDB Explorer | 8888 | +| Grafana | 8087 (also via Cloudflare tunnel) | +| Grafana Bridge | 3001 | +| File Uploader | 8084 | +| Data Downloader API | 8000 | +| Data Downloader Frontend | 3000 | +| Lap Detector (disabled) | 8050 | + +Tailscale IP: `100.72.11.60` (hostname: `ovh-daq-server`) From fa928615f54e0932aefd5790b471f52e78e59764 Mon Sep 17 00:00:00 2001 From: "WFR DAQ Server (ovh)" Date: Wed, 8 Apr 2026 03:04:06 +0000 Subject: [PATCH 4/8] Consolidate to single WFR database, seasons as tables - file-uploader: write to WFR database, season name as measurement table - file-uploader: getSeasons() queries WFR tables, falls back to SEASONS env var - file-uploader: UI label Bucket -> Season - data-downloader: all seasons share INFLUX_DATABASE (WFR), table=season name - .env: INFLUX_DATABASE=WFR, INFLUXDB_DATABASE=WFR (updated separately, gitignored) --- installer/data-downloader/backend/config.py | 9 ++--- installer/file-uploader/app.py | 36 +++++++++++++------- installer/file-uploader/helper.py | 7 ++-- installer/file-uploader/templates/index.html | 8 ++--- 4 files changed, 37 insertions(+), 23 deletions(-) diff --git a/installer/data-downloader/backend/config.py b/installer/data-downloader/backend/config.py index 27683d3..19b2b2c 100644 --- a/installer/data-downloader/backend/config.py +++ b/installer/data-downloader/backend/config.py @@ -24,7 +24,7 @@ def _parse_seasons(raw: str | None) -> List[SeasonConfig]: """Parse SEASONS env var: "WFR25:2025:222 76 153,WFR26:2026:...".""" if not raw: # Default fallback if not set - return [SeasonConfig(name="WFR25", year=2025, database="WFR25", table="WFR25", color="#DE4C99")] + return [SeasonConfig(name="WFR25", year=2025, database=os.getenv("INFLUX_DATABASE", "WFR"), table="WFR25", color="#DE4C99")] seasons = [] for part in raw.split(","): @@ -46,13 +46,14 @@ def _parse_seasons(raw: str | None) -> List[SeasonConfig]: color = parts[2] if len(parts) > 2 else None - # DB and table name both match season name by convention (WFR25→WFR25, WFR26→WFR26) - seasons.append(SeasonConfig(name=name, year=year, database=name, table=name, color=color)) + # All seasons share one database; table name matches season name (WFR25, WFR26, etc.) 
+ shared_db = os.getenv("INFLUX_DATABASE", "WFR") + seasons.append(SeasonConfig(name=name, year=year, database=shared_db, table=name, color=color)) except ValueError: continue if not seasons: - return [SeasonConfig(name="WFR25", year=2025, database="WFR25", table="WFR25")] + return [SeasonConfig(name="WFR25", year=2025, database=os.getenv("INFLUX_DATABASE", "WFR"), table="WFR25")] # Sort by year descending (newest first) seasons.sort(key=lambda s: s.year, reverse=True) diff --git a/installer/file-uploader/app.py b/installer/file-uploader/app.py index 1876c0b..8799459 100644 --- a/installer/file-uploader/app.py +++ b/installer/file-uploader/app.py @@ -28,6 +28,7 @@ DEBUG: bool = bool(int(os.getenv("DEBUG") or 0)) INFLUXDB_TOKEN = os.getenv("INFLUXDB_TOKEN") INFLUXDB_URL = os.getenv("INFLUXDB_URL", "http://influxdb3:8181") +INFLUXDB_DATABASE = os.getenv("INFLUXDB_DATABASE", "WFR") app = Flask(__name__) @@ -35,17 +36,28 @@ def allowed_file(filename): return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS -def getBuckets() -> list[str]: +def _seasons_from_env() -> list[str]: + """Fallback: parse SEASONS env var (format: 'WFR25:2025,WFR26:2026').""" + raw = os.getenv("SEASONS", "") + seasons = [part.split(":")[0].strip() for part in raw.split(",") if part.strip()] + return sorted(seasons, reverse=True) if seasons else ["WFR26", "WFR25"] + + +def getSeasons() -> list[str]: + """Return list of season/table names from the WFR database, falling back to env var.""" api_url = f"{INFLUXDB_URL.rstrip('/')}/api/v3/query_sql" - res = requests.post( - api_url, - headers={"Authorization": f"Token {INFLUXDB_TOKEN}", "Content-Type": "application/json"}, - json={"db": "_internal", "q": "SELECT database_name FROM system.databases WHERE deleted = false", "format": "json"}, - timeout=10, - ) - res.raise_for_status() - internal = {"_internal", "monitoring"} - return [row["database_name"] for row in res.json() if row["database_name"] not in internal] + try: + res = requests.post( + api_url, + headers={"Authorization": f"Token {INFLUXDB_TOKEN}", "Content-Type": "application/json"}, + json={"db": INFLUXDB_DATABASE, "q": "SELECT DISTINCT table_name FROM information_schema.tables", "format": "json"}, + timeout=10, + ) + res.raise_for_status() + seasons = [row["table_name"] for row in res.json() if not row["table_name"].startswith("_")] + return sorted(seasons, reverse=True) if seasons else _seasons_from_env() + except Exception: + return _seasons_from_env() # This function can send Slack messages to a channel @@ -68,7 +80,7 @@ def index(): file_name=CURRENT_FILE["name"], task_id=CURRENT_FILE["task_id"], current_bucket=CURRENT_FILE["bucket"], - bucket_names=getBuckets(), + bucket_names=getSeasons(), ) @@ -168,7 +180,7 @@ def on_progress(sent: int, total: int): def worker(): # Auto-configure streamer for file size with InfluxDB-safe settings file_size_mb = total_size / (1024 * 1024) - streamer = CANInfluxStreamer(bucket, dbc_path=dbc_temp_path) + streamer = CANInfluxStreamer(bucket=INFLUXDB_DATABASE, table=bucket, dbc_path=dbc_temp_path) try: # Process multiple CSV files using the new method diff --git a/installer/file-uploader/helper.py b/installer/file-uploader/helper.py index 992041a..aad4b66 100644 --- a/installer/file-uploader/helper.py +++ b/installer/file-uploader/helper.py @@ -59,7 +59,7 @@ class ProgressStats: class CANInfluxStreamer: def __init__( - self, bucket: str, batch_size: int = 500, max_concurrent_uploads: int = 5, + self, bucket: str, table: str, batch_size: int = 500, 
max_concurrent_uploads: int = 5, enable_progress_counting: bool = True, max_queue_size: int = 100, rate_limit_delay: float = 0.01, max_retries: int = 3, adaptive_backoff: bool = True, dbc_path: Optional[str] = None @@ -67,7 +67,8 @@ def __init__( self.batch_size = batch_size self.max_concurrent_uploads = max_concurrent_uploads - self.bucket = bucket + self.bucket = bucket # InfluxDB database name (e.g. "WFR") + self.table = table # Measurement/table name (e.g. "WFR26") self.enable_progress_counting = enable_progress_counting self.max_queue_size = max_queue_size self.rate_limit_delay = rate_limit_delay @@ -353,7 +354,7 @@ def _parse_row_generator( return pt = ( - Point(self.bucket) + Point(self.table) .tag("messageName", frame.message_name) .tag("canId", str(can_id)) .time(timestamp) diff --git a/installer/file-uploader/templates/index.html b/installer/file-uploader/templates/index.html index 40db717..26bb6af 100644 --- a/installer/file-uploader/templates/index.html +++ b/installer/file-uploader/templates/index.html @@ -18,9 +18,9 @@

 [hunk garbled in extraction: the HTML tag markup was stripped, leaving only the
 text nodes "Upload Files to InfluxDB" and "(multiple csv files supported)".
 Per the commit message, this hunk relabels the upload form's "Bucket" control
 to "Season".]

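The net effect of this patch on writes: the season name moves from the database into the measurement, so one shared `WFR` database holds a table per season. A hedged sketch of the resulting write (`/api/v3/write_lp` is InfluxDB 3's standard line-protocol endpoint; the signal name and value here are invented, and port 9000 is the host mapping listed in the recovery guide above):

```bash
# One shared database (WFR); the season (WFR26) is the measurement/table.
# Tags mirror helper.py's line protocol: messageName and canId.
curl -s -X POST "http://localhost:9000/api/v3/write_lp?db=WFR" \
  -H "Authorization: Token $INFLUXDB_TOKEN" \
  --data-binary 'WFR26,messageName=BMS_Status,canId=512 PackVoltage=398.4'
```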
From e44c9b56e6aed4adaecfeae86f35573715de1798 Mon Sep 17 00:00:00 2001
From: Haorui Zhou
Date: Sun, 12 Apr 2026 12:34:49 -0400
Subject: [PATCH 5/8] Fix "new season" creating a new database instead of a table

---
 installer/file-uploader/app.py | 34 ++++++++++++++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/installer/file-uploader/app.py b/installer/file-uploader/app.py
index 8799459..56f2d66 100644
--- a/installer/file-uploader/app.py
+++ b/installer/file-uploader/app.py
@@ -43,6 +43,23 @@ def _seasons_from_env() -> list[str]:
     return sorted(seasons, reverse=True) if seasons else ["WFR26", "WFR25"]
 
 
+def _table_create_conflict(response: requests.Response) -> bool:
+    """True if Influx rejected create because the table already exists (idempotent Add Season)."""
+    if response.status_code not in (400, 409):
+        return False
+    lowered = response.text.lower()
+    if any(s in lowered for s in ("already exists", "already exist", "duplicate")):
+        return True
+    try:
+        data = response.json()
+        err = str(data.get("error", "")).lower()
+        if any(s in err for s in ("already exists", "already exist", "duplicate")):
+            return True
+    except Exception:
+        pass
+    return False
+
+
 def getSeasons() -> list[str]:
     """Return list of season/table names from the WFR database, falling back to env var."""
     api_url = f"{INFLUXDB_URL.rstrip('/')}/api/v3/query_sql"
@@ -86,18 +103,31 @@ def index():
 
 @app.route("/create-bucket", methods=["POST"])
 def create_bucket():
+    """Create a new table (season) inside INFLUXDB_DATABASE, not a new InfluxDB database."""
     name = (request.json or {}).get("name", "").strip()
     if not name:
         return jsonify({"error": "No bucket name provided"}), 400
-    api_url = f"{INFLUXDB_URL.rstrip('/')}/api/v3/configure/database"
+    if len(name) > 256:
+        return jsonify({"error": "Name too long (max 256 characters)"}), 400
+
+    api_url = f"{INFLUXDB_URL.rstrip('/')}/api/v3/configure/table"
+    # Tags must match CANInfluxStreamer line protocol (helper._parse_row_generator).
+    payload = {
+        "db": INFLUXDB_DATABASE,
+        "table": name,
+        "tags": ["messageName", "canId"],
+        "fields": [],
+    }
     res = requests.post(
         api_url,
         headers={"Authorization": f"Token {INFLUXDB_TOKEN}", "Content-Type": "application/json"},
-        json={"db": name},
+        json=payload,
         timeout=10,
     )
     if res.status_code in (200, 201, 204):
         return jsonify({"name": name})
+    if _table_create_conflict(res):
+        return jsonify({"name": name})
     return jsonify({"error": res.text}), res.status_code

From fa928615f54e0932aefd5790b471f52e78e59764 Mon Sep 17 00:00:00 2001
From: Haorui Zhou
Date: Sun, 12 Apr 2026 12:49:09 -0400
Subject: [PATCH 6/8] Add optional GitHub team DBC selection/download

Introduce optional GitHub-based DBC support: add GITHUB_DBC_TOKEN/REPO/BRANCH
to .env.example and expose them through docker-compose. Implement server-side
helpers in file-uploader/app.py to list repository .dbc blobs and download a
selected .dbc to a temp file, and add a /dbc/list endpoint. Extend the upload
flow to accept either a custom uploaded .dbc or a repo path (dbc_github_path)
and enforce validation/error handling (the token is never sent to the client).

Update the UI and JS: add a team DBC select list, hint, and custom .dbc input;
load team DBCs via /dbc/list, manage select modes, append the chosen DBC to
the upload FormData, and improve response parsing and progress behavior.

Misc: small import/type-hint cleanup and various client-side UX tweaks.
--- installer/.env.example | 8 + installer/docker-compose.local.yml | 3 + installer/docker-compose.yml | 4 + installer/file-uploader/app.py | 155 +++++++++- installer/file-uploader/static/index.js | 293 +++++++++++-------- installer/file-uploader/templates/index.html | 9 +- 6 files changed, 340 insertions(+), 132 deletions(-) diff --git a/installer/.env.example b/installer/.env.example index b03d179..2fc3969 100644 --- a/installer/.env.example +++ b/installer/.env.example @@ -3,6 +3,14 @@ # ------------------------------------------------------------ DBC_FILE_PATH=example.dbc +# ------------------------------------------------------------ +# File uploader — team DBCs from GitHub (optional) +# ------------------------------------------------------------ +# Fine-grained PAT or classic PAT with contents:read on Western-Formula-Racing/DBC +GITHUB_DBC_TOKEN= +# GITHUB_DBC_REPO=Western-Formula-Racing/DBC +# GITHUB_DBC_BRANCH=main + # ------------------------------------------------------------ # InfluxDB credentials # ------------------------------------------------------------ diff --git a/installer/docker-compose.local.yml b/installer/docker-compose.local.yml index 210edcc..18553c6 100644 --- a/installer/docker-compose.local.yml +++ b/installer/docker-compose.local.yml @@ -77,6 +77,9 @@ services: - INFLUXDB_TOKEN=${INFLUXDB_ADMIN_TOKEN:-apiv3_dev-influxdb-admin-token} - INFLUXDB_URL=${INFLUXDB_URL:-http://influxdb3:8181} - DBC_FILE_PATH=/installer/example.dbc + - GITHUB_DBC_TOKEN=${GITHUB_DBC_TOKEN:-} + - GITHUB_DBC_REPO=${GITHUB_DBC_REPO:-Western-Formula-Racing/DBC} + - GITHUB_DBC_BRANCH=${GITHUB_DBC_BRANCH:-main} deploy: resources: limits: diff --git a/installer/docker-compose.yml b/installer/docker-compose.yml index 916f012..c9987dd 100644 --- a/installer/docker-compose.yml +++ b/installer/docker-compose.yml @@ -206,6 +206,10 @@ services: - FILE_UPLOADER_WEBHOOK_URL=${FILE_UPLOADER_WEBHOOK_URL:-} # Fixed container path only - DBC_FILE_PATH=/installer/example.dbc + # Optional: PAT with repo read to list/download .dbc from GitHub (never sent to browser) + - GITHUB_DBC_TOKEN=${GITHUB_DBC_TOKEN:-} + - GITHUB_DBC_REPO=${GITHUB_DBC_REPO:-Western-Formula-Racing/DBC} + - GITHUB_DBC_BRANCH=${GITHUB_DBC_BRANCH:-main} deploy: resources: limits: diff --git a/installer/file-uploader/app.py b/installer/file-uploader/app.py index 56f2d66..cc0764c 100644 --- a/installer/file-uploader/app.py +++ b/installer/file-uploader/app.py @@ -6,7 +6,9 @@ stream_with_context, Response, ) -import uuid, time, threading, json, io, logging, requests, os, asyncio +import uuid, time, threading, json, logging, requests, os, asyncio +from typing import Optional, Tuple, List +from urllib.parse import quote from helper import CANInfluxStreamer import traceback @@ -29,9 +31,78 @@ INFLUXDB_TOKEN = os.getenv("INFLUXDB_TOKEN") INFLUXDB_URL = os.getenv("INFLUXDB_URL", "http://influxdb3:8181") INFLUXDB_DATABASE = os.getenv("INFLUXDB_DATABASE", "WFR") +GITHUB_DBC_TOKEN = os.getenv("GITHUB_DBC_TOKEN", "").strip() +GITHUB_DBC_REPO = os.getenv("GITHUB_DBC_REPO", "Western-Formula-Racing/DBC").strip() +GITHUB_DBC_BRANCH = os.getenv("GITHUB_DBC_BRANCH", "main").strip() app = Flask(__name__) +def _github_repo_parts() -> Tuple[str, str]: + owner, slash, repo = GITHUB_DBC_REPO.partition("/") + if not slash or not owner or not repo: + raise ValueError("GITHUB_DBC_REPO must be owner/repo") + return owner, repo + + +def _github_headers() -> dict: + h = { + "Accept": "application/vnd.github+json", + "X-GitHub-Api-Version": "2022-11-28", + } + if 
GITHUB_DBC_TOKEN: + h["Authorization"] = f"Bearer {GITHUB_DBC_TOKEN}" + return h + + +def list_github_dbc_paths() -> Tuple[List[str], Optional[str]]: + """ + List .dbc blob paths under GITHUB_DBC_REPO at GITHUB_DBC_BRANCH (recursive tree). + Returns (paths_sorted, error_message_or_none). + """ + if not GITHUB_DBC_TOKEN: + return [], None + try: + owner, repo = _github_repo_parts() + except ValueError as e: + return [], str(e) + url = f"https://api.github.com/repos/{owner}/{repo}/git/trees/{GITHUB_DBC_BRANCH}?recursive=1" + try: + r = requests.get(url, headers=_github_headers(), timeout=20) + if r.status_code != 200: + return [], f"GitHub tree {r.status_code}: {r.text[:300]}" + tree = r.json().get("tree") or [] + paths = [ + x["path"] + for x in tree + if x.get("type") == "blob" and str(x.get("path", "")).lower().endswith(".dbc") + ] + return sorted(paths), None + except requests.RequestException as e: + return [], str(e) + + +def download_github_dbc_to_temp(repo_path: str) -> str: + """Download a repo-relative .dbc path to a temp file; return filesystem path.""" + owner, repo = _github_repo_parts() + enc = quote(repo_path, safe="") + url = f"https://api.github.com/repos/{owner}/{repo}/contents/{enc}?ref={GITHUB_DBC_BRANCH}" + r = requests.get( + url, + headers={**_github_headers(), "Accept": "application/vnd.github.raw"}, + timeout=120, + ) + if r.status_code != 200: + raise RuntimeError(f"GitHub download {r.status_code}: {r.text[:400]}") + import tempfile + + fd, tmp = tempfile.mkstemp(suffix=".dbc") + try: + os.write(fd, r.content) + finally: + os.close(fd) + return tmp + + def allowed_file(filename): return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS @@ -101,6 +172,24 @@ def index(): ) +@app.route("/dbc/list", methods=["GET"]) +def dbc_list(): + """List .dbc files from the configured GitHub repo (token never exposed to the client).""" + if not GITHUB_DBC_TOKEN: + return jsonify( + { + "token_configured": False, + "items": [], + "message": "GITHUB_DBC_TOKEN is not set; using optional custom upload or container default DBC.", + } + ) + paths, err = list_github_dbc_paths() + if err: + error_logger.warning("dbc_list GitHub error: %s", err) + return jsonify({"token_configured": True, "items": [], "error": err}) + return jsonify({"token_configured": True, "items": paths, "error": None}) + + @app.route("/create-bucket", methods=["POST"]) def create_bucket(): """Create a new table (season) inside INFLUXDB_DATABASE, not a new InfluxDB database.""" @@ -146,18 +235,62 @@ def upload_file(): bucket = request.form.get("bucket") if not bucket or bucket == "": return "No Bucket Provided", 400 - - # Handle optional custom DBC file + + dbc_github_path = (request.form.get("dbc_github_path") or "").strip() dbc_temp_path = None dbc_file = request.files.get("dbc") - if dbc_file and dbc_file.filename: - if not dbc_file.filename.lower().endswith(".dbc"): - return "Invalid DBC file type. 
Only .dbc files allowed.", 400 - import tempfile - with tempfile.NamedTemporaryFile(delete=False, suffix=".dbc") as tmp: - dbc_file.save(tmp) - dbc_temp_path = tmp.name - error_logger.info(f"Custom DBC uploaded: {dbc_file.filename} -> {dbc_temp_path}") + team_paths, _team_err = list_github_dbc_paths() + token_on = bool(GITHUB_DBC_TOKEN) + + if token_on: + if dbc_github_path: + if dbc_github_path not in team_paths: + return jsonify({"error": "Invalid or unknown team DBC path; refresh the page and pick again."}), 400 + try: + dbc_temp_path = download_github_dbc_to_temp(dbc_github_path) + except Exception as e: + error_logger.error(e) + return jsonify({"error": f"Could not download DBC from GitHub: {e}"}), 400 + error_logger.info("DBC from GitHub: %s -> %s", dbc_github_path, dbc_temp_path) + elif dbc_file and dbc_file.filename: + if not dbc_file.filename.lower().endswith(".dbc"): + return jsonify({"error": "Invalid DBC file type. Only .dbc files allowed."}), 400 + import tempfile + + with tempfile.NamedTemporaryFile(delete=False, suffix=".dbc") as tmp: + dbc_file.save(tmp) + dbc_temp_path = tmp.name + error_logger.info("Custom DBC uploaded: %s -> %s", dbc_file.filename, dbc_temp_path) + else: + if len(team_paths) >= 1: + return ( + jsonify( + { + "error": "Select a team DBC from the list or upload a custom .dbc file.", + } + ), + 400, + ) + return ( + jsonify( + { + "error": "No .dbc files found in the team repo; upload a custom .dbc file.", + } + ), + 400, + ) + else: + if dbc_github_path: + return jsonify({"error": "GitHub DBC is not configured on this server."}), 400 + if dbc_file and dbc_file.filename: + if not dbc_file.filename.lower().endswith(".dbc"): + return "Invalid DBC file type. Only .dbc files allowed.", 400 + import tempfile + + with tempfile.NamedTemporaryFile(delete=False, suffix=".dbc") as tmp: + dbc_file.save(tmp) + dbc_temp_path = tmp.name + error_logger.info(f"Custom DBC uploaded: {dbc_file.filename} -> {dbc_temp_path}") # Handle multiple CSV files files = request.files.getlist("file") diff --git a/installer/file-uploader/static/index.js b/installer/file-uploader/static/index.js index 06d6800..2ac5c50 100644 --- a/installer/file-uploader/static/index.js +++ b/installer/file-uploader/static/index.js @@ -1,56 +1,129 @@ let canSubmit = true; -document.addEventListener("DOMContentLoaded", () => { - document.getElementById("drop_zone").addEventListener("drop", dropHandler); +function applyDbcSelectMode() { + const select = document.getElementById("dbc-select"); + const input = document.getElementById("dbc-input"); + if (!select || !input) return; + const v = select.value; + if (v === "custom") { + input.disabled = false; + } else { + input.disabled = true; + input.value = ""; + const label = document.getElementById("dbc-name-label"); + if (label) label.innerText = ""; + } +} - document - .getElementById("drop_zone-input") - .addEventListener("change", clickHandler); +async function loadDbcList() { + const select = document.getElementById("dbc-select"); + const hint = document.getElementById("dbc-list-hint"); + if (!select || !hint) return; + select.innerHTML = ""; + hint.innerText = "Loading team DBC list…"; + try { + const res = await fetch("/dbc/list"); + const data = await res.json(); + const optDefault = document.createElement("option"); + optDefault.value = "default"; + optDefault.textContent = "Default (container DBC)"; + const optCustom = document.createElement("option"); + optCustom.value = "custom"; + optCustom.textContent = "Custom upload…"; - document - 
.getElementById("drop_zone") - .addEventListener("dragover", dragOverHandler); + if (!data.token_configured) { + select.appendChild(optDefault); + select.appendChild(optCustom); + select.value = "default"; + hint.innerText = + data.message || + "Set GITHUB_DBC_TOKEN on the server to list DBCs from Western-Formula-Racing/DBC."; + applyDbcSelectMode(); + return; + } - document.getElementById("dbc-input").addEventListener("change", (e) => { - const file = e.target.files[0]; - document.getElementById("dbc-name-label").innerText = file ? file.name : ""; - }); + if (data.error) { + hint.innerText = data.error; + } else { + hint.innerText = ""; + } - if (document.getElementById("task-id-label").innerText) { - handleProgress(document.getElementById("task-id-label").innerText); + const items = data.items || []; + for (const path of items) { + const opt = document.createElement("option"); + opt.value = "github:" + path; + opt.textContent = path; + select.appendChild(opt); + } + select.appendChild(optCustom); + + if (items.length === 0) { + select.value = "custom"; + hint.innerText = + (hint.innerText ? hint.innerText + " " : "") + + "No .dbc files in repo; upload a custom file."; + } else if (items.length === 1) { + select.value = "github:" + items[0]; + } else { + select.value = "github:" + items[0]; + } + applyDbcSelectMode(); + } catch (e) { + console.error(e); + hint.innerText = "Could not load team DBC list."; + const optDefault = document.createElement("option"); + optDefault.value = "default"; + optDefault.textContent = "Default (container DBC)"; + const optCustom = document.createElement("option"); + optCustom.value = "custom"; + optCustom.textContent = "Custom upload…"; + select.appendChild(optDefault); + select.appendChild(optCustom); + select.value = "default"; + applyDbcSelectMode(); } -}); +} -function clickHandler(e) { - e.preventDefault(); - if (!canSubmit) { - alert("File Currently Uploading"); - return; +function appendDbcToForm(form) { + const select = document.getElementById("dbc-select"); + const dbcFile = document.getElementById("dbc-input")?.files?.[0]; + if (!select) return; + const v = select.value; + if (v.startsWith("github:")) { + form.append("dbc_github_path", v.slice(7)); + } else if (v === "custom" && dbcFile) { + form.append("dbc", dbcFile); + } +} + +async function parseUploadResponse(res) { + const text = await res.text(); + try { + return JSON.parse(text); + } catch { + return { error: text || res.statusText, _notJson: true }; } - console.log("files uploaded"); +} + +function submitCsvUpload(files) { const name_label = document.getElementById("file-name-label"); const selected_bucket = document.getElementById("bucket-select").value; if (!selected_bucket || selected_bucket == "") { - console.error("no bucket selected"); - alert("Please Select A Bucket From The Dropdown Menu"); + alert("Please Select A Season/Table From The Dropdown Menu"); return; } - const files = e.target.files; if (!files || files.length === 0) { - console.error("no files"); name_label.innerText = "No Files Selected"; name_label.style = "color: red;"; alert("No Files Selected"); return; } - // Validate all files are CSV for (let i = 0; i < files.length; i++) { const file = files[i]; - if (file.type !== "text/csv" && !file.name.toLowerCase().endsWith('.csv')) { - console.error("File not CSV type"); + if (file.type !== "text/csv" && !file.name.toLowerCase().endsWith(".csv")) { name_label.innerText = `File ${file.name} is not a CSV file`; name_label.style = "color: red;"; alert(`File ${file.name} is 
not a CSV file. Only CSV files are allowed.`); @@ -58,40 +131,56 @@ function clickHandler(e) { } } - // Display file names - const fileNames = Array.from(files).map(f => f.name); + const sel = document.getElementById("dbc-select"); + if (sel && sel.value === "custom") { + const dbcFile = document.getElementById("dbc-input")?.files?.[0]; + const hasGithub = Array.from(sel.options).some((o) => o.value.startsWith("github:")); + const hasDefault = Array.from(sel.options).some((o) => o.value === "default"); + if (!dbcFile) { + if (hasGithub) { + alert("Select a team DBC from the list, or choose a custom .dbc file."); + return; + } + if (!hasDefault) { + alert("Upload a custom .dbc file."); + return; + } + } + } + + const fileNames = Array.from(files).map((f) => f.name); if (files.length === 1) { name_label.innerText = fileNames[0]; } else { - name_label.innerText = `${files.length} CSV files: ${fileNames.slice(0, 3).join(', ')}${files.length > 3 ? '...' : ''}`; + name_label.innerText = `${files.length} CSV files: ${fileNames.slice(0, 3).join(", ")}${files.length > 3 ? "..." : ""}`; } name_label.style = "color: white;"; const form = new FormData(); - // Append all files with the same field name for (let i = 0; i < files.length; i++) { form.append("file", files[i]); } form.append("bucket", selected_bucket); - const dbcFile = document.getElementById("dbc-input").files[0]; - if (dbcFile) form.append("dbc", dbcFile); + appendDbcToForm(form); fetch("/upload", { method: "POST", body: form, }) - .then((res) => res.json()) + .then((res) => parseUploadResponse(res)) .then((data) => { - // console.log("response ", data); if (data.error) { alert(data.error); location.reload(); + return; } if (data.task_id) { handleProgress(data.task_id); document.getElementById("task-id-label").innerText = data.task_id; } else { - console.error("No task_id"); + console.error("No task_id", data); + name_label.innerText = "There was an error (check console)"; + name_label.style = "color: red;"; } }) .catch((err) => { @@ -101,87 +190,49 @@ function clickHandler(e) { }); } -function dropHandler(e) { - e.preventDefault(); - if (!canSubmit) { - alert("File Currently Uploading"); - return; - } - console.log("files dropped"); - const name_label = document.getElementById("file-name-label"); - const selected_bucket = document.getElementById("bucket-select").value; +document.addEventListener("DOMContentLoaded", () => { + document.getElementById("drop_zone").addEventListener("drop", dropHandler); - if (!selected_bucket || selected_bucket == "") { - console.error("no bucket selected"); - alert("Please Select A Bucket From The Dropdown Menu"); - return; - } + document + .getElementById("drop_zone-input") + .addEventListener("change", clickHandler); - const files = e.dataTransfer?.files; - if (!files || files.length === 0) { - console.log("no files"); - name_label.innerText = "No Files Selected"; - name_label.style = "color: red;"; - alert("No Files Selected"); - return; - } + document + .getElementById("drop_zone") + .addEventListener("dragover", dragOverHandler); - // Validate all files are CSV - for (let i = 0; i < files.length; i++) { - const file = files[i]; - if (file.type !== "text/csv" && !file.name.toLowerCase().endsWith('.csv')) { - console.log("File not CSV type"); - name_label.innerText = `File ${file.name} is not a CSV file`; - name_label.style = "color: red;"; - alert(`File ${file.name} is not a CSV file. 
Only CSV files are allowed.`); - return; - } - } + document.getElementById("dbc-input").addEventListener("change", (e) => { + const file = e.target.files[0]; + document.getElementById("dbc-name-label").innerText = file ? file.name : ""; + }); - // Display file names - const fileNames = Array.from(files).map(f => f.name); - if (files.length === 1) { - name_label.innerText = fileNames[0]; - } else { - name_label.innerText = `${files.length} CSV files: ${fileNames.slice(0, 3).join(', ')}${files.length > 3 ? '...' : ''}`; + const dbcSelect = document.getElementById("dbc-select"); + if (dbcSelect) { + dbcSelect.addEventListener("change", applyDbcSelectMode); } - name_label.style = "color: white;"; + loadDbcList(); - const form = new FormData(); - // Append all files with the same field name - for (let i = 0; i < files.length; i++) { - form.append("file", files[i]); + if (document.getElementById("task-id-label").innerText) { + handleProgress(document.getElementById("task-id-label").innerText); } - form.append("bucket", selected_bucket); - const dbcFile = document.getElementById("dbc-input").files[0]; - if (dbcFile) form.append("dbc", dbcFile); +}); - fetch("/upload", { - method: "POST", - body: form, - }) - .then((res) => res.json()) - .then((data) => { - // console.log("response ", data); - if (data.error) { - alert(data.error); - location.reload(); - } +function clickHandler(e) { + e.preventDefault(); + if (!canSubmit) { + alert("File Currently Uploading"); + return; + } + submitCsvUpload(e.target.files); +} - if (data.task_id) { - handleProgress(data.task_id); - document.getElementById("task-id-label").innerText = data.task_id; - } else { - console.error("No task_id"); - name_label.innerText = "There was an error (check console)"; - name_label.style = "color: red;"; - } - }) - .catch((err) => { - console.error("error", err); - name_label.innerText = "There was an error (check console)"; - name_label.style = "color: red;"; - }); +function dropHandler(e) { + e.preventDefault(); + if (!canSubmit) { + alert("File Currently Uploading"); + return; + } + submitCsvUpload(e.dataTransfer?.files); } function dragOverHandler(e) { @@ -213,7 +264,6 @@ function createBucket() { btn.disabled = false; return; } - // Add to dropdown and select it const select = document.getElementById("bucket-select"); const opt = document.createElement("option"); opt.value = data.name; @@ -224,7 +274,9 @@ function createBucket() { msg.innerText = "Created!"; msg.style.color = "lightgreen"; btn.disabled = false; - setTimeout(() => { msg.innerText = ""; }, 3000); + setTimeout(() => { + msg.innerText = ""; + }, 3000); }) .catch((err) => { msg.innerText = "Error (check console)"; @@ -235,7 +287,6 @@ function createBucket() { } function handleProgress(task_id) { - console.log("running this"); const eventSource = new EventSource(`/progress/${task_id}`); canSubmit = false; document.getElementById("drop_zone").innerHTML = ` @@ -253,25 +304,27 @@ function handleProgress(task_id) { `; eventSource.onmessage = (e) => { const data = JSON.parse(e.data); - // console.log(data); - document.getElementById( - "progress-bar" - ).style = `justify-content: baseline;`; + document.getElementById("progress-bar").style = `justify-content: baseline;`; document.getElementById("progress-bar").style = `width: ${data.pct}%;`; document.getElementById("progress-bar_pct").innerText = data.pct + "%"; - document.getElementById( - "progress-bar_count" - ).innerText = `${data.sent} / ${data.total} rows`; + document.getElementById("progress-bar_count").innerText 
= `${data.sent} / ${data.total} rows`; document.getElementById("drop_zone-input").disabled = true; document.getElementById("bucket-select").disabled = true; + const dbcSel = document.getElementById("dbc-select"); + if (dbcSel) dbcSel.disabled = true; + const dbcIn = document.getElementById("dbc-input"); + if (dbcIn) dbcIn.disabled = true; if (data.done) { eventSource.close(); document.getElementById("progress-bar_pct").innerText = "Done"; document.getElementById("drop_zone-input").disabled = false; document.getElementById("bucket-select").disabled = false; + if (dbcSel) dbcSel.disabled = false; + if (dbcIn) dbcIn.disabled = false; + applyDbcSelectMode(); canSubmit = true; document.getElementById("drop_zone").innerHTML = `

        <!-- markup lost in extraction; the template restores the default drop-zone contents -->
        Click to upload CSV files or drag and drop

`; + document.getElementById("drop_zone").addEventListener("drop", dropHandler); + document.getElementById("drop_zone").addEventListener("dragover", dragOverHandler); } }; } diff --git a/installer/file-uploader/templates/index.html b/installer/file-uploader/templates/index.html index 26bb6af..56859a7 100644 --- a/installer/file-uploader/templates/index.html +++ b/installer/file-uploader/templates/index.html @@ -33,8 +33,13 @@

 [hunk garbled in extraction: the HTML tag markup was stripped around the hint
 text "(multiple csv files supported)". Per the commit message, this hunk adds
 the team DBC select list, its hint element, and the custom .dbc file input.]

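Once the token is configured, the new endpoint can be smoke-tested from the host (illustrative output; the item paths depend on what is in the team repo):

```bash
curl -s http://localhost:8084/dbc/list
# {"token_configured": true, "items": ["sensors/WFR26.dbc"], "error": null}
```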
From 94760ca6c97f5e22244a0847ac76c75f2a53d76b Mon Sep 17 00:00:00 2001
From: Haorui Zhou
Date: Sun, 12 Apr 2026 13:00:58 -0400
Subject: [PATCH 7/8] Exclude InfluxDB system tables from seasons

---
 installer/file-uploader/app.py | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/installer/file-uploader/app.py b/installer/file-uploader/app.py
index cc0764c..3584e35 100644
--- a/installer/file-uploader/app.py
+++ b/installer/file-uploader/app.py
@@ -31,6 +31,26 @@
 INFLUXDB_TOKEN = os.getenv("INFLUXDB_TOKEN")
 INFLUXDB_URL = os.getenv("INFLUXDB_URL", "http://influxdb3:8181")
 INFLUXDB_DATABASE = os.getenv("INFLUXDB_DATABASE", "WFR")
+# information_schema / catalog tables — not user telemetry seasons
+_INFLUX_SYSTEM_TABLE_NAMES = frozenset(
+    {
+        "views",
+        "tables",
+        "schemata",
+        "routines",
+        "queries",
+        "processing_engine_triggers",
+        "processing_engine_trigger_arguments",
+        "processing_engine_logs",
+        "parquet_files",
+        "parameters",
+        "last_caches",
+        "influxdb_schema",
+        "distinct_caches",
+        "df_settings",
+        "columns",
+    }
+)
 GITHUB_DBC_TOKEN = os.getenv("GITHUB_DBC_TOKEN", "").strip()
 GITHUB_DBC_REPO = os.getenv("GITHUB_DBC_REPO", "Western-Formula-Racing/DBC").strip()
 GITHUB_DBC_BRANCH = os.getenv("GITHUB_DBC_BRANCH", "main").strip()
@@ -142,7 +162,14 @@ def getSeasons() -> list[str]:
             timeout=10,
         )
         res.raise_for_status()
-        seasons = [row["table_name"] for row in res.json() if not row["table_name"].startswith("_")]
+        seasons = []
+        for row in res.json():
+            name = row.get("table_name") or ""
+            if not name or name.startswith("_"):
+                continue
+            if name.lower() in _INFLUX_SYSTEM_TABLE_NAMES:
+                continue
+            seasons.append(name)
         return sorted(seasons, reverse=True) if seasons else _seasons_from_env()
     except Exception:
         return _seasons_from_env()

From db23746e6a89b511ea9cb14e14ee82847271bb68 Mon Sep 17 00:00:00 2001
From: Haorui Zhou
Date: Sun, 12 Apr 2026 14:01:49 -0400
Subject: [PATCH 8/8] Support .zip uploads; rename bucket to season

Add server-side support for .zip archives containing CSVs and rename UI/vars
from "bucket" to "season". Introduces configurable upload limits in
.env.example and implements safe zip expansion (size/member/count limits,
zip-slip/path-traversal protection) in app.py and helper.py. Updates
CANInfluxStreamer to accept database/table semantics, writes CSVs to a safe
temp tree (recursive CSV discovery), and preserves progress reporting using
the new "season" keys. Frontend templates, JS and CSS are updated to accept
.zip files and to use the season select control and user-facing text.

---
 docs/containers/file-uploader.md             |   8 +-
 installer/.env.example                       |   6 +
 installer/file-uploader/app.py               | 148 +++++++++++++++----
 installer/file-uploader/helper.py            |  64 +++++---
 installer/file-uploader/static/index.js      |  31 ++--
 installer/file-uploader/static/style.css     |  10 +-
 installer/file-uploader/templates/index.html |  14 +-
 7 files changed, 200 insertions(+), 81 deletions(-)

diff --git a/docs/containers/file-uploader.md b/docs/containers/file-uploader.md
index 7432e8c..d8cc2ec 100644
--- a/docs/containers/file-uploader.md
+++ b/docs/containers/file-uploader.md
@@ -1,6 +1,6 @@
 # File uploader
 
-The file uploader is a Flask application that streams CAN CSV logs into InfluxDB 3. It exposes a simple web UI for selecting the destination bucket and monitoring progress.
+The file uploader is a Flask application that streams CAN CSV logs into InfluxDB 3.
It exposes a simple web UI for selecting the destination **season** (InfluxDB table within the configured database) and monitoring progress. ## Ports @@ -10,8 +10,8 @@ The file uploader is a Flask application that streams CAN CSV logs into InfluxDB | Variable | Description | Default | | --- | --- | --- | -| `INFLUXDB_URL` | API endpoint for bucket discovery and writes. | `http://influxdb3:8181` | -| `INFLUXDB_TOKEN` | Token with write access to the target bucket. | `dev-influxdb-admin-token` | +| `INFLUXDB_URL` | API endpoint for table discovery and writes. | `http://influxdb3:8181` | +| `INFLUXDB_TOKEN` | Token with write access to the target database. | `dev-influxdb-admin-token` | | `FILE_UPLOADER_WEBHOOK_URL` | Optional webhook invoked when uploads finish. | empty | | `SLACK_WEBHOOK_URL` | Fallback webhook if the dedicated uploader value is unset. | empty | @@ -25,6 +25,6 @@ The file uploader is a Flask application that streams CAN CSV logs into InfluxDB ## Usage 1. Visit http://localhost:8084. -2. Choose a target bucket from the drop-down (populated from the InfluxDB API). +2. Choose a target season (table) from the drop-down (populated from the InfluxDB API). 3. Upload one or more CSV files exported from the vehicle logger. 4. Monitor progress via the live event stream; notifications are sent upon completion if a webhook is configured. diff --git a/installer/.env.example b/installer/.env.example index 2fc3969..88d5bc4 100644 --- a/installer/.env.example +++ b/installer/.env.example @@ -11,6 +11,12 @@ GITHUB_DBC_TOKEN= # GITHUB_DBC_REPO=Western-Formula-Racing/DBC # GITHUB_DBC_BRANCH=main +# Optional limits for .zip uploads (file-uploader); defaults are generous for team use +# UPLOAD_ZIP_MAX_ARCHIVE_BYTES=2147483648 +# UPLOAD_ZIP_MAX_MEMBER_BYTES=4294967296 +# UPLOAD_ZIP_MAX_TOTAL_UNCOMPRESSED_BYTES=25769803776 +# UPLOAD_ZIP_MAX_CSV_IN_ZIP=5000 + # ------------------------------------------------------------ # InfluxDB credentials # ------------------------------------------------------------ diff --git a/installer/file-uploader/app.py b/installer/file-uploader/app.py index 3584e35..728cf9b 100644 --- a/installer/file-uploader/app.py +++ b/installer/file-uploader/app.py @@ -6,7 +6,7 @@ stream_with_context, Response, ) -import uuid, time, threading, json, logging, requests, os, asyncio +import uuid, time, threading, json, logging, requests, os, asyncio, io, zipfile from typing import Optional, Tuple, List from urllib.parse import quote from helper import CANInfluxStreamer @@ -18,10 +18,17 @@ error_logger = logging.getLogger(__name__) -ALLOWED_EXTENSIONS = {"csv"} +ALLOWED_EXTENSIONS = {"csv", "zip"} +# Zip expansion limits (team-only upload; still guard accidents / bad archives) +UPLOAD_ZIP_MAX_ARCHIVE_BYTES = int(os.getenv("UPLOAD_ZIP_MAX_ARCHIVE_BYTES", str(2 * 1024**3))) +UPLOAD_ZIP_MAX_MEMBER_BYTES = int(os.getenv("UPLOAD_ZIP_MAX_MEMBER_BYTES", str(4 * 1024**3))) +UPLOAD_ZIP_MAX_TOTAL_UNCOMPRESSED_BYTES = int( + os.getenv("UPLOAD_ZIP_MAX_TOTAL_UNCOMPRESSED_BYTES", str(24 * 1024**3)) +) +UPLOAD_ZIP_MAX_CSV_IN_ZIP = int(os.getenv("UPLOAD_ZIP_MAX_CSV_IN_ZIP", "5000")) ALLOWED_DBC_EXTENSIONS = {"dbc"} PROGRESS = {} -CURRENT_FILE = {"name": "", "task_id": "", "bucket": ""} +CURRENT_FILE = {"name": "", "task_id": "", "season": ""} WEBHOOK_URL = ( os.getenv("FILE_UPLOADER_WEBHOOK_URL") or os.getenv("SLACK_WEBHOOK_URL") @@ -175,6 +182,84 @@ def getSeasons() -> list[str]: return _seasons_from_env() +def _zip_entry_path_safe(arcname: str) -> bool: + if not arcname or arcname.startswith(("/", 
"\\")): + return False + n = arcname.replace("\\", "/").lstrip("/") + return ".." not in n.split("/") + + +def expand_upload_files_to_csv_payloads(files) -> Tuple[List[Tuple[str, bytes]], Optional[str]]: + """ + Normalize multipart uploads to (relative_path, bytes) for stream_multiple_csvs. + Plain .csv are stored at basename; each .zip expands to _zN/.csv under the temp tree. + """ + out: List[Tuple[str, bytes]] = [] + zip_idx = 0 + seen_in_zip: set[tuple[int, str]] = set() + for f in files: + if not f or not f.filename: + return [], "Empty file provided" + name = f.filename.strip() + ext = name.rsplit(".", 1)[-1].lower() if "." in name else "" + data = f.read() + if ext == "csv": + leaf = os.path.basename(name) or "unknown.csv" + out.append((leaf, data)) + elif ext == "zip": + if len(data) > UPLOAD_ZIP_MAX_ARCHIVE_BYTES: + return [], ( + f"Zip too large: {name} " + f"(max {UPLOAD_ZIP_MAX_ARCHIVE_BYTES // (1024 ** 3)} GiB compressed)" + ) + zip_idx += 1 + zlabel = zip_idx + try: + with zipfile.ZipFile(io.BytesIO(data), "r") as z: + infos = [ + i + for i in z.infolist() + if not i.is_dir() + and i.filename.lower().endswith(".csv") + and _zip_entry_path_safe(i.filename) + ] + if not infos: + return [], f"No CSV files found in zip: {name}" + if len(infos) > UPLOAD_ZIP_MAX_CSV_IN_ZIP: + return [], f"Too many CSV entries in {name} (max {UPLOAD_ZIP_MAX_CSV_IN_ZIP})" + total_uc = sum(i.file_size for i in infos) + if total_uc > UPLOAD_ZIP_MAX_TOTAL_UNCOMPRESSED_BYTES: + return [], ( + f"Zip {name} uncompressed total too large " + f"(max {UPLOAD_ZIP_MAX_TOTAL_UNCOMPRESSED_BYTES // (1024 ** 3)} GiB)" + ) + for i in infos: + if i.file_size > UPLOAD_ZIP_MAX_MEMBER_BYTES: + return [], f"CSV inside zip too large: {i.filename} in {name}" + leaf = os.path.basename(i.filename) or "data.csv" + key = (zlabel, leaf.lower()) + if key in seen_in_zip: + return [], ( + f'Duplicate CSV filename "{leaf}" inside zip {name} ' + "(rename one of the files)." 
+
+
 # This function can send Slack messages to a channel
 def send_webhook_notification(payload_text=None):
     if not WEBHOOK_URL:
@@ -194,8 +279,8 @@ def index():
         "index.html",
         file_name=CURRENT_FILE["name"],
         task_id=CURRENT_FILE["task_id"],
-        current_bucket=CURRENT_FILE["bucket"],
-        bucket_names=getSeasons(),
+        current_season=CURRENT_FILE["season"],
+        season_names=getSeasons(),
     )
@@ -222,7 +307,7 @@ def create_bucket():
     """Create a new table (season) inside INFLUXDB_DATABASE, not a new InfluxDB database."""
     name = (request.json or {}).get("name", "").strip()
     if not name:
-        return jsonify({"error": "No bucket name provided"}), 400
+        return jsonify({"error": "No season name provided"}), 400
     if len(name) > 256:
         return jsonify({"error": "Name too long (max 256 characters)"}), 400
@@ -259,9 +344,9 @@ def upload_file():
             ),
             400,
         )
-    bucket = request.form.get("bucket")
-    if not bucket or bucket == "":
-        return "No Bucket Provided", 400
+    season = request.form.get("season")
+    if not season or season == "":
+        return jsonify({"error": "No season selected"}), 400
     dbc_github_path = (request.form.get("dbc_github_path") or "").strip()
     dbc_temp_path = None
@@ -319,36 +404,33 @@ def upload_file():
             dbc_temp_path = tmp.name
             error_logger.info(f"Custom DBC uploaded: {dbc_file.filename} -> {dbc_temp_path}")
-    # Handle multiple CSV files
+    # Handle multiple CSV files and/or zip archives (expanded server-side)
     files = request.files.getlist("file")
     if not files or len(files) == 0:
         return "No Files Provided", 400
-    # Validate all files are CSV
     for f in files:
         if not f or not f.filename or f.filename == "":
             return "Empty file provided", 400
-        content_type = f.mimetype or ""
-        filename = f.filename or ""
-        if content_type != "text/csv" and not filename.lower().endswith('.csv'):
-            return f"Invalid File Type: {filename}. Only CSV files allowed.", 400
-
-    # Calculate total size of all files
-    total_size = 0
-    file_data = []
-    for f in files:
-        data = f.read()
-        total_size += len(data)
-        file_data.append((f.filename or "unknown.csv", data))
-        f.seek(0)  # Reset for potential re-read
+    file_data, expand_err = expand_upload_files_to_csv_payloads(files)
+    if expand_err:
+        return jsonify({"error": expand_err}), 400
+
+    total_size = sum(len(b) for _, b in file_data)
     task_id = str(uuid.uuid4())
     PROGRESS[task_id] = {"pct": 0, "msg": "Starting...", "done": False}
-    file_names = [f.filename or "unknown.csv" for f in files]
-    CURRENT_FILE["name"] = f"{len(files)} CSV files: {', '.join(file_names[:3])}{'...' if len(files) > 3 else ''}"
+    display_names = [os.path.basename(p) for p, _ in file_data[:12]]
+    CURRENT_FILE["name"] = (
+        f"{len(file_data)} CSV file(s): {', '.join(display_names[:3])}"
+        f"{'...' if len(file_data) > 3 else ''}"
+    )
     CURRENT_FILE["task_id"] = str(task_id)
-    CURRENT_FILE["bucket"] = bucket
-    send_webhook_notification(f"Uploading {len(files)} CSV files -> {bucket}: {', '.join(file_names[:3])}{'...' if len(files) > 3 else ''}")
+    CURRENT_FILE["season"] = season
+    send_webhook_notification(
+        f"Uploading {len(file_data)} CSV file(s) -> season {season}: {', '.join(display_names[:3])}"
+        f"{'...' if len(file_data) > 3 else ''}"
+    )
     def on_progress(sent: int, total: int):
         try:
@@ -357,12 +439,12 @@ def on_progress(sent: int, total: int):
             PROGRESS[task_id]["sent"] = sent
             PROGRESS[task_id]["total"] = total
             PROGRESS[task_id]["name"] = CURRENT_FILE["name"]
-            PROGRESS[task_id]["bucket"] = bucket
+            PROGRESS[task_id]["season"] = season
             PROGRESS[task_id]["msg"] = f"Processing... {pct}% ({sent}/{total} rows)"
             if sent >= total and not PROGRESS[task_id].get("done"):
                 PROGRESS[task_id]["done"] = True
                 send_webhook_notification(
-                    f"File Done Uploading: {CURRENT_FILE['name']} -> {CURRENT_FILE['bucket']}"
+                    f"File Done Uploading: {CURRENT_FILE['name']} -> {CURRENT_FILE['season']}"
                 )
         except:
             pass
@@ -370,7 +452,9 @@ def worker():
         # Auto-configure streamer for file size with InfluxDB-safe settings
         file_size_mb = total_size / (1024 * 1024)
-        streamer = CANInfluxStreamer(bucket=INFLUXDB_DATABASE, table=bucket, dbc_path=dbc_temp_path)
+        streamer = CANInfluxStreamer(
+            database=INFLUXDB_DATABASE, table=season, dbc_path=dbc_temp_path
+        )
         try:
             # Process multiple CSV files using the new method
@@ -427,7 +511,7 @@ def gen():
             if state.get("done"):
                 CURRENT_FILE["name"] = ""
                 CURRENT_FILE["task_id"] = ""
-                CURRENT_FILE["bucket"] = ""
+                CURRENT_FILE["season"] = ""
                 yield f"data: {json.dumps(state)}\n\n"
                 break
             time.sleep(0.3)
diff --git a/installer/file-uploader/helper.py b/installer/file-uploader/helper.py
index aad4b66..c46132f 100644
--- a/installer/file-uploader/helper.py
+++ b/installer/file-uploader/helper.py
@@ -41,6 +41,33 @@ def _rolling_cleanup():
 _rolling_cleanup()  # Clean up any old temp files from previous runs
+
+def _safe_csv_temp_path(temp_dir: str, relative_csv_path: str) -> str:
+    """Resolve temp_dir + relative path for a CSV; reject zip-slip / traversal."""
+    base = os.path.abspath(temp_dir)
+    rel = relative_csv_path.replace("\\", "/").lstrip("/")
+    if not rel or rel.startswith("/"):
+        raise ValueError("invalid relative path")
+    for part in rel.split("/"):
+        if part == "..":
+            raise ValueError("path traversal in relative path")
+    joined = os.path.normpath(os.path.join(base, rel))
+    if joined != base and not joined.startswith(base + os.sep):
+        raise ValueError("path escapes upload temp directory")
+    parent = os.path.dirname(joined)
+    if parent != base:
+        os.makedirs(parent, exist_ok=True)
+    return joined
+
+
+def _iter_csv_files_under_dir(csv_dir: str):
+    """Yield (full_path, basename) for every .csv under csv_dir (recursive)."""
+    base = os.path.abspath(csv_dir)
+    for root, _dirs, files in os.walk(base):
+        for name in files:
+            if not name.lower().endswith(".csv"):
+                continue
+            yield os.path.join(root, name), name
+
 if os.getenv("DEBUG") is None:
     from dotenv import load_dotenv
@@ -59,7 +86,7 @@ class ProgressStats:
 class CANInfluxStreamer:
     def __init__(
-        self, bucket: str, table: str, batch_size: int = 500, max_concurrent_uploads: int = 5,
+        self, database: str, table: str, batch_size: int = 500, max_concurrent_uploads: int = 5,
         enable_progress_counting: bool = True, max_queue_size: int = 100,
         rate_limit_delay: float = 0.01, max_retries: int = 3,
         adaptive_backoff: bool = True, dbc_path: Optional[str] = None
     ):
@@ -67,8 +94,9 @@ def __init__(
         self.batch_size = batch_size
         self.max_concurrent_uploads = max_concurrent_uploads
-        self.bucket = bucket  # InfluxDB database name (e.g. "WFR")
-        self.table = table  # Measurement/table name (e.g. "WFR26")
+        # InfluxDB 3 database; the Python client still names this parameter `bucket` on write_api.write().
+        self.database = database
+        self.table = table  # Table / measurement (e.g. season WFR26)
         self.enable_progress_counting = enable_progress_counting
         self.max_queue_size = max_queue_size
         self.rate_limit_delay = rate_limit_delay
@@ -168,16 +196,12 @@ def count_total_messages_from_disk(self, csv_dir: str, estimate: bool = False) -> int:
             return 0
         total = 0
-        csv_files = [f for f in os.listdir(csv_dir) if f.endswith('.csv')]
-
-        for filename in csv_files:
+        for file_path, filename in _iter_csv_files_under_dir(csv_dir):
             # Filename must be a timestamp we can parse
             try:
                 datetime.strptime(filename[:-4], "%Y-%m-%d-%H-%M-%S")
             except ValueError:
                 continue  # Skip files that don't match expected format
-
-            file_path = os.path.join(csv_dir, filename)
             sample_count = 0
             valid_rows = 0
@@ -502,12 +526,9 @@ async def _producer_from_disk(
         """Producer that processes CSV files from disk directory."""
         semaphore = asyncio.Semaphore(2)  # Limit concurrent file processing
-        csv_files = [f for f in os.listdir(csv_dir) if f.endswith('.csv')]
+        csv_files = list(_iter_csv_files_under_dir(csv_dir))
         print(f"📄 Found {len(csv_files)} CSV files to process")
-
-        # Process CSV files sequentially to control memory usage
-        for filename in csv_files:
-            csv_path = os.path.join(csv_dir, filename)
+        for csv_path, _basename in csv_files:
             await self.process_csv_file_from_disk(csv_path, queue, semaphore)
     async def _producer(
@@ -752,7 +773,7 @@ async def _uploader(
                     # Submit async write (non-blocking)
                     self.write_api.write(
-                        bucket=self.bucket,
+                        bucket=self.database,
                         org=self.org,
                         record=batch_points,
                         _callback_id=callback_id  # Custom attribute for tracking
@@ -941,17 +962,16 @@ async def stream_multiple_csvs(
         try:
-            # Save all CSV files to temp directory
+            # Save all CSV files to temp directory (relative paths may include subdirs per archive)
             for filename, data in file_data:
                 if not filename:
                     continue
-
-                # Ensure filename ends with .csv
-                if not filename.lower().endswith('.csv'):
-                    filename += '.csv'
-
-                temp_path = os.path.join(temp_dir, filename)
-                with open(temp_path, 'wb') as f:
+
+                if not filename.lower().endswith(".csv"):
+                    filename += ".csv"
+
+                temp_path = _safe_csv_temp_path(temp_dir, filename)
+                with open(temp_path, "wb") as f:
                     f.write(data)
                 print(f"💾 Saved {filename} ({len(data)} bytes)")
diff --git a/installer/file-uploader/static/index.js b/installer/file-uploader/static/index.js
index 2ac5c50..395ac41 100644
--- a/installer/file-uploader/static/index.js
+++ b/installer/file-uploader/static/index.js
@@ -107,10 +107,10 @@ async function parseUploadResponse(res) {
 function submitCsvUpload(files) {
   const name_label = document.getElementById("file-name-label");
-  const selected_bucket = document.getElementById("bucket-select").value;
+  const selected_season = document.getElementById("season-select").value;
-  if (!selected_bucket || selected_bucket == "") {
-    alert("Please Select A Season/Table From The Dropdown Menu");
+  if (!selected_season || selected_season == "") {
+    alert("Please select a season from the dropdown");
     return;
   }
@@ -123,10 +123,19 @@ function submitCsvUpload(files) {
   for (let i = 0; i < files.length; i++) {
     const file = files[i];
-    if (file.type !== "text/csv" && !file.name.toLowerCase().endsWith(".csv")) {
-      name_label.innerText = `File ${file.name} is not a CSV file`;
+    const n = file.name.toLowerCase();
+    const okCsv =
+      file.type === "text/csv" ||
+      n.endsWith(".csv") ||
+      file.type === "application/csv";
+    const okZip =
+      n.endsWith(".zip") ||
+      file.type === "application/zip" ||
+      file.type === "application/x-zip-compressed";
+    if (!okCsv && !okZip) {
+      name_label.innerText = `File ${file.name} is not CSV or zip`;
       name_label.style = "color: red;";
-      alert(`File ${file.name} is not a CSV file. Only CSV files are allowed.`);
+      alert(`File ${file.name} must be .csv or .zip (of CSVs).`);
       return;
     }
   }
@@ -160,7 +169,7 @@ function submitCsvUpload(files) {
   for (let i = 0; i < files.length; i++) {
     form.append("file", files[i]);
   }
-  form.append("bucket", selected_bucket);
+  form.append("season", selected_season);
   appendDbcToForm(form);
   fetch("/upload", {
@@ -264,7 +273,7 @@ function createBucket() {
       btn.disabled = false;
       return;
     }
-    const select = document.getElementById("bucket-select");
+    const select = document.getElementById("season-select");
     const opt = document.createElement("option");
     opt.value = data.name;
     opt.innerText = data.name;
@@ -311,7 +320,7 @@ function handleProgress(task_id) {
       document.getElementById("progress-bar_count").innerText = `${data.sent} / ${data.total} rows`;
       document.getElementById("drop_zone-input").disabled = true;
-      document.getElementById("bucket-select").disabled = true;
+      document.getElementById("season-select").disabled = true;
       const dbcSel = document.getElementById("dbc-select");
       if (dbcSel) dbcSel.disabled = true;
       const dbcIn = document.getElementById("dbc-input");
@@ -321,7 +330,7 @@ function handleProgress(task_id) {
       eventSource.close();
       document.getElementById("progress-bar_pct").innerText = "Done";
       document.getElementById("drop_zone-input").disabled = false;
-      document.getElementById("bucket-select").disabled = false;
+      document.getElementById("season-select").disabled = false;
       if (dbcSel) dbcSel.disabled = false;
       if (dbcIn) dbcIn.disabled = false;
       applyDbcSelectMode();
@@ -342,7 +351,7 @@
             d="M13 13h3a3 3 0 0 0 0-6h-.025A5.56 5.56 0 0 0 16 6.5 5.5 5.5 0 0 0 5.207 5.021C5.137 5.017 5.071 5 5 5a4 4 0 0 0 0 8h2.167M10 15V6m0 0L8 8m2-2 2 2" />
-            Click to upload CSV files or drag and drop
-        `;
+            Click to upload CSV or zip, or drag and drop
+        `;
   document.getElementById("drop_zone").addEventListener("drop", dropHandler);
   document.getElementById("drop_zone").addEventListener("dragover", dragOverHandler);
 }
diff --git a/installer/file-uploader/static/style.css b/installer/file-uploader/static/style.css
index d2607ae..343fee3 100644
--- a/installer/file-uploader/static/style.css
+++ b/installer/file-uploader/static/style.css
@@ -137,13 +137,13 @@ h2 {
     }
 }
-#bucket-select_span {
+#season-select_span {
    font-size: xx-large;
    font-weight: 600;
    margin-top: 15px
 }
-#bucket-select {
+#season-select {
    font-size: large;
    vertical-align: middle;
    border-radius: 15px;
@@ -152,15 +152,15 @@ h2 {
    border: 3px solid white;
 }
-#bucket-select:hover {
+#season-select:hover {
    border: 3px solid rebeccapurple;
 }
-#bucket-select:focus {
+#season-select:focus {
    border: 3px solid rebeccapurple;
    box-shadow: 0 3px 5px 0 rgba(0,0,0,.2);
    outline: none;
 }
-#bucket-select:disabled {
+#season-select:disabled {
    cursor: not-allowed;
    background-color: #ffffff95;
    border: 3px solid rgba(255, 0, 0, 0.509);
diff --git a/installer/file-uploader/templates/index.html b/installer/file-uploader/templates/index.html
index 56859a7..3ec35c2 100644
--- a/installer/file-uploader/templates/index.html
+++ b/installer/file-uploader/templates/index.html
@@ -16,12 +16,12 @@
     Upload Files to InfluxDB
     (multiple csv files supported)
-            {% for name in bucket_names %} {% if current_bucket == name %}
+            {% for name in season_names %} {% if current_season == name %}
             {% else %}
@@ -65,11 +65,11 @@
     Task: {{ task_id }}
             d="M13 13h3a3 3 0 0 0 0-6h-.025A5.56 5.56 0 0 0 16 6.5 5.5 5.5 0 0 0 5.207 5.021C5.137 5.017 5.071 5 5 5a4 4 0 0 0 0 8h2.167M10 15V6m0 0L8 8m2-2 2 2" />
-            Click to upload CSV files or drag and drop
+            Click to upload CSV or zip, or drag and drop
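_safe_csv_temp_path is the server-side guard that keeps the _zN/<name>.csv relative paths produced by the uploader inside the extraction directory. The sketch below is not part of the patch; it restates the helper verbatim from the helper.py hunk above and exercises it against a real temp directory, with invented file names.

import os
import tempfile

def _safe_csv_temp_path(temp_dir: str, relative_csv_path: str) -> str:
    # Restated from the helper.py hunk above.
    base = os.path.abspath(temp_dir)
    rel = relative_csv_path.replace("\\", "/").lstrip("/")
    if not rel or rel.startswith("/"):
        raise ValueError("invalid relative path")
    for part in rel.split("/"):
        if part == "..":
            raise ValueError("path traversal in relative path")
    joined = os.path.normpath(os.path.join(base, rel))
    if joined != base and not joined.startswith(base + os.sep):
        raise ValueError("path escapes upload temp directory")
    parent = os.path.dirname(joined)
    if parent != base:
        os.makedirs(parent, exist_ok=True)
    return joined

with tempfile.TemporaryDirectory() as td:
    # Normal case: the per-archive subdirectory is created under the temp tree.
    print(_safe_csv_temp_path(td, "_z1/2026-04-01-12-00-00.csv"))
    # Hostile cases: both raise ValueError before any directory is created.
    for bad in ("../outside.csv", "a/../../outside.csv"):
        try:
            _safe_csv_temp_path(td, bad)
        except ValueError as exc:
            print(f"{bad!r} rejected: {exc}")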