# Honcho Configuration File
# This file demonstrates all available configuration options.
# Copy this to config.toml and modify as needed.
# Environment variables will override these values.
# Application-level settings
[app]
LOG_LEVEL = "INFO"
SESSION_OBSERVERS_LIMIT = 10
GET_CONTEXT_MAX_TOKENS = 100000
MAX_FILE_SIZE = 5242880 # 5MB
MAX_MESSAGE_SIZE = 25000 # Characters
EMBED_MESSAGES = true
# LANGFUSE_HOST = "https://api.langfuse.com"
# LANGFUSE_PUBLIC_KEY = "your-public-key-here"
# COLLECT_METRICS_LOCAL = false
# LOCAL_METRICS_FILE = "metrics.jsonl"
# REASONING_TRACES_FILE = "traces.jsonl" # Path to JSONL file for reasoning traces
NAMESPACE = "honcho"
# Database settings
[db]
CONNECTION_URI = "postgresql+psycopg://postgres:postgres@localhost:5432/postgres"
SCHEMA = "public"
POOL_CLASS = "default"
POOL_PRE_PING = true
POOL_SIZE = 10
MAX_OVERFLOW = 20
POOL_TIMEOUT = 30 # seconds
POOL_RECYCLE = 300 # seconds
POOL_USE_LIFO = true
SQL_DEBUG = false
TRACING = false
# Authentication settings
[auth]
USE_AUTH = false
JWT_SECRET = "your-secret-key-here" # Must be set if USE_AUTH is true
# Sentry settings
[sentry]
ENABLED = false
DSN = ""
RELEASE = ""
ENVIRONMENT = "development"
TRACES_SAMPLE_RATE = 0.1
PROFILES_SAMPLE_RATE = 0.1
# LLM settings
[llm]
DEFAULT_MAX_TOKENS = 2500
MAX_TOOL_OUTPUT_CHARS = 10000 # Max chars for tool output (~2500 tokens)
MAX_MESSAGE_CONTENT_CHARS = 2000 # Max chars per message in tool results
# API Keys for LLM providers (set the ones you need)
# Supported transports: openai, anthropic, gemini
# Base URLs are set per-module via model_config.overrides.base_url
# Built-in text-generation defaults use openai / gpt-5.4-mini.
# Embeddings default to openai / text-embedding-3-small.
OPENAI_API_KEY = "your-api-key-here"
# ANTHROPIC_API_KEY = "your-api-key"
# GEMINI_API_KEY = "your-api-key"
# Embedding settings
[embedding]
VECTOR_DIMENSIONS = 1536
MAX_INPUT_TOKENS = 8192
MAX_TOKENS_PER_REQUEST = 300000
[embedding.model_config]
transport = "openai"
model = "text-embedding-3-small"
# Optional module-level endpoint overrides
# [embedding.model_config.overrides]
# base_url = "https://embedding-proxy.internal.example/v1"
# api_key_env = "EMBEDDING_CUSTOM_API_KEY"
# Deriver settings
[deriver]
ENABLED = true
WORKERS = 1
POLLING_SLEEP_INTERVAL_SECONDS = 1.0
STALE_SESSION_TIMEOUT_MINUTES = 5
# QUEUE_ERROR_RETENTION_SECONDS = 2592000 # 30 days
DEDUPLICATE = true
LOG_OBSERVATIONS = false
MAX_INPUT_TOKENS = 23000
WORKING_REPRESENTATION_MAX_OBSERVATIONS = 100
REPRESENTATION_BATCH_MAX_TOKENS = 1024
FLUSH_ENABLED = false # Bypass batch token threshold, process work immediately
[deriver.model_config]
transport = "openai"
model = "gpt-5.4-mini"
# temperature = 0.0
# thinking_effort = "minimal"
# thinking_budget_tokens = 1024
# max_output_tokens = 4096
# Optional module-level endpoint overrides
# transport = "openai"
# model = "my-local-model"
# [deriver.model_config.overrides]
# base_url = "https://llm.internal.example/v1"
# api_key_env = "DERIVER_CUSTOM_API_KEY"
# Optional fallback model
# [deriver.model_config.fallback]
# transport = "anthropic"
# model = "claude-haiku-4-5"
# [deriver.model_config.fallback.overrides]
# base_url = "https://llm-backup.internal.example/v1"
# api_key_env = "DERIVER_CUSTOM_BACKUP_API_KEY"
# [deriver.model_config.overrides.provider_params]
# verbosity = "low"
# Peer card settings
[peer_card]
ENABLED = true
# Dialectic settings
[dialectic]
MAX_OUTPUT_TOKENS = 8192
MAX_INPUT_TOKENS = 100000
HISTORY_TOKEN_LIMIT = 8192
SESSION_HISTORY_MAX_TOKENS = 4096
# Per-level settings for reasoning levels
# MAX_OUTPUT_TOKENS is optional per level; if not set, uses global MAX_OUTPUT_TOKENS
[dialectic.levels.minimal]
MAX_TOOL_ITERATIONS = 1
MAX_OUTPUT_TOKENS = 250
TOOL_CHOICE = "auto"
[dialectic.levels.minimal.model_config]
transport = "openai"
model = "gpt-5.4-mini"
[dialectic.levels.low]
MAX_TOOL_ITERATIONS = 5
TOOL_CHOICE = "auto"
[dialectic.levels.low.model_config]
transport = "openai"
model = "gpt-5.4-mini"
[dialectic.levels.medium]
MAX_TOOL_ITERATIONS = 2
[dialectic.levels.medium.model_config]
transport = "openai"
model = "gpt-5.4-mini"
[dialectic.levels.high]
MAX_TOOL_ITERATIONS = 4
[dialectic.levels.high.model_config]
transport = "openai"
model = "gpt-5.4-mini"
[dialectic.levels.max]
MAX_TOOL_ITERATIONS = 10
[dialectic.levels.max.model_config]
transport = "openai"
model = "gpt-5.4-mini"
# [dialectic.levels.max.model_config.fallback]
# transport = "gemini"
# model = "gemini-2.5-pro"
# Summary settings
[summary]
ENABLED = true
MESSAGES_PER_SHORT_SUMMARY = 20
MESSAGES_PER_LONG_SUMMARY = 60
MAX_TOKENS_SHORT = 1000
MAX_TOKENS_LONG = 4000
[summary.model_config]
transport = "openai"
model = "gpt-5.4-mini"
# thinking_effort = "minimal"
# thinking_budget_tokens = 1024
# [summary.model_config.fallback]
# transport = "anthropic"
# model = "claude-haiku-4-5"
# Dream settings
[dream]
ENABLED = true
DOCUMENT_THRESHOLD = 50
IDLE_TIMEOUT_MINUTES = 60
MIN_HOURS_BETWEEN_DREAMS = 8
ENABLED_TYPES = ["omni"]
MAX_TOOL_ITERATIONS = 20
HISTORY_TOKEN_LIMIT = 16384
[dream.deduction_model_config]
transport = "openai"
model = "gpt-5.4-mini"
[dream.induction_model_config]
transport = "openai"
model = "gpt-5.4-mini"
# Surprisal-based sampling subsystem
[dream.surprisal]
ENABLED = false
TREE_TYPE = "kdtree" # Options: kdtree, balltree, rptree, covertree, lsh, graph, prototype
TREE_K = 5 # k for kNN-based trees
SAMPLING_STRATEGY = "recent" # Options: recent, random, all
SAMPLE_SIZE = 200
TOP_PERCENT_SURPRISAL = 0.10 # Top 10% of observations
MIN_HIGH_SURPRISAL_FOR_REPLACE = 10
INCLUDE_LEVELS = ["explicit", "deductive"]
# Webhook settings
[webhook]
SECRET = ""
MAX_WORKSPACE_LIMIT = 10
# Prometheus metrics settings (pull-based metrics)
[metrics]
ENABLED = false
# NAMESPACE = "honcho" # Inherits from app.NAMESPACE if not set
# CloudEvents telemetry settings (analytics events)
[telemetry]
ENABLED = false
# ENDPOINT = "https://telemetry.honcho.dev/v1/events"
# HEADERS = '{"Authorization": "Bearer your-token"}' # JSON string for auth headers
BATCH_SIZE = 100
FLUSH_INTERVAL_SECONDS = 1.0
FLUSH_THRESHOLD = 50
MAX_RETRIES = 3
MAX_BUFFER_SIZE = 10000
# NAMESPACE = "honcho" # Inherits from app.NAMESPACE if not set
# Cache settings
[cache]
ENABLED = false
URL = "redis://localhost:6379/0?suppress=true"
# NAMESPACE = "honcho" # Inherits from app.NAMESPACE if not set
DEFAULT_TTL_SECONDS = 300
DEFAULT_LOCK_TTL_SECONDS = 5
# Vector store settings
[vector_store]
# Vector store type: "pgvector", "turbopuffer", or "lancedb"
TYPE = "pgvector"
# Migration flag: set to true when migration from pgvector is complete
MIGRATED = false
NAMESPACE = "honcho"
# This should match embedding.VECTOR_DIMENSIONS. pgvector and dual-write mode
# currently still require 1536 until a schema migration lands.
DIMENSIONS = 1536
# TURBOPUFFER_API_KEY = "your-turbopuffer-api-key"
# TURBOPUFFER_REGION = "us-east-1"
LANCEDB_PATH = "./lancedb_data"
RECONCILIATION_INTERVAL_SECONDS = 300