-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathconfig.example.yaml
More file actions
266 lines (225 loc) · 8.59 KB
/
config.example.yaml
File metadata and controls
266 lines (225 loc) · 8.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
# Heta configuration template
#
# Usage:
# 1. cp config.example.yaml config.yaml
# 2. Fill in your API keys in the [providers] section below
# 3. docker-compose up -d
#
# ┌─────────────────────────────────────────────────────────────────────────┐
# │ DOMESTIC-ONLY SETUP (no OpenAI / Gemini needed) │
# │ If you only have DashScope + SiliconFlow accounts, replace each │
# │ <<: *openai → <<: *dashscope │
# │ <<: *gemini → <<: *dashscope │
# │ and change the model names as indicated in the comments below. │
# └─────────────────────────────────────────────────────────────────────────┘
# ═══════════════════════════════════════════════════════════════════════════
# STEP 1 — Fill in your API keys here (everything else is pre-configured)
# ═══════════════════════════════════════════════════════════════════════════
# Each provider defines an anchor (&name) that module sections below merge in
# via "<<: *name" (YAML merge keys), so a key only needs to be set once here.
providers:
  dashscope: &dashscope  # Alibaba Cloud DashScope — LLM + VLM + Embedding
    api_key: "YOUR_DASHSCOPE_API_KEY"
    base_url: "https://dashscope.aliyuncs.com/compatible-mode/v1/"
  siliconflow: &siliconflow  # SiliconFlow — Embedding
    api_key: "YOUR_SILICONFLOW_API_KEY"
    base_url: "https://api.siliconflow.cn/v1/"
  # ── Optional: international providers ────────────────────────────────────
  # Remove or leave empty if using domestic APIs only.
  openai: &openai  # OpenAI — MemoryKB LLM + Embedding
    api_key: "YOUR_OPENAI_API_KEY"
    base_url: "https://api.openai.com/v1"
  gemini: &gemini  # Google Gemini — HetaGen LLM
    api_key: "YOUR_GEMINI_API_KEY"
    base_url: "https://generativelanguage.googleapis.com/v1beta/openai/"
# ═══════════════════════════════════════════════════════════════════════════
# STEP 2 — Infrastructure
# Pre-configured for docker-compose. No changes needed.
# (Running without Docker? Change service names to 127.0.0.1)
# ═══════════════════════════════════════════════════════════════════════════
persistence:
  postgresql:
    host: postgres
    port: 5432
    user: postgres
    password: postgres
    database: postgres
    options: "-c search_path=public"  # passed to libpq; pins the schema
  milvus:
    host: milvus
    port: 19530
    url: "http://milvus:19530"
  neo4j:
    url: "bolt://neo4j:7687"
    username: "neo4j"
    password: "heta_password"
    database: "neo4j"
# ═══════════════════════════════════════════════════════════════════════════
# Module config — no changes needed for quick start
# ═══════════════════════════════════════════════════════════════════════════
heta:
  title: "Heta API"
  version: "0.1.0"
  fastapi:
    host: "0.0.0.0"
    port: 8000
    reload: false
    log_level: "info"
# ─────────────────────────────────────────────────────────────────────────
hetadb:
  title: "HetaDB API"
  version: "0.1.0"
  workspace: "workspace"
  fastapi:
    host: "0.0.0.0"
    port: 8001
    reload: false
    log_level: "info"
  llm:
    <<: *dashscope
    # NOTE(review): "Qwen/Qwen3-32B" is SiliconFlow-style naming; DashScope
    # model ids are normally lowercase without an org prefix (e.g. "qwen3-32b",
    # as the hetagen domestic-alternative comment below uses) — confirm this
    # model id is valid for the DashScope endpoint merged in above.
    model: "Qwen/Qwen3-32B"
    max_concurrent_requests: 10
    max_retries: 3
    timeout: 120
  vlm:
    <<: *dashscope
    model: "qwen3.5-plus"
    max_concurrent_requests: 10
    max_retries: 5
    timeout: 120
  embedding_api:
    <<: *siliconflow
    model: "BAAI/bge-m3"
    dim: 1024
    timeout: 30
    batch_size: 2000
    num_threads: 8
    # NOTE(review): nesting reconstructed — max_file_size_bytes (3 GiB) may
    # belong at the hetadb level (upload limit) rather than here; confirm
    # against the config loader.
    max_file_size_bytes: 3221225472
    max_retries: 5
    retry_delay: 2
  # File storage — uses MinIO from docker-compose (S3-compatible)
  s3:
    ak: "minioadmin"
    sk: "minioadmin"
    endpoint: "http://minio:9000"  # internal address used by the backend (Docker network)
    public_endpoint: "http://localhost:9000"  # address embedded in download URLs returned to clients
    # change to your server IP or domain for remote deployments
    bucket: "hetadb-raw-files"
    config:  # boto3/botocore Config options
      signature_version: "s3v4"
      retries:
        max_attempts: 3
        mode: "adaptive"
      read_timeout: 300
      connect_timeout: 60
  milvus:
    db_name: "hetadb"
    sentence_mode: false
  # Max concurrent dataset parse tasks. Each task spawns graph.max_workers
  # LLM threads, so effective LLM concurrency = parse_max_workers × llm.max_concurrent_requests.
  parse_max_workers: 2
  query_defaults:
    top_k: 10
    threshold: 0.0
    similarity_weight: 1.5
    occur_weight: 1.0
    reranker_url: ""  # Optional: fill in your reranker service URL to enable reranking
    search_params:
      ef_multiplier: 10
# ─────────────────────────────────────────────────────────────────────────
hetamem:
  title: "HetaMem API"
  version: "0.1.0"
  fastapi:
    host: "0.0.0.0"
    port: 8003
    reload: false
    log_level: "info"
  memorykb:
    # Default: OpenAI
    # Domestic alternative — replace the block below with:
    #   llm:
    #     <<: *dashscope
    #     model: "qwen-plus"
    llm:
      <<: *openai
      model: "gpt-4o-mini-2024-07-18"
    # Default: OpenAI text-embedding-3-small (dim=1536)
    # Domestic alternative — replace the block below with:
    #   embedding:
    #     <<: *dashscope
    #     model: "text-embedding-v4"
    #     dim: 1536
    embedding:
      <<: *openai
      model: "text-embedding-3-small"
      dim: 1536
  memoryvg:
    # LLM: already using DashScope by default
    # ("provider: openai" selects the OpenAI-compatible client; the merged-in
    # DashScope base_url/api_key point it at DashScope.)
    llm:
      provider: "openai"
      config:
        <<: *dashscope
        model: "qwen3-max"
    # Default: OpenAI text-embedding-3-large (dim=1024)
    # Domestic alternative — replace the block below with:
    #   embedder:
    #     provider: "openai"
    #     config:
    #       <<: *siliconflow
    #       model: "BAAI/bge-m3"
    #       embedding_dims: 1024
    embedder:
      provider: "openai"
      config:
        <<: *openai
        model: "text-embedding-3-large"
        embedding_dims: 1024
    vector_store:
      provider: "milvus"
      config:
        collection_name: "memoryvg"
        embedding_model_dims: 1024
        metric_type: "IP"
        db_name: "hetamem"
    graph_store:
      provider: "neo4j"
    # NOTE(review): nesting reconstructed — "v1.1" placed as the memoryvg
    # (mem0-style) config version, since hetamem itself already declares
    # version "0.1.0" above; confirm against the config loader.
    version: "v1.1"
# ─────────────────────────────────────────────────────────────────────────
hetagen:
  fastapi:
    host: "0.0.0.0"
    port: 8002
    reload: false
    log_level: "info"
  # Default: Google Gemini
  # Domestic alternative — replace the block below with:
  #   llm:
  #     <<: *dashscope
  #     model: "qwen3-32b"
  #     max_concurrent_requests: 10
  #     max_retries: 5
  #     timeout: 120
  llm:
    <<: *gemini
    model: "gemini-3-flash-preview"
    max_concurrent_requests: 10
    max_retries: 5
    timeout: 120
  vlm:
    <<: *siliconflow
    model: "Qwen/Qwen3-VL-32B-Instruct"
    max_concurrent_requests: 10
    max_retries: 5
    timeout: 120
  embedding_api:
    <<: *siliconflow
    model: "BAAI/bge-m3"
    dim: 1024
    timeout: 30
  milvus:
    db_name: "hetagen"
    collection_name: "top_100_embeddings"
    embedding_dim: 1024
    index_type: "IVF_FLAT"
    metric_type: "IP"
    nlist: 128
    batch_size: 100