# Memory & Upload Configuration
# APP_MEMORY_MB: Total RAM (in MB) to allocate to the backend container.
# Everything else is derived automatically from this one value:
# - JVM heap = 75% of APP_MEMORY_MB
# - Max upload size = 25% of APP_MEMORY_MB
# - Nginx body limit = max upload + 50 MB multipart buffer
# - Proxy / analysis timeout scales with memory (300–900 s)
#
# Examples:
# 2048 → 512 MB max upload (default, suitable for most laptops)
# 4096 → 1 GB max upload
# 8192 → 2 GB max upload
APP_MEMORY_MB=2048
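# Quick sanity check of the derived sizes for a given APP_MEMORY_MB (POSIX shell;
# an illustrative sketch, not the authoritative derivation, which happens inside
# the containers):
#   MB=2048
#   echo "heap=$((MB * 75 / 100))MB upload=$((MB * 25 / 100))MB nginx_body=$((MB * 25 / 100 + 50))MB"
#   # → heap=1536MB upload=512MB nginx_body=562MB
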
# Nginx Port Configuration
# Default: 80 (HTTP standard port)
# Change if port 80 is already in use on your system
NGINX_PORT=80
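# If port 80 is taken, pick a free one (e.g. NGINX_PORT=8080) and include it in the
# URL, e.g. http://localhost:8080. On Linux, `ss -ltn` lists ports already in use.
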
# File Retention Configuration
# Files are automatically deleted after this many hours
FILE_RETENTION_HOURS=12
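# Example: with FILE_RETENTION_HOURS=12, a file uploaded at 09:00 is removed
# around 21:00 the same day.
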
# LLM Configuration (Local LLM Server)
# Use a local LLM server (LM Studio, Ollama) or OpenAI API
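# Typical base URLs (the servers' usual defaults; adjust to your setup):
#   LM Studio: http://localhost:1234/v1   (LM Studio's default port)
#   Ollama:    http://localhost:11434/v1  (Ollama's OpenAI-compatible endpoint)
#   OpenAI:    https://api.openai.com/v1  (also set LLM_API_KEY)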
LLM_API_BASE_URL=http://localhost:1234/v1
# LLM_API_KEY: API key for the LLM server. Usually left empty for local servers;
# required for the OpenAI API.
LLM_API_KEY=
# LLM_MODEL: Model identifier exactly as your LLM server exposes it.
LLM_MODEL=Qwen2.5-14B-Coder-Instruct
# LLM_TEMPERATURE: Sampling temperature; lower values give more deterministic output.
LLM_TEMPERATURE=0.7

# LLM_MAX_TOKENS: Maximum number of tokens the LLM may generate in a single response.
# This is sent as `max_tokens` in every chat completion request and controls response
# length only — it does NOT affect the model's context window size.
# The LLM server enforces: prompt_tokens + LLM_MAX_TOKENS ≤ context_window.
# Increase if stories are getting cut off; decrease to save compute.
# Recommended: 4000–8000 for most local models.
LLM_MAX_TOKENS=8000
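# Worked example: with a 32768-token context window and LLM_MAX_TOKENS=8000, the
# prompt may use at most 32768 - 8000 = 24768 tokens before the server rejects it.
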
# LLM_CONTEXT_LENGTH: The context window size (in tokens) configured on your LLM server.
# Set this to match (or be safely under) the context length of the model you are running.
# The backend uses this value to detect prompt-too-large errors early and to report
# accurate token counts in error messages.
# If unset, the backend auto-detects it from the /v1/models endpoint; if that also
# fails, a conservative internal default is used.
# Example: 32768 for a 32k model, 131072 for a 128k model.
# LLM_CONTEXT_LENGTH=32768
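# To see what your server reports (response fields vary by server; some include the
# model's context length, some do not):
#   curl http://localhost:1234/v1/models
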
# LLM_TIMEOUT: HTTP timeout in seconds for LLM API requests
# Local models (e.g. LM Studio) can be slow — increase if you get timeout errors.
# Recommended: 300 seconds (5 minutes) for local models.
LLM_TIMEOUT=300
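# Note (assumes analysis requests sit behind the Nginx proxy while waiting on the
# LLM): keep the proxy/analysis timeout derived from APP_MEMORY_MB (300–900 s above)
# at or above LLM_TIMEOUT, or long LLM calls may be cut off at the proxy first.
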
# Overview Applications Configuration
# OVERVIEW_APPS_LIMITED: Set to true to cap the number of detected applications shown in the overview.
# Set to false to show all detected applications regardless of count.
OVERVIEW_APPS_LIMITED=true
# OVERVIEW_APPS_MAX: Maximum number of detected applications to show in the overview when OVERVIEW_APPS_LIMITED=true.
# Applications are sorted alphabetically. Increase this if your captures regularly identify many distinct apps.
OVERVIEW_APPS_MAX=100
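# Example: with the defaults above, a capture that identifies 250 distinct
# applications shows only the first 100, alphabetically.
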
# Frontend Configuration
# VITE_SUPPORTED_FILE_TYPES: comma-separated list of accepted upload extensions
VITE_SUPPORTED_FILE_TYPES=.pcap,.pcapng,.cap
# VITE_ANALYSIS_OPTIONS: set to true to show the pre-upload analysis options modal
VITE_ANALYSIS_OPTIONS=false

# VITE_NETWORK_DIAGRAM_CONVERSATION_LIMIT: Set to false to disable the 500-conversation
# rendering limit in the Network Topology Diagram; set to true to keep it. Disabling the
# limit loads every conversation into the diagram, which may cause browser slowdowns or
# out-of-memory errors on large captures.
VITE_NETWORK_DIAGRAM_CONVERSATION_LIMIT=false