10 changes: 5 additions & 5 deletions .github/workflows/docker-publish-ghcr.yml
@@ -16,24 +16,24 @@ jobs:

    steps:
      - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8

      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435

      - name: Log in to GHCR
-        uses: docker/login-action@v3
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and Push to GHCR with Repo Name
-        uses: docker/build-push-action@v6
+        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
        with:
          context: .
          push: true
          platforms: linux/amd64,linux/arm64
          tags: |
            ghcr.io/owasp/lets-threat-model:latest
-            ghcr.io/owasp/lets-threat-model:${{ github.sha }}
+            ghcr.io/owasp/lets-threat-model:${{ github.sha }}
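Here (and in `test.yml` below) the `uses:` references are pinned to full commit SHAs instead of mutable tags such as `@v5`, so re-tagging an action upstream cannot silently change what runs in CI. The helper below is an illustrative sketch, not part of this PR: one way to look up the commit SHA behind a tag via the GitHub REST API so it can be pinned (the function name and token handling are assumptions; dedicated tooling such as Dependabot can keep pinned SHAs up to date).

```python
# Illustrative helper (not part of this PR): resolve an action reference such as
# "actions/checkout@v5" to the full commit SHA you would pin in `uses:`.
# Assumes the `requests` package and, optionally, a GITHUB_TOKEN to avoid rate limits.
import os

import requests


def resolve_action_sha(action_ref: str) -> str:
    """Return the commit SHA that an `owner/repo@tag` reference currently points to."""
    repo, tag = action_ref.split("@", 1)
    headers = {"Accept": "application/vnd.github+json"}
    token = os.environ.get("GITHUB_TOKEN")
    if token:
        headers["Authorization"] = f"Bearer {token}"
    # GET /repos/{owner}/{repo}/commits/{ref} returns the commit a ref resolves to.
    resp = requests.get(
        f"https://api.github.com/repos/{repo}/commits/{tag}", headers=headers
    )
    resp.raise_for_status()
    return resp.json()["sha"]


if __name__ == "__main__":
    print(resolve_action_sha("actions/checkout@v5"))
```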
12 changes: 6 additions & 6 deletions .github/workflows/test.yml
@@ -27,22 +27,22 @@ jobs:
    steps:
      # Step 1: Checkout code
      - name: Checkout code
-        uses: actions/checkout@v5
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8

      # Step 2: Set up Python
      - name: Set up Python
-        uses: actions/setup-python@v6
+        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c
        with:
          python-version: '3.12' # Match your local version

      # Step 3: Install dependencies
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
-          pip install -r requirements.txt
-          pip install -r requirements-dev.txt
-
+          python -m pip install -r requirements.txt
+          python -m pip install -r requirements-dev.txt
      # Step 4: Run pytest (will use pytest.ini automatically)
      - name: Run pytest
        run: |
-          pytest
+          pytest
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -1,9 +1,9 @@
-repos:
+repos:
  - repo: local
    hooks:
      - id: run-pytest
        name: Run pytest before commit
        entry: pytest tests/ -m "not agent" --asyncio-mode=auto --disable-warnings
        language: system
        pass_filenames: false
-        types: [python]
+        types: [python]
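The hook's `-m "not agent"` filter relies on pytest markers: tests that exercise live LLM agents can be tagged `agent` and are then skipped at commit time while remaining runnable in CI or on demand. A minimal sketch of such a test is below; the module name and test bodies are hypothetical, and the `agent` marker is assumed to be registered under `markers` in the repo's `pytest.ini`.

```python
# tests/test_agent_smoke.py -- hypothetical example of a test excluded by the
# pre-commit hook's `-m "not agent"` filter.
import pytest


@pytest.mark.agent  # assumes an `agent` marker is registered in pytest.ini
async def test_agent_generates_threats():
    # With --asyncio-mode=auto (and pytest-asyncio installed), plain async tests run as-is.
    # Placeholder body; a real test would drive an LLM agent end to end.
    assert True


def test_fast_unit_check():
    # Unmarked tests like this one still run on every pre-commit invocation.
    assert 1 + 1 == 2
```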
4 changes: 2 additions & 2 deletions README.md
@@ -100,7 +100,8 @@ After running the container, reports will be generated only if you specify the c
### **2. Install Dependencies**
Ensure you have Python **3.8+** installed, then install dependencies:
```sh
-pip install -r requirements.txt
+python -m pip install -r requirements.txt
+python -m pip install -r requirements-dev.txt
```

---
@@ -289,7 +290,6 @@ source venv/bin/activate # On macOS/Linux
venv\Scripts\activate # On Windows
pip install -r requirements.txt
```
-
---

## 🏗 Contributing
62 changes: 8 additions & 54 deletions core/agents/agent_tools.py
@@ -1,22 +1,19 @@
import logging
-from typing import Any
import uuid
+from typing import Any

from langchain.chat_models.base import BaseChatModel
-from tenacity import (
-    retry,
-    retry_if_exception,
-    wait_exponential,
-    stop_after_attempt,
-    before_sleep_log,
-)
-from langchain_core.runnables.base import Runnable
-from langchain_core.runnables.utils import Input

from core.utils.llm_concurrency import llm_concurrency_guard


logger = logging.getLogger(__name__)

+DEFAULT_RETRY_PARAMS = {
+    "stop_after_attempt": 5,
+    "wait_exponential_jitter": True,
+    "exponential_jitter_params": {"initial": 2.0, "max": 60.0},
+}


class AgentHelper:
    """
@@ -205,46 +202,3 @@ def get_model_name(model: BaseChatModel):
        if hasattr(model, attr):
            return getattr(model, attr)
    return "Unknown Model"
-
-
-def is_rate_limit_error(exception: BaseException) -> bool:
-    # Example for HTTP errors:
-    if (
-        hasattr(exception, "status_code")
-        and getattr(exception, "status_code", None) == 429
-    ):
-        return True
-    # OR parse error message or code if it's embedded in exception.args
-    return False
-
-
-# General async retry wrapper for any chain.ainvoke call
-@retry(
-    retry=retry_if_exception(is_rate_limit_error),
-    wait=wait_exponential(multiplier=1, min=2, max=60), # Exponential backoff
-    stop=stop_after_attempt(5), # Stop after 5 tries
-    before_sleep=before_sleep_log(logger, logging.WARNING),
-    reraise=True, # Raise exception after all retries fail
-)
-async def ainvoke_with_retry(chain: Runnable, input: Input):
-    # Optionally serialize inputs (if needed)
-    # inputs = {k: json.dumps(v) for k, v in inputs.items()} # Uncomment if your inputs need serialization
-
-    logger.debug(f"Invoking chain with inputs: {input}")
-    async with llm_concurrency_guard():
-        return await chain.ainvoke(input)
-
-
-@retry(
-    retry=retry_if_exception(is_rate_limit_error),
-    wait=wait_exponential(multiplier=1, min=2, max=60), # Exponential backoff
-    stop=stop_after_attempt(5), # Stop after 5 attempts
-    before_sleep=before_sleep_log(logger, logging.WARNING),
-    reraise=True, # Raise the exception if it keeps failing
-)
-def invoke_with_retry(chain: Runnable, input: Input):
-    """
-    Wrapper function to retry chain.invoke() on rate limit (HTTP 429) errors.
-    """
-    logger.debug(f"Invoking chain synchronously with inputs: {input}")
-    return chain.invoke(input)
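The deleted block drops the hand-rolled tenacity wrappers (`ainvoke_with_retry` / `invoke_with_retry`). The new module-level `DEFAULT_RETRY_PARAMS` uses keys that match the keyword arguments of `Runnable.with_retry()` in recent langchain-core releases (which also accept `exponential_jitter_params`), suggesting retries are now delegated to the runnable itself. The call sites are outside the visible hunks, so the sketch below is an assumption about how the params might be applied while keeping the concurrency guard around the invocation.

```python
# Hedged sketch (not shown in this diff): applying DEFAULT_RETRY_PARAMS via
# langchain-core's built-in retry support instead of tenacity. Assumes a
# langchain-core version whose Runnable.with_retry() accepts
# `exponential_jitter_params`; the function name is hypothetical.
import logging

from langchain_core.runnables.base import Runnable
from langchain_core.runnables.utils import Input

from core.utils.llm_concurrency import llm_concurrency_guard

logger = logging.getLogger(__name__)

DEFAULT_RETRY_PARAMS = {
    "stop_after_attempt": 5,
    "wait_exponential_jitter": True,
    "exponential_jitter_params": {"initial": 2.0, "max": 60.0},
}


async def ainvoke_with_defaults(chain: Runnable, input: Input):
    """Invoke a chain with exponential-backoff retries and the shared concurrency guard."""
    retrying_chain = chain.with_retry(**DEFAULT_RETRY_PARAMS)
    logger.debug("Invoking chain with inputs: %s", input)
    async with llm_concurrency_guard():
        return await retrying_chain.ainvoke(input)
```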