14 changes: 7 additions & 7 deletions .github/workflows/build.yml
@@ -36,10 +36,10 @@ jobs:
python -m pip install .[test]

jupyter server extension list
jupyter server extension list 2>&1 | grep -ie "notebook_intelligence.*OK"
jupyter server extension list 2>&1 | grep -ie "lab_notebook_intelligence.*OK"

jupyter labextension list
jupyter labextension list 2>&1 | grep -ie "@notebook-intelligence/notebook-intelligence.*OK"
jupyter labextension list 2>&1 | grep -ie "@qbraid/lab-notebook-intelligence.*OK"
python -m jupyterlab.browser_check

- name: Package the extension
@@ -48,13 +48,13 @@

pip install build
python -m build
pip uninstall -y "notebook_intelligence" jupyterlab
pip uninstall -y "lab_notebook_intelligence" jupyterlab

- name: Upload extension packages
uses: actions/upload-artifact@v4
with:
name: extension-artifacts
path: dist/notebook_intelligence*
path: dist/lab_notebook_intelligence*
if-no-files-found: error

test_isolated:
@@ -77,14 +77,14 @@ jobs:
sudo rm -rf $(which node)
sudo rm -rf $(which node)

pip install "jupyterlab>=4.0.0,<5" notebook_intelligence*.whl
pip install "jupyterlab>=4.0.0,<5" lab_notebook_intelligence*.whl


jupyter server extension list
jupyter server extension list 2>&1 | grep -ie "notebook_intelligence.*OK"
jupyter server extension list 2>&1 | grep -ie "lab_notebook_intelligence.*OK"

jupyter labextension list
jupyter labextension list 2>&1 | grep -ie "@notebook-intelligence/notebook-intelligence.*OK"
jupyter labextension list 2>&1 | grep -ie "@qbraid/lab-notebook-intelligence.*OK"
python -m jupyterlab.browser_check --no-browser-test


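The renamed package and labextension can also be verified outside CI with the same checks the workflow runs. A minimal Python sketch of that check, assuming `jupyter` is on PATH and the wheel built from this PR is installed:

```python
# Minimal local sanity check mirroring the CI greps above; assumes `jupyter`
# is on PATH and the renamed wheel is already installed.
import re
import subprocess


def extension_listed(command: list[str], pattern: str) -> bool:
    # `jupyter ... extension list` writes to stderr on some versions, hence
    # the `2>&1` in the workflow; capture both streams here too.
    result = subprocess.run(command, capture_output=True, text=True)
    return re.search(pattern, result.stdout + result.stderr, re.IGNORECASE) is not None


assert extension_listed(
    ["jupyter", "server", "extension", "list"], r"lab_notebook_intelligence.*OK"
)
assert extension_listed(
    ["jupyter", "labextension", "list"], r"@qbraid/lab-notebook-intelligence.*OK"
)
```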
91 changes: 91 additions & 0 deletions .github/workflows/upload-s3-production.yml
@@ -0,0 +1,91 @@
name: Upload S3 production (deprecated)

on:
release:
types: [published]
workflow_dispatch:

jobs:
build:
name: Build wheel
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.11']
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install node
uses: actions/setup-node@v3
with:
node-version: '18.x'
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
architecture: 'x64'

- name: Get pip cache dir
id: pip-cache
run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT
- name: pip cache
uses: actions/cache@v3
with:
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/setup.py') }}
restore-keys: |
${{ runner.os }}-pip-${{ matrix.python-version }}-
${{ runner.os }}-pip-
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- name: Setup yarn cache
uses: actions/cache@v3
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
env:
# Increase this value to reset cache
CACHE_NUMBER: 3
with:
path: |
${{ steps.yarn-cache-dir-path.outputs.dir }}
**/node_modules
key: ${{ runner.os }}-yarn-${{ env.CACHE_NUMBER }}-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-${{ env.CACHE_NUMBER }}
${{ runner.os }}-yarn-
- name: Install dependencies and make wheels dir
run: python -m pip install -U "jupyter_packaging>=0.10,<2" "jupyterlab>=4.0.0,<5" pip wheel build

- name: Build the extension
run: python -m build

- uses: actions/upload-artifact@v4
if: matrix.python-version == '3.11'
with:
name: extension
path: dist/lab_notebook_intelligence*.whl
if-no-files-found: error

deploy:
name: Upload to Amazon S3
runs-on: ubuntu-latest

needs: build

steps:
- name: Checkout
uses: actions/checkout@v3
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-east-1
- uses: actions/download-artifact@v4
with:
name: extension

- name: Copy wheel file to S3 lab-extensions bucket
run: |
aws s3 rm s3://qbraid-lab-extensions/production/ --recursive --exclude "*" --include "lab_notebook_intelligence*.whl"
aws s3 cp ./ s3://qbraid-lab-extensions/production/ --recursive --exclude "*" --include "lab_notebook_intelligence*.whl"
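For reference, the remove-then-copy step above (and the staging workflow added below, which differs only in the `staging/` prefix) could be expressed with boto3 roughly as follows; this is a sketch, with credentials assumed to come from the environment as in the workflow:

```python
# Rough boto3 equivalent of the `aws s3 rm` / `aws s3 cp` filters above
# (a sketch; bucket and prefix come from the workflow, credentials are
# assumed to be provided by the environment).
import glob

import boto3

s3 = boto3.client("s3")
bucket = "qbraid-lab-extensions"
prefix = "production/"  # the staging workflow uses "staging/"

# Delete previously uploaded wheels for this package under the prefix.
listing = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)
for obj in listing.get("Contents", []):
    name = obj["Key"].rsplit("/", 1)[-1]
    if name.startswith("lab_notebook_intelligence") and name.endswith(".whl"):
        s3.delete_object(Bucket=bucket, Key=obj["Key"])

# Upload the freshly built wheel(s).
for wheel in glob.glob("lab_notebook_intelligence*.whl"):
    s3.upload_file(wheel, bucket, prefix + wheel)
```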
92 changes: 92 additions & 0 deletions .github/workflows/upload-s3-staging.yml
@@ -0,0 +1,92 @@
name: Upload S3 Staging

on:
push:
branches:
- main
workflow_dispatch:

jobs:
build:
name: Build wheel
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.11']
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install node
uses: actions/setup-node@v3
with:
node-version: '18.x'
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
architecture: 'x64'

- name: Get pip cache dir
id: pip-cache
run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT
- name: pip cache
uses: actions/cache@v3
with:
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/setup.py') }}
restore-keys: |
${{ runner.os }}-pip-${{ matrix.python-version }}-
${{ runner.os }}-pip-
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- name: Setup yarn cache
uses: actions/cache@v3
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
env:
# Increase this value to reset cache
CACHE_NUMBER: 3
with:
path: |
${{ steps.yarn-cache-dir-path.outputs.dir }}
**/node_modules
key: ${{ runner.os }}-yarn-${{ env.CACHE_NUMBER }}-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-${{ env.CACHE_NUMBER }}
${{ runner.os }}-yarn-
- name: Install dependencies and make wheels dir
run: python -m pip install -U "jupyter_packaging>=0.10,<2" "jupyterlab>=4.0.0,<5" pip wheel build

- name: Build the extension
run: python -m build

- uses: actions/upload-artifact@v4
if: matrix.python-version == '3.11'
with:
name: extension
path: dist/lab_notebook_intelligence*.whl
if-no-files-found: error

deploy:
name: Upload to Amazon S3
runs-on: ubuntu-latest

needs: build

steps:
- name: Checkout
uses: actions/checkout@v3
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-east-1
- uses: actions/download-artifact@v4
with:
name: extension

- name: Copy wheel file to S3 lab-extensions bucket
run: |
aws s3 rm s3://qbraid-lab-extensions/staging/ --recursive --exclude "*" --include "lab_notebook_intelligence*.whl"
aws s3 cp ./ s3://qbraid-lab-extensions/staging/ --recursive --exclude "*" --include "lab_notebook_intelligence*.whl"
4 changes: 2 additions & 2 deletions .gitignore
@@ -7,9 +7,9 @@ node_modules/
*.egg-info/
.ipynb_checkpoints
*.tsbuildinfo
notebook_intelligence/labextension
lab_notebook_intelligence/labextension
# Version file is handled by hatchling
notebook_intelligence/_version.py
lab_notebook_intelligence/_version.py

# Created by https://www.gitignore.io/api/python
# Edit at https://www.gitignore.io/?templates=python
18 changes: 9 additions & 9 deletions README.md
@@ -117,16 +117,16 @@ You can easily add MCP servers to NBI by editing the configuration file [~/.jupy

```json
{
"mcpServers": {
"filesystem": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-filesystem",
"/Users/mbektas/mcp-test"
]
}
"mcpServers": {
"filesystem": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-filesystem",
"/Users/mbektas/mcp-test"
]
}
}
}
```

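Since the MCP server block above is plain JSON, a quick parse-and-shape check can catch typos before restarting JupyterLab. A sketch; the real config path is truncated in the hunk header above, so the path used here is a placeholder:

```python
# Quick shape check for the MCP server config shown above (a sketch; the
# actual config path is truncated in the diff header, so this is a placeholder).
import json
from pathlib import Path

config_path = Path("nbi-config.json")  # placeholder path, not the real location
config = json.loads(config_path.read_text())

for name, server in config.get("mcpServers", {}).items():
    assert "command" in server, f"MCP server '{name}' has no 'command'"
    print(name, server["command"], *server.get("args", []))
```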
4 changes: 2 additions & 2 deletions install.json
@@ -1,5 +1,5 @@
{
"packageManager": "python",
"packageName": "notebook_intelligence",
"uninstallInstructions": "Use your Python package manager (pip, conda, etc.) to uninstall the package notebook_intelligence"
"packageName": "lab_notebook_intelligence",
"uninstallInstructions": "Use your Python package manager (pip, conda, etc.) to uninstall the package lab_notebook_intelligence"
}
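install.json is the metadata JupyterLab surfaces when listing or uninstalling the prebuilt extension, so the rename here has to match the new wheel name. A short sketch of reading it back, using exactly the field names shown above:

```python
# Read back the renamed metadata from install.json (a sketch; field names
# are the ones shown in the diff above).
import json

with open("install.json") as f:
    meta = json.load(f)

assert meta["packageManager"] == "python"
assert meta["packageName"] == "lab_notebook_intelligence"
print(meta["uninstallInstructions"])
```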
2 changes: 1 addition & 1 deletion jupyter-config/server-config/notebook_intelligence.json
@@ -1,7 +1,7 @@
{
"ServerApp": {
"jpserver_extensions": {
"notebook_intelligence": true
"lab_notebook_intelligence": true
}
}
}
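The server-config stub keeps its original filename but now enables the renamed module. A sketch of validating it from the repo root:

```python
# Check that the server-config stub enables the renamed server extension
# (a sketch; the path is the one shown in the diff header above).
import json

with open("jupyter-config/server-config/notebook_intelligence.json") as f:
    server_config = json.load(f)

assert server_config["ServerApp"]["jpserver_extensions"]["lab_notebook_intelligence"] is True
```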
@@ -7,7 +7,7 @@
# in editable mode with pip. It is highly recommended to install
# the package from a stable release or in editable mode: https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs
import warnings
warnings.warn("Importing 'notebook_intelligence' outside a proper installation.")
warnings.warn("Importing 'lab_notebook_intelligence' outside a proper installation.")
__version__ = "dev"

import logging
@@ -19,12 +19,12 @@
def _jupyter_labextension_paths():
return [{
"src": "labextension",
"dest": "@notebook-intelligence/notebook-intelligence"
"dest": "@qbraid/lab-notebook-intelligence"
}]


def _jupyter_server_extension_points():
return [{
"module": "notebook_intelligence",
"module": "lab_notebook_intelligence",
"app": NotebookIntelligence
}]
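With the hooks above renamed, Jupyter discovers the extension through the new module name and the new labextension destination. A sketch of exercising both hooks directly, assuming the renamed package is importable:

```python
# Exercise the renamed discovery hooks shown above (a sketch; assumes the
# renamed package is installed or importable from the repo).
import lab_notebook_intelligence as nbi

labext = nbi._jupyter_labextension_paths()[0]
assert labext["dest"] == "@qbraid/lab-notebook-intelligence"

serverext = nbi._jupyter_server_extension_points()[0]
assert serverext["module"] == "lab_notebook_intelligence"
```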
@@ -6,16 +6,16 @@
import sys
from typing import Dict
import logging
from notebook_intelligence import github_copilot
from notebook_intelligence.api import ButtonData, ChatModel, EmbeddingModel, InlineCompletionModel, LLMProvider, ChatParticipant, ChatRequest, ChatResponse, CompletionContext, ContextRequest, Host, CompletionContextProvider, MCPServer, MarkdownData, NotebookIntelligenceExtension, TelemetryEvent, TelemetryListener, Tool, Toolset
from notebook_intelligence.base_chat_participant import BaseChatParticipant
from notebook_intelligence.config import NBIConfig
from notebook_intelligence.github_copilot_chat_participant import GithubCopilotChatParticipant
from notebook_intelligence.llm_providers.github_copilot_llm_provider import GitHubCopilotLLMProvider
from notebook_intelligence.llm_providers.litellm_compatible_llm_provider import LiteLLMCompatibleLLMProvider
from notebook_intelligence.llm_providers.ollama_llm_provider import OllamaLLMProvider
from notebook_intelligence.llm_providers.openai_compatible_llm_provider import OpenAICompatibleLLMProvider
from notebook_intelligence.mcp_manager import MCPManager
from lab_notebook_intelligence import github_copilot
from lab_notebook_intelligence.api import ButtonData, ChatModel, EmbeddingModel, InlineCompletionModel, LLMProvider, ChatParticipant, ChatRequest, ChatResponse, CompletionContext, ContextRequest, Host, CompletionContextProvider, MCPServer, MarkdownData, NotebookIntelligenceExtension, TelemetryEvent, TelemetryListener, Tool, Toolset
from lab_notebook_intelligence.base_chat_participant import BaseChatParticipant
from lab_notebook_intelligence.config import NBIConfig
from lab_notebook_intelligence.github_copilot_chat_participant import GithubCopilotChatParticipant
from lab_notebook_intelligence.llm_providers.github_copilot_llm_provider import GitHubCopilotLLMProvider
from lab_notebook_intelligence.llm_providers.litellm_compatible_llm_provider import LiteLLMCompatibleLLMProvider
from lab_notebook_intelligence.llm_providers.ollama_llm_provider import OllamaLLMProvider
from lab_notebook_intelligence.llm_providers.openai_compatible_llm_provider import OpenAICompatibleLLMProvider
from lab_notebook_intelligence.mcp_manager import MCPManager

log = logging.getLogger(__name__)

@@ -288,7 +288,7 @@ def get_chat_participant(self, prompt: str) -> ChatParticipant:
async def handle_chat_request(self, request: ChatRequest, response: ChatResponse, options: dict = {}) -> None:
if self.chat_model is None:
response.stream(MarkdownData("Chat model is not set!"))
response.stream(ButtonData("Configure", "notebook-intelligence:open-configuration-dialog"))
response.stream(ButtonData("Configure", "lab-notebook-intelligence:open-configuration-dialog"))
response.finish()
return
request.host = self
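Anything that imported the old `notebook_intelligence.*` paths needs the same one-for-one swap shown in this hunk. A sketch of a smoke test over the renamed import paths that appear above, assuming the package is installed:

```python
# Import smoke test over the renamed module paths shown in the hunk above
# (a sketch; only paths that appear in this diff are exercised).
import importlib

RENAMED_MODULES = [
    "lab_notebook_intelligence.api",
    "lab_notebook_intelligence.base_chat_participant",
    "lab_notebook_intelligence.config",
    "lab_notebook_intelligence.github_copilot",
    "lab_notebook_intelligence.mcp_manager",
]

for module_name in RENAMED_MODULES:
    importlib.import_module(module_name)
print("all renamed imports resolve")
```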
@@ -1,7 +1,6 @@
# Copyright (c) Mehmet Bektas <[email protected]>

import asyncio
import json
from typing import Any, Callable, Dict, Union
from dataclasses import asdict, dataclass
from enum import Enum
@@ -10,7 +9,7 @@
import logging
from mcp.server.fastmcp.tools import Tool as MCPToolClass

from notebook_intelligence.config import NBIConfig
from lab_notebook_intelligence.config import NBIConfig

log = logging.getLogger(__name__)

@@ -515,12 +514,15 @@ async def _tool_call_loop(tool_call_rounds: list):
tool_call_rounds = tool_call_rounds[1:]

tool_name = tool_call['function']['name']
print("Tool name is : ", tool_name)
tool_to_call = self._get_tool_by_name(tool_name)
if tool_to_call is None:
log.error(f"Tool not found: {tool_name}, args: {tool_call['function']['arguments']}")
response.stream(MarkdownData("Oops! Failed to find requested tool. Please try again with a different prompt."))
response.finish()
return

print("Tool to call is : ", tool_to_call)

if type(tool_call['function']['arguments']) is dict:
args = tool_call['function']['arguments']
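The two `print` calls added in this hunk bypass the module logger that the file already configures. A hedged alternative, purely illustrative and not part of the PR, would route the same output through `log.debug`:

```python
# Illustrative only, not part of this PR. The file above already defines
# `log = logging.getLogger(__name__)`; the same debug output could go
# through it instead of print().
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

tool_name = "example_tool"  # stands in for tool_call['function']['name']
log.debug("Tool name is: %s", tool_name)
```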