|
97 | 97 | }, |
98 | 98 | "outputs": [], |
99 | 99 | "source": [ |
100 | | - "!ollama pull llama3.2:1b" |
| 100 | + "!ollama pull llama3.2:3b" |
101 | 101 | ] |
102 | 102 | }, |
103 | 103 | { |
|
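Since the notebook now targets the larger `llama3.2:3b` tag, it can be worth confirming the pull succeeded before the agent cells run. A minimal sketch using Ollama's standard `/api/tags` REST endpoint (the `requests` dependency and the default server address are assumptions carried over from later in the notebook):

```python
import requests

# List locally available models via Ollama's REST API
# (assumes the default local server used later in the notebook).
resp = requests.get("http://localhost:11434/api/tags", timeout=5)
resp.raise_for_status()
local_models = [m["name"] for m in resp.json().get("models", [])]
assert any(name.startswith("llama3.2:3b") for name in local_models), (
    "llama3.2:3b not found -- run `!ollama pull llama3.2:3b` first"
)
```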
148 | 148 | "outputs": [], |
149 | 149 | "source": [ |
150 | 150 | "# File Operation Tools\n", |
151 | | - "\n", |
152 | | - "\n", |
153 | 151 | "@tool\n", |
154 | 152 | "def file_read(file_path: str) -> str:\n", |
155 | | - " \"\"\"Read a file and return its content.\n", |
| 153 | + " \"\"\"Read a file and return its content. Supports both text and PDF files.\n", |
156 | 154 | "\n", |
157 | 155 | " Args:\n", |
158 | 156 | " file_path (str): Path to the file to read\n", |
|
164 | 162 | " FileNotFoundError: If the file doesn't exist\n", |
165 | 163 | " \"\"\"\n", |
166 | 164 | " try:\n", |
167 | | - " with open(file_path, \"r\") as file:\n", |
168 | | - " return file.read()\n", |
| 165 | + " # Check if it's a PDF file\n", |
| 166 | + " if file_path.lower().endswith('.pdf'):\n", |
| 167 | + " import PyPDF2\n", |
| 168 | + " with open(file_path, \"rb\") as file:\n", |
| 169 | + " pdf_reader = PyPDF2.PdfReader(file)\n", |
| 170 | + " text = \"\"\n", |
| 171 | + " for page in pdf_reader.pages:\n", |
| 172 | + " text += page.extract_text() + \"\\n\"\n", |
| 173 | + " return text if text.strip() else \"Error: Could not extract text from PDF\"\n", |
| 174 | + " else:\n", |
| 175 | + " # Regular text file\n", |
| 176 | + " with open(file_path, \"r\", encoding=\"utf-8\") as file:\n", |
| 177 | + " return file.read()\n", |
169 | 178 | " except FileNotFoundError:\n", |
170 | 179 | " return f\"Error: File '{file_path}' not found.\"\n", |
171 | 180 | " except Exception as e:\n", |
|
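One caveat on the new PDF branch: PyPDF2 is no longer maintained, and its own README points users to its successor, `pypdf`, which keeps the same `PdfReader` API. A drop-in sketch of the extraction step using `pypdf` (assuming it is installed; `extract_text()` is guarded with `or ""` since it can come back empty for image-only pages):

```python
from pypdf import PdfReader  # maintained successor to PyPDF2, same reader API

def read_pdf_text(file_path: str) -> str:
    """Extract text from every page of a PDF, tolerating image-only pages."""
    reader = PdfReader(file_path)
    return "\n".join(page.extract_text() or "" for page in reader.pages)
```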
268 | 277 | "\"\"\"\n", |
269 | 278 | "\n", |
270 | 279 | "model_id = (\n", |
271 | | - " \"llama3.2:1b\" # You can change this to any model you have pulled with Ollama.\n", |
| 280 | + " \"llama3.2:3b\" # You can change this to any model you have pulled with Ollama.\n", |
272 | 281 | ")" |
273 | 282 | ] |
274 | 283 | }, |
|
289 | 298 | "ollama_model = OllamaModel(\n", |
290 | 299 | " model_id=model_id,\n", |
291 | 300 | " host=\"http://localhost:11434\",\n", |
292 | | - " params={\n", |
293 | | - " \"max_tokens\": 4096, # Adjust based on your model's capabilities\n", |
294 | | - " \"temperature\": 0.7, # Lower for more deterministic responses, higher for more creative\n", |
295 | | - " \"top_p\": 0.9, # Nucleus sampling parameter\n", |
296 | | - " \"stream\": True, # Enable streaming responses\n", |
297 | | - " },\n", |
| 301 | + " max_tokens=4096, # Adjust based on your model's capabilities\n", |
| 302 | + " temperature=0.7, # Lower for more deterministic responses, higher for more creative\n", |
| 303 | + " top_p=0.9, # Nucleus sampling parameter\n", |
298 | 304 | ")\n", |
299 | 305 | "\n", |
300 | 306 | "# Create the agent\n", |
|
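For readers following along outside the diff, the keyword-argument form above is how the model now gets wired into the agent. A minimal end-to-end sketch (the `strands` import paths and the callable-agent usage are assumptions based on the surrounding notebook, not shown in this hunk):

```python
from strands import Agent
from strands.models.ollama import OllamaModel

ollama_model = OllamaModel(
    model_id="llama3.2:3b",
    host="http://localhost:11434",  # default local Ollama server
    max_tokens=4096,
    temperature=0.7,
    top_p=0.9,
)

# file_read is the tool defined earlier in the notebook
agent = Agent(model=ollama_model, tools=[file_read])
print(agent("Read README.md and summarize it in two sentences."))
```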
378 | 384 | ], |
379 | 385 | "metadata": { |
380 | 386 | "kernelspec": { |
381 | | - "display_name": "Python 3 (ipykernel)", |
| 387 | + "display_name": "genai-on-aws", |
382 | 388 | "language": "python", |
383 | 389 | "name": "python3" |
384 | 390 | }, |
|
392 | 398 | "name": "python", |
393 | 399 | "nbconvert_exporter": "python", |
394 | 400 | "pygments_lexer": "ipython3", |
395 | | - "version": "3.12.9" |
| 401 | + "version": "3.12.1" |
396 | 402 | } |
397 | 403 | }, |
398 | 404 | "nbformat": 4, |
|