agents/agents-core/build.gradle.kts (1 addition, 0 deletions)

@@ -17,6 +17,7 @@ kotlin {
         api(project(":utils"))
         api(project(":prompt:prompt-executor:prompt-executor-model"))
         api(project(":prompt:prompt-llm"))
+        api(project(":prompt:prompt-processor"))
         api(project(":prompt:prompt-structure"))

         api(project(":prompt:prompt-executor:prompt-executor-clients:prompt-executor-openai-client"))

singleRunStrategy definitions (file path not shown)

@@ -13,6 +13,7 @@ import ai.koog.agents.core.dsl.extension.onAssistantMessage
 import ai.koog.agents.core.dsl.extension.onMultipleAssistantMessages
 import ai.koog.agents.core.dsl.extension.onMultipleToolCalls
 import ai.koog.agents.core.dsl.extension.onToolCall
+import ai.koog.prompt.processor.ResponseProcessor

 /**
  * Creates a single-run strategy for an AI agent.
@@ -28,19 +29,26 @@ import ai.koog.agents.core.dsl.extension.onToolCall
  * - SingleRunMode.SINGLE: Executes without allowing multiple simultaneous tool calls.
  * - SingleRunMode.SEQUENTIAL: Executes simultaneous tool calls sequentially.
  * - SingleRunMode.PARALLEL: Executes multiple tool calls in parallel.
+ * @param responseProcessor The processor applied to all LLM responses. If null, no processing is applied. Defaults to null.
  * @return An instance of AIAgentStrategy configured according to the specified single-run mode.
  */
-public fun singleRunStrategy(runMode: ToolCalls = ToolCalls.SINGLE_RUN_SEQUENTIAL): AIAgentGraphStrategy<String, String> =
+public fun singleRunStrategy(
+    runMode: ToolCalls = ToolCalls.SINGLE_RUN_SEQUENTIAL,
+    responseProcessor: ResponseProcessor? = null,
+): AIAgentGraphStrategy<String, String> =
     when (runMode) {
-        ToolCalls.SEQUENTIAL -> singleRunWithParallelAbility(false)
-        ToolCalls.PARALLEL -> singleRunWithParallelAbility(true)
-        ToolCalls.SINGLE_RUN_SEQUENTIAL -> singleRunModeStrategy()
+        ToolCalls.SEQUENTIAL -> singleRunWithParallelAbility(false, responseProcessor)
+        ToolCalls.PARALLEL -> singleRunWithParallelAbility(true, responseProcessor)
+        ToolCalls.SINGLE_RUN_SEQUENTIAL -> singleRunModeStrategy(responseProcessor)
     }

-private fun singleRunWithParallelAbility(parallelTools: Boolean) = strategy("single_run_sequential") {
-    val nodeCallLLM by nodeLLMRequestMultiple()
+private fun singleRunWithParallelAbility(
+    parallelTools: Boolean,
+    responseProcessor: ResponseProcessor?
+) = strategy("single_run_sequential") {
+    val nodeCallLLM by nodeLLMRequestMultiple(responseProcessor = responseProcessor)
     val nodeExecuteTool by nodeExecuteMultipleTools(parallelTools = parallelTools)
-    val nodeSendToolResult by nodeLLMSendMultipleToolResults()
+    val nodeSendToolResult by nodeLLMSendMultipleToolResults(responseProcessor = responseProcessor)

     edge(nodeStart forwardTo nodeCallLLM)
     edge(nodeCallLLM forwardTo nodeExecuteTool onMultipleToolCalls { true })
@@ -61,10 +69,10 @@ private fun singleRunWithParallelAbility(parallelTools: Boolean) = strategy("sin
     edge(nodeSendToolResult forwardTo nodeExecuteTool onMultipleToolCalls { true })
 }

-private fun singleRunModeStrategy() = strategy("single_run") {
-    val nodeCallLLM by nodeLLMRequest()
+private fun singleRunModeStrategy(responseProcessor: ResponseProcessor?) = strategy("single_run") {
+    val nodeCallLLM by nodeLLMRequest(responseProcessor = responseProcessor)
     val nodeExecuteTool by nodeExecuteTool()
-    val nodeSendToolResult by nodeLLMSendToolResult()
+    val nodeSendToolResult by nodeLLMSendToolResult(responseProcessor = responseProcessor)

     edge(nodeStart forwardTo nodeCallLLM)
     edge(nodeCallLLM forwardTo nodeExecuteTool onToolCall { true })
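
Since responseProcessor defaults to null, existing singleRunStrategy call sites compile and behave exactly as before; the processor is threaded into every LLM-facing node (request and tool-result nodes) only when supplied. A minimal opt-in sketch, assuming a ResponseProcessor implementation from the new :prompt:prompt-processor module, whose contract is not shown in this diff:

```kotlin
import ai.koog.prompt.processor.ResponseProcessor

// Placeholder: substitute your own implementation of the ResponseProcessor
// contract from :prompt:prompt-processor (its interface is not part of this diff).
val myProcessor: ResponseProcessor = TODO("provide a ResponseProcessor implementation")

// All LLM-facing nodes in the resulting strategy route their responses through
// the processor; omitting the argument keeps the old, unprocessed behaviour.
val processedStrategy = singleRunStrategy(
    runMode = ToolCalls.PARALLEL,
    responseProcessor = myProcessor,
)
```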

AIAgentLLMSession (file path not shown)

@@ -11,6 +11,8 @@ import ai.koog.prompt.llm.LLModel
 import ai.koog.prompt.message.LLMChoice
 import ai.koog.prompt.message.Message
 import ai.koog.prompt.params.LLMParams
+import ai.koog.prompt.processor.ResponseProcessor
+import ai.koog.prompt.processor.executeProcessed
 import ai.koog.prompt.streaming.StreamFrame
 import ai.koog.prompt.structure.StructureFixingParser
 import ai.koog.prompt.structure.StructuredRequestConfig
@@ -114,13 +116,24 @@ public sealed class AIAgentLLMSession(
         return executor.executeStreaming(preparedPrompt, model, tools)
     }

-    protected suspend fun executeMultiple(prompt: Prompt, tools: List<ToolDescriptor>): List<Message.Response> {
+    protected suspend fun executeMultiple(
+        prompt: Prompt,
+        tools: List<ToolDescriptor>,
+        responseProcessor: ResponseProcessor? = null
+    ): List<Message.Response> {
         val preparedPrompt = preparePrompt(prompt, tools)
-        return executor.execute(preparedPrompt, model, tools)
+        return if (responseProcessor == null) {
+            executor.execute(preparedPrompt, model, tools)
+        } else {
+            executor.executeProcessed(preparedPrompt, model, tools, responseProcessor)
+        }
     }

-    protected suspend fun executeSingle(prompt: Prompt, tools: List<ToolDescriptor>): Message.Response =
-        executeMultiple(prompt, tools).first()
+    protected suspend fun executeSingle(
+        prompt: Prompt,
+        tools: List<ToolDescriptor>,
+        responseProcessor: ResponseProcessor? = null
+    ): Message.Response = executeMultiple(prompt, tools, responseProcessor).first()

     /**
      * Sends a request to the language model without utilizing any tools and returns the response.
@@ -152,14 +165,17 @@ public sealed class AIAgentLLMSession(
      * This method updates the session's prompt configuration to mark tool usage as required before
      * executing the request. Additionally, it ensures the session is active before proceeding.
      *
+     * @param responseProcessor The processor applied to the LLM response. If null, no processing is applied. Defaults to null.
      * @return The response from the language model after executing the request with enforced tool usage.
      */
-    public open suspend fun requestLLMOnlyCallingTools(): Message.Response {
+    public open suspend fun requestLLMOnlyCallingTools(
+        responseProcessor: ResponseProcessor? = null
+    ): Message.Response {
         validateSession()
         val promptWithOnlyCallingTools = prompt.withUpdatedParams {
             toolChoice = LLMParams.ToolChoice.Required
         }
-        return executeSingle(promptWithOnlyCallingTools, tools)
+        return executeSingle(promptWithOnlyCallingTools, tools, responseProcessor)
     }

     /**
@@ -173,16 +189,20 @@ public sealed class AIAgentLLMSession(
      * @param tool The tool to be used for the request, represented by a [ToolDescriptor] instance.
      *             This parameter ensures that the language model utilizes the specified tool
      *             during the interaction.
+     * @param responseProcessor The processor applied to the LLM response. If null, no processing is applied. Defaults to null.
      * @return The response from the language model as a [Message.Response] instance after
      *         processing the request with the enforced tool.
      */
-    public open suspend fun requestLLMForceOneTool(tool: ToolDescriptor): Message.Response {
+    public open suspend fun requestLLMForceOneTool(
+        tool: ToolDescriptor,
+        responseProcessor: ResponseProcessor? = null
+    ): Message.Response {
         validateSession()
         check(tools.contains(tool)) { "Unable to force call to tool `${tool.name}` because it is not defined" }
         val promptWithForcingOneTool = prompt.withUpdatedParams {
             toolChoice = LLMParams.ToolChoice.Named(tool.name)
         }
-        return executeSingle(promptWithForcingOneTool, tools)
+        return executeSingle(promptWithForcingOneTool, tools, responseProcessor)
     }

     /**
@@ -194,11 +214,15 @@ public sealed class AIAgentLLMSession(
      *
      * @param tool The tool to be used for the request, represented as an instance of [Tool]. This parameter ensures
      *             the specified tool is utilized during the LLM interaction.
+     * @param responseProcessor The processor applied to the LLM response. If null, no processing is applied. Defaults to null.
      * @return The response from the language model as a [Message.Response] instance after processing the request with the
      *         enforced tool.
      */
-    public open suspend fun requestLLMForceOneTool(tool: Tool<*, *>): Message.Response {
-        return requestLLMForceOneTool(tool.descriptor)
+    public open suspend fun requestLLMForceOneTool(
+        tool: Tool<*, *>,
+        responseProcessor: ResponseProcessor? = null
+    ): Message.Response {
+        return requestLLMForceOneTool(tool.descriptor, responseProcessor)
     }

     /**
@@ -207,9 +231,9 @@ public sealed class AIAgentLLMSession(
      *
      * @return The first response message from the LLM after executing the request.
      */
-    public open suspend fun requestLLM(): Message.Response {
+    public open suspend fun requestLLM(responseProcessor: ResponseProcessor? = null): Message.Response {
         validateSession()
-        return executeSingle(prompt, tools)
+        return executeSingle(prompt, tools, responseProcessor)
     }

     /**
@@ -248,11 +272,12 @@ public sealed class AIAgentLLMSession(
      * Before executing the request, the session state is validated to ensure
      * it is active and usable.
      *
+     * @param responseProcessor The processor applied to the LLM response. If null, no processing is applied. Defaults to null.
      * @return a list of responses from the language model
      */
-    public open suspend fun requestLLMMultiple(): List<Message.Response> {
+    public open suspend fun requestLLMMultiple(responseProcessor: ResponseProcessor? = null): List<Message.Response> {
         validateSession()
-        return executeMultiple(prompt, tools)
+        return executeMultiple(prompt, tools, responseProcessor)
     }

     /**
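
The session methods follow the same additive pattern: each request function takes an optional processor and passes it down to executeMultiple, which switches from executor.execute to executor.executeProcessed only when a processor is present. A usage sketch, assuming the usual agent-node context where llm.writeSession is available; myProcessor is a hypothetical ResponseProcessor instance:

```kotlin
llm.writeSession {
    // With a processor: the request is routed through executor.executeProcessed(...).
    val processed = requestLLM(responseProcessor = myProcessor)

    // Without one (null default): the original executor.execute(...) path runs,
    // so pre-existing callers are unaffected by this change.
    val raw = requestLLMMultiple()
}
```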

AIAgentLLMWriteSession (file path not shown)

@@ -14,6 +14,7 @@ import ai.koog.prompt.executor.model.PromptExecutor
 import ai.koog.prompt.llm.LLModel
 import ai.koog.prompt.message.Message
 import ai.koog.prompt.params.LLMParams
+import ai.koog.prompt.processor.ResponseProcessor
 import ai.koog.prompt.streaming.StreamFrame
 import ai.koog.prompt.structure.StructureDefinition
 import ai.koog.prompt.structure.StructureFixingParser
@@ -381,41 +382,54 @@ public class AIAgentLLMWriteSession internal constructor(
      * Requests a response from the Language Learning Model (LLM) while also processing
      * the response by updating the current prompt with the received message.
      *
+     * @param responseProcessor The processor applied to the LLM response. If null, no processing is applied. Defaults to null.
      * @return The response received from the Language Learning Model (LLM).
      */
-    override suspend fun requestLLMOnlyCallingTools(): Message.Response {
-        return super.requestLLMOnlyCallingTools().also { response -> appendPrompt { message(response) } }
+    override suspend fun requestLLMOnlyCallingTools(responseProcessor: ResponseProcessor?): Message.Response {
+        return super.requestLLMOnlyCallingTools(responseProcessor)
+            .also { response -> appendPrompt { message(response) } }
     }

     /**
      * Requests an LLM (Large Language Model) to forcefully utilize a specific tool during its operation.
      *
      * @param tool A descriptor object representing the tool to be enforced for use by the LLM.
+     * @param responseProcessor The processor applied to the LLM response. If null, no processing is applied. Defaults to null.
      * @return A response message received from the LLM after executing the enforced tool request.
      */
-    override suspend fun requestLLMForceOneTool(tool: ToolDescriptor): Message.Response {
-        return super.requestLLMForceOneTool(tool).also { response -> appendPrompt { message(response) } }
+    override suspend fun requestLLMForceOneTool(
+        tool: ToolDescriptor,
+        responseProcessor: ResponseProcessor?
+    ): Message.Response {
+        return super.requestLLMForceOneTool(tool, responseProcessor)
+            .also { response -> appendPrompt { message(response) } }
     }

     /**
      * Requests the execution of a single specified tool, enforcing its use,
      * and updates the prompt based on the generated response.
      *
      * @param tool The tool that will be enforced and executed. It contains the input and output types.
+     * @param responseProcessor The processor applied to the LLM response. If null, no processing is applied. Defaults to null.
      * @return The response generated after executing the provided tool.
      */
-    override suspend fun requestLLMForceOneTool(tool: Tool<*, *>): Message.Response {
-        return super.requestLLMForceOneTool(tool).also { response -> appendPrompt { message(response) } }
+    override suspend fun requestLLMForceOneTool(
+        tool: Tool<*, *>,
+        responseProcessor: ResponseProcessor?
+    ): Message.Response {
+        return super.requestLLMForceOneTool(tool, responseProcessor)
+            .also { response -> appendPrompt { message(response) } }
     }

     /**
-     * Makes an asynchronous request to a Large Language Model (LLM) and updates the current prompt
-     * with the response received from the LLM.
+     * Makes an asynchronous request to a Large Language Model (LLM), processes the response using the provided
+     * responseProcessor, and updates the current prompt with the processed response.
      *
+     * @param responseProcessor The processor applied to the LLM response. If null, no processing is applied. Defaults to null.
      * @return A [Message.Response] object containing the response from the LLM.
      */
-    override suspend fun requestLLM(): Message.Response {
-        return super.requestLLM().also { response ->
+    override suspend fun requestLLM(responseProcessor: ResponseProcessor?): Message.Response {
+        return super.requestLLM(responseProcessor).also { response ->
             appendPrompt { message(response) }
         }
     }
@@ -427,10 +441,11 @@ public class AIAgentLLMWriteSession internal constructor(
      * response is subsequently used to update the session's prompt. The prompt updating mechanism
      * allows stateful interactions with the LLM, maintaining context across multiple requests.
      *
+     * @param responseProcessor The processor applied to the LLM response. If null, no processing is applied. Defaults to null.
      * @return A list of `Message.Response` containing the results from the LLM.
      */
-    override suspend fun requestLLMMultiple(): List<Message.Response> {
-        return super.requestLLMMultiple().also { responses ->
+    override suspend fun requestLLMMultiple(responseProcessor: ResponseProcessor?): List<Message.Response> {
+        return super.requestLLMMultiple(responseProcessor).also { responses ->
             appendPrompt {
                 responses.forEach { message(it) }
             }
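
Worth noting in the write-session overrides: super returns the already-processed responses, and the .also block appends those same messages to the prompt, so subsequent requests see the post-processed conversation state (the updated requestLLM KDoc states this explicitly). A sketch of the end-to-end flow inside a strategy node, under the same assumptions as above, with normalizer as a hypothetical ResponseProcessor:

```kotlin
val respond by node<String, String>("respond") { input ->
    llm.writeSession {
        appendPrompt { user(input) }
        // The responses returned here have already been through `normalizer`,
        // and it is these processed messages that were appended to the prompt.
        val responses = requestLLMMultiple(responseProcessor = normalizer)
        responses.filterIsInstance<Message.Assistant>()
            .joinToString(separator = "\n") { it.content }
    }
}
```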