- # Module koog-ktor-plugin
+ # Module koog-ktor

Ktor server integration for the Koog AI agents framework.

## Overview

- The `koog-ktor-plugin` module provides seamless integration between the Koog AI agents framework and Ktor server applications. It includes:
+ The `koog-ktor` module provides seamless integration between the Koog AI agents framework and Ktor server applications.
+ It includes:

- A Ktor plugin for easy installation and configuration
- Support for multiple LLM providers (OpenAI, Anthropic, Google, OpenRouter, Ollama)
- Agent configuration with tools, features, and prompt customization
- Extension functions for routes to interact with LLMs and agents
- - Content moderation capabilities
- JVM-specific support for Model Context Protocol (MCP) integration

## Using in your project
@@ -19,58 +19,47 @@ Add the dependency to your `build.gradle.kts`:

```kotlin
dependencies {
-     implementation("ai.koog:koog-ktor-plugin:$koogVersion")
+     implementation("ai.koog:koog-ktor:$koogVersion")
}
```
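+
+ If you use Gradle version catalogs, the same coordinate can be declared there instead — a sketch with a hypothetical catalog alias:
+
+ ```kotlin
+ // In gradle/libs.versions.toml (hypothetical entry):
+ //   koog-ktor = { module = "ai.koog:koog-ktor", version.ref = "koog" }
+ dependencies {
+     implementation(libs.koog.ktor)
+ }
+ ```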

## Basic Usage

- Install and configure the plugin in your Ktor application:
+ Provide one or more API keys, and the Koog plugin will automatically connect to the corresponding provider when needed.
+ For additional or provider-specific settings, see [YAML/CONF Configuration](#yamlconf-configuration) below.
+
+ ```yaml
+ koog:
+   openai.apikey: "$OPENAI_API_KEY:your-openai-api-key"
+   anthropic.apikey: "$ANTHROPIC_API_KEY:your-anthropic-api-key"
+   google.apikey: "$GOOGLE_API_KEY:your-google-api-key"
+   openrouter.apikey: "$OPENROUTER_API_KEY:your-openrouter-api-key"
+   ollama.enabled: "$DEBUG:false"
+ ```
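+
+ With keys supplied this way, installation itself can stay minimal — a sketch, assuming no programmatic block is needed when configuration comes from the file:
+
+ ```kotlin
+ fun Application.module() {
+     // Providers are picked up from application.yaml / application.conf
+     install(Koog)
+ }
+ ```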
+
+ ### Installing and configuring the plugin
+
+ The Koog plugin can also be configured in code, and some complex configurations can only be expressed in code.
+ See [Programmatic Configuration](#programmatic-configuration) below.

```kotlin
fun Application.module() {
    install(Koog) {
-         // Configure LLM providers
        llm {
-             openAI(apiKey = "your-openai-api-key") {
-                 baseUrl = "https://api.openai.com"
-             }
-
-             // Optional: Configure other providers
+             openAI(apiKey = "your-openai-api-key")
            anthropic(apiKey = "your-anthropic-api-key")
            ollama { baseUrl = "http://localhost:11434" }
            google(apiKey = "your-google-api-key")
            openRouter(apiKey = "your-openrouter-api-key")
        }
-
-         // Configure agent
-         agent {
-             // Register tools
-             registerTools {
-                 // Add tools using reflection
-                 tool(::yourToolFunction)
-             }
-
-             // Configure prompt
-             prompt {
-                 system("You are a helpful assistant")
-             }
-
-             // JVM-specific: Configure MCP integration
-             mcp {
-                 sse("your-mcp-server-url")
-             }
-         }
    }
-
-     // Use in routes
+
    routing {
        route("/ai") {
            post("/chat") {
                val userInput = call.receive<String>()
-
-                 // Use agent to respond
-                 call.agentRespond(userInput)
+                 val output = singleRunAgent(userInput)
+                 call.respond(HttpStatusCode.OK, output)
            }
        }
    }
@@ -84,19 +73,19 @@ fun Application.module() {
```kotlin
post("/moderated-chat") {
    val userInput = call.receive<String>()

    // Moderate content
-     val isHarmful = moderateWithLLM(OpenAIModels.Moderation.Omni) {
-         user(userInput)
-     }.isHarmful
+     val isHarmful = llm().moderate(prompt("id") {
+         user(userInput)
+     }, OpenAIModels.Moderation.Omni).isHarmful

    if (isHarmful) {
        call.respond(HttpStatusCode.BadRequest, "Harmful content detected")
        return@post
    }

-     // Process with agent
-     call.agentRespond(userInput)
+     val output = singleRunAgent(userInput)
+     call.respond(HttpStatusCode.OK, output)
}
```
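+
+ The same `llm().moderate(...)` call shown above can presumably be applied to what the agent produces before it is returned — a sketch replacing the last two lines of the handler above:
+
+ ```kotlin
+ val output = singleRunAgent(userInput)
+
+ // Hypothetical follow-up check on the agent's answer, reusing the API from the example above
+ val outputIsHarmful = llm().moderate(prompt("id") {
+     user(output)
+ }, OpenAIModels.Moderation.Omni).isHarmful
+
+ if (outputIsHarmful) {
+     call.respond(HttpStatusCode.UnprocessableEntity, "Response withheld")
+     return@post
+ }
+ call.respond(HttpStatusCode.OK, output)
+ ```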
@@ -105,14 +94,16 @@ post("/moderated-chat") {
```kotlin
post("/llm-chat") {
    val userInput = call.receive<String>()

-     // Ask LLM directly
-     val response = askLLM(OllamaModels.Meta.LLAMA_3_2) {
-         system("You are a helpful assistant")
-         user(userInput)
-     }.single() as Message.Assistant
-
-     call.respond(response.content)
+     val response = llm().execute(prompt("id") {
+         system(
+             "You are a helpful assistant that can correct user answers. " +
+                 "You will get a user's question, and your task is to make it clearer for further processing."
+         )
+         user(userInput)
+     }, OllamaModels.Meta.LLAMA_3_2)
+
+     call.respond(HttpStatusCode.OK, response.content)
}
```
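+
+ The model argument appears to pick which configured provider serves the call, so the same route could presumably target another provider by swapping the model — a sketch reusing identifiers from elsewhere in this document:
+
+ ```kotlin
+ // Same call shape, routed to OpenAI instead of Ollama
+ val openAiResponse = llm().execute(prompt("id") {
+     user(userInput)
+ }, OpenAIModels.GPT4.Turbo)
+ ```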
@@ -121,9 +112,9 @@ post("/llm-chat") {
```kotlin
post("/custom-agent") {
    val userInput = call.receive<String>()

-     // Use custom strategy
-     call.agentRespond(userInput, strategy = reActStrategy())
+     val output = aiAgent(reActStrategy(), userInput)
+     call.respond(HttpStatusCode.OK, output)
}
```
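+
+ To exercise these routes without a running server, Ktor's standard test harness applies — a sketch inside a test function, assuming the `module()` from the basic example above and the `ktor-server-test-host` dependency:
+
+ ```kotlin
+ testApplication {
+     application { module() }
+     // String bodies work without content negotiation
+     val response = client.post("/ai/chat") { setBody("Hello!") }
+     println(response.status)
+ }
+ ```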
@@ -145,7 +136,7 @@ llm {
            socketTimeoutMillis = 30000
        }
    }

    // Set fallback LLM
    fallback {
        provider = LLMProvider.Ollama
@@ -156,7 +147,8 @@ llm {

#### YAML/CONF Configuration

- You can also configure LLM providers using YAML or CONF files. The plugin will automatically read the configuration from the application's configuration file:
+ You can also configure LLM providers using YAML or CONF files. The plugin will automatically read the configuration from
+ the application's configuration file:

```yaml
# application.yaml or application.conf
@@ -168,28 +160,29 @@ koog:
      requestTimeoutMillis: 30000
      connectTimeoutMillis: 10000
      socketTimeoutMillis: 30000

  anthropic:
    apikey: "your-anthropic-api-key"
    baseUrl: "https://api.anthropic.com"
    timeout:
      requestTimeoutMillis: 30000

  google:
    apikey: "your-google-api-key"
    baseUrl: "https://generativelanguage.googleapis.com"

  openrouter:
    apikey: "your-openrouter-api-key"
    baseUrl: "https://openrouter.ai"

  ollama:
    baseUrl: "http://localhost:11434"
    timeout:
      requestTimeoutMillis: 60000
```

- When using configuration files, you can still provide programmatic configuration that will override the settings from the file:
+ When using configuration files, you can still provide programmatic configuration that will override the settings from
+ the file:

```kotlin
install(Koog) {
@@ -198,7 +191,7 @@ install(Koog) {
        // This will override the API key from the configuration file
        openAI(apiKey = System.getenv("OPENAI_API_KEY") ?: "override-from-code")
    }

    // Rest of your configuration...
}
```
@@ -211,21 +204,21 @@ Configure agent behavior, tools, and features:
agent {
    // Set model
    model = OpenAIModels.GPT4.Turbo

    // Set max iterations
    maxAgentIterations = 10

    // Register tools
    registerTools {
        tool(::searchTool)
        tool(::calculatorTool)
    }
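+
+     // A reflection-registered tool is presumably an ordinary function
+     // reference, e.g. (hypothetical signature):
+     //     fun searchTool(query: String): String = ...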

    // Configure prompt
    prompt {
        system("You are a helpful assistant specialized in...")
    }

    // Install features
    install(OpenTelemetry) {
        // Configure feature
@@ -242,10 +235,10 @@ agent {
mcp {
    // Use Server-Sent Events
    sse("https://your-mcp-server.com/sse")

    // Or use process
    process(yourMcpProcess)

    // Or use existing client
    client(yourMcpClient)
}