Skip to content
Merged
Show file tree
Hide file tree
Changes from 19 commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
8f0c9f1
Refactor `LLMParams` and introduce support for `OpenRouterParams` and…
devcrocod Aug 22, 2025
ade4272
Remove outdated TODO comment in `LLMParams`.
devcrocod Aug 22, 2025
1120d2c
Separate chat completions request and response for deepseek and openr…
devcrocod Aug 22, 2025
27f720b
Add OpenAI Responses API schema
devcrocod Aug 23, 2025
da4e2c0
Rename OpenAI embedding models
devcrocod Aug 23, 2025
88ef6c3
add OpenAI chat completion
devcrocod Aug 23, 2025
871fbaa
Introduce `OpenAIParams` with specialized `OpenAIChatParams` and `Ope…
devcrocod Aug 23, 2025
afbd9d9
Refactor `Item` serialization with `ItemPolymorphicSerializer` and in…
devcrocod Aug 23, 2025
972c68e
Refactor `OpenAIResponsesAPI` models: adjust class hierarchy, refine …
devcrocod Aug 23, 2025
9df5cda
add Responses API
devcrocod Aug 24, 2025
9d8361f
Remove unused model mappings from `LLMModelParser`.
devcrocod Aug 24, 2025
ab09703
Add new GPT-5 models (`GPT5`, `GPT5Mini`, `GPT5Nano`) to `LLMModelPar…
devcrocod Aug 24, 2025
76c8846
refactor openai clients
devcrocod Aug 24, 2025
38b7cb6
code formatting
devcrocod Aug 24, 2025
16eae04
Add new GPT models (`GPT5`, `GPT5Mini`, `GPT5Nano`, `GPT_OSS_120b`) t…
devcrocod Aug 24, 2025
5c2416a
Remove unused test cases for `O1Mini` and `TextModeration` in `ModelI…
devcrocod Aug 24, 2025
abe664d
Handle Azure-specific base URL in `determineParams` method for OpenAI…
devcrocod Aug 24, 2025
1b3cd78
Refine parameter validation for `topLogprobs` and documentation for O…
devcrocod Aug 25, 2025
33c66be
fix formatting in openai client parameter class
devcrocod Aug 25, 2025
64df4fa
Filter unsupported output item types in OpenAI client response proces…
devcrocod Aug 26, 2025
a805d76
Update Modules files
devcrocod Aug 26, 2025
0633a43
Add missed kdocs for OpenAIEndpoint
tiginamaria Aug 27, 2025
93465d4
Fix titles in module documentation
tiginamaria Aug 27, 2025
07b2043
- Make `id` and `status` fields in `OutputMessage` optional.
kpavlov Aug 27, 2025
f008f1d
add .env to gitignore
kpavlov Aug 27, 2025
8983351
Add test coverage for LLM parameter validation across multiple clients
aozherelyeva Aug 27, 2025
8c8f4d2
Mark ResponsesAPI as Experimental
kpavlov Aug 27, 2025
e719707
Add test coverage for LLM parameter validation across multiple clients
aozherelyeva Aug 27, 2025
6f888c6
Merge remote-tracking branch 'origin/devcrocod/providers-llmparams' i…
kpavlov Aug 27, 2025
da58652
Mark ResponsesAPI as Experimental
kpavlov Aug 27, 2025
a31f30f
Fix: Mark ResponsesAPI as Experimental
kpavlov Aug 27, 2025
eb0ee88
Use OpenAI ChatCompletion API by default
kpavlov Aug 27, 2025
1fcd911
Reformat code
kpavlov Aug 27, 2025
fb5e99e
Validate OpenAIParams and enhance tests
kpavlov Aug 27, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,12 @@ object Models {
return Stream.of(
OpenAIModels.Chat.GPT4o,
OpenAIModels.Chat.GPT4_1,
OpenAIModels.Chat.GPT5,
OpenAIModels.Chat.GPT5Mini,
OpenAIModels.Chat.GPT5Nano,

OpenAIModels.Reasoning.O4Mini,
OpenAIModels.Reasoning.O3Mini,
OpenAIModels.Reasoning.O1Mini,
OpenAIModels.Reasoning.O3,
OpenAIModels.Reasoning.O1,

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -182,12 +182,14 @@ private fun openAI(parts: List<String>, identifier: String): LLModel? {
private val OPENAI_MODELS_MAP = mapOf(
"chat" to mapOf(
"gpt4o" to OpenAIModels.Chat.GPT4o,
"gpt4_1" to OpenAIModels.Chat.GPT4_1
"gpt4_1" to OpenAIModels.Chat.GPT4_1,
"gpt5" to OpenAIModels.Chat.GPT5,
"gpt5mini" to OpenAIModels.Chat.GPT5Mini,
"gpt5nano" to OpenAIModels.Chat.GPT5Nano,
),
"reasoning" to mapOf(
"o4mini" to OpenAIModels.Reasoning.O4Mini,
"o3mini" to OpenAIModels.Reasoning.O3Mini,
"o1mini" to OpenAIModels.Reasoning.O1Mini,
"o3" to OpenAIModels.Reasoning.O3,
"o1" to OpenAIModels.Reasoning.O1
),
Expand All @@ -196,7 +198,6 @@ private val OPENAI_MODELS_MAP = mapOf(
"gpt4_1nano" to OpenAIModels.CostOptimized.GPT4_1Nano,
"gpt4_1mini" to OpenAIModels.CostOptimized.GPT4_1Mini,
"gpt4omini" to OpenAIModels.CostOptimized.GPT4oMini,
"o1mini" to OpenAIModels.CostOptimized.O1Mini,
"o3mini" to OpenAIModels.CostOptimized.O3Mini
),
"audio" to mapOf(
Expand All @@ -209,7 +210,6 @@ private val OPENAI_MODELS_MAP = mapOf(
"textembeddingada002" to OpenAIModels.Embeddings.TextEmbeddingAda002
),
"moderation" to mapOf(
"text" to OpenAIModels.Moderation.Text,
"omni" to OpenAIModels.Moderation.Omni
)
)
Expand Down Expand Up @@ -238,6 +238,10 @@ private val OPENROUTER_MODELS_MAP = mapOf(
"claude3haiku" to OpenRouterModels.Claude3Haiku,
"gpt4" to OpenRouterModels.GPT4,
"gpt4o" to OpenRouterModels.GPT4o,
"gpt5" to OpenRouterModels.GPT5,
"gpt5mini" to OpenRouterModels.GPT5Mini,
"gpt5nano" to OpenRouterModels.GPT5Nano,
"gptoss120b" to OpenRouterModels.GPT_OSS_120b,
"gpt4turbo" to OpenRouterModels.GPT4Turbo,
"gpt35turbo" to OpenRouterModels.GPT35Turbo
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,12 +45,6 @@ class ModelIdentifierParsingTest {
assertEquals(LLMProvider.OpenAI, o3Mini.provider)
assertEquals(OpenAIModels.Reasoning.O3Mini, o3Mini)

// Test O1 Mini
val o1Mini = getModelFromIdentifier("openai.reasoning.o1mini")
assertNotNull(o1Mini)
assertEquals(LLMProvider.OpenAI, o1Mini.provider)
assertEquals(OpenAIModels.Reasoning.O1Mini, o1Mini)

// Test O3
val o3 = getModelFromIdentifier("openai.reasoning.o3")
assertNotNull(o3)
Expand Down Expand Up @@ -123,12 +117,6 @@ class ModelIdentifierParsingTest {

@Test
fun testOpenAIModerationModels() = runTest {
// Test Text Moderation
val textModeration = getModelFromIdentifier("openai.moderation.text")
assertNotNull(textModeration)
assertEquals(LLMProvider.OpenAI, textModeration.provider)
assertEquals(OpenAIModels.Moderation.Text, textModeration)

// Test Omni Moderation
val omniModeration = getModelFromIdentifier("openai.moderation.omni")
assertNotNull(omniModeration)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,17 @@
import ai.koog.prompt.dsl.Prompt
import ai.koog.prompt.executor.clients.ConnectionTimeoutConfig
import ai.koog.prompt.executor.clients.LLMClient
import ai.koog.prompt.executor.clients.deepseek.models.DeepSeekChatCompletionRequest
import ai.koog.prompt.executor.clients.deepseek.models.DeepSeekChatCompletionResponse
import ai.koog.prompt.executor.clients.deepseek.models.DeepSeekChatCompletionStreamResponse
import ai.koog.prompt.executor.clients.openai.AbstractOpenAILLMClient
import ai.koog.prompt.executor.clients.openai.OpenAIBasedSettings
import ai.koog.prompt.executor.clients.openai.models.OpenAIMessage
import ai.koog.prompt.executor.clients.openai.models.OpenAITool
import ai.koog.prompt.executor.clients.openai.models.OpenAIToolChoice
import ai.koog.prompt.executor.model.LLMChoice
import ai.koog.prompt.llm.LLModel
import ai.koog.prompt.params.LLMParams
import io.github.oshai.kotlinlogging.KLogger
import io.github.oshai.kotlinlogging.KotlinLogging
import io.ktor.client.HttpClient
Expand All @@ -32,29 +40,70 @@
* defaults to "https://api.deepseek.com" and 900s
* @param clock Clock instance used for tracking response metadata timestamps.
*/
public class DeepSeekLLMClient(
    apiKey: String,
    private val settings: DeepSeekClientSettings = DeepSeekClientSettings(),
    baseClient: HttpClient = HttpClient(),
    clock: Clock = Clock.System
) : AbstractOpenAILLMClient<DeepSeekChatCompletionResponse, DeepSeekChatCompletionStreamResponse>(
    apiKey,
    settings,
    baseClient,
    clock
) {

    private companion object {
        // Shared logger for all instances of this client.
        private val staticLogger = KotlinLogging.logger { }
    }

    override val logger: KLogger = staticLogger

/**
 * Builds the JSON body of a DeepSeek chat-completions request.
 *
 * The generic [params] are first converted to [DeepSeekParams] via [toDeepSeekParams]
 * (which applies DeepSeek-specific validation) and then mapped onto a
 * [DeepSeekChatCompletionRequest].
 *
 * @param messages Conversation history in OpenAI wire format.
 * @param model Target model; its id is sent as the request `model` field.
 * @param tools Tool definitions exposed to the model, if any.
 * @param toolChoice Tool-choice hint supplied by the caller.
 * @param params Generation parameters for this request.
 * @param stream Whether to request a streaming response.
 * @return The JSON-encoded request body.
 */
override fun serializeProviderChatRequest(
    messages: List<OpenAIMessage>,
    model: LLModel,
    tools: List<OpenAITool>?,
    toolChoice: OpenAIToolChoice?,
    params: LLMParams,
    stream: Boolean
): String {
    val deepSeekParams = params.toDeepSeekParams()
    val responseFormat = createResponseFormat(params.schema, model)

    // NOTE(review): the `toolChoice` parameter is not used here — the effective tool
    // choice is taken from `params` instead. Confirm this is intentional.
    val request = DeepSeekChatCompletionRequest(
        messages = messages,
        model = model.id,
        frequencyPenalty = deepSeekParams.frequencyPenalty,
        logprobs = deepSeekParams.logprobs,
        maxTokens = deepSeekParams.maxTokens,
        presencePenalty = deepSeekParams.presencePenalty,
        responseFormat = responseFormat,
        stop = deepSeekParams.stop,
        stream = stream,
        temperature = deepSeekParams.temperature,
        toolChoice = deepSeekParams.toolChoice?.toOpenAIToolChoice(),
        tools = tools,
        topLogprobs = deepSeekParams.topLogprobs,
        topP = deepSeekParams.topP,
    )

    return json.encodeToString(request)
}

/**
 * Converts a parsed DeepSeek chat-completion response into LLM choices.
 *
 * @throws IllegalArgumentException if the response contains no choices.
 */
override fun processProviderChatResponse(response: DeepSeekChatCompletionResponse): List<LLMChoice> {
    require(response.choices.isNotEmpty()) { "Empty choices in response" }
    return response.choices.map { it.toMessageResponses(createMetaInfo(response.usage)) }
}

/** Decodes a single streaming (SSE data) payload into a DeepSeek stream-response chunk. */
override fun decodeStreamingResponse(data: String): DeepSeekChatCompletionStreamResponse =
    json.decodeFromString(data)

/** Decodes a complete (non-streaming) DeepSeek chat-completion response body. */
override fun decodeResponse(data: String): DeepSeekChatCompletionResponse =
    json.decodeFromString(data)

/**
 * Extracts the incremental text content from a streaming chunk, or `null` when the
 * chunk carries no content delta (e.g. role-only or finish-reason frames).
 */
override fun processStreamingChunk(chunk: DeepSeekChatCompletionStreamResponse): String? =
    chunk.choices.firstOrNull()?.delta?.content

/**
 * Executes a moderation action on the given prompt using the specified language model.
 * This method is not supported by the DeepSeek API and will always throw an `UnsupportedOperationException`.
 *
 * @param prompt The [Prompt] object to be moderated, containing the messages and respective context.
 * @param model The [LLModel] to be used for the moderation process.
 * @return This method does not return a valid result as it always throws an exception.
 * @throws UnsupportedOperationException Always thrown because moderation is not supported by the DeepSeek API.
 */
public override suspend fun moderate(prompt: Prompt, model: LLModel): ModerationResult {
    logger.warn { "Moderation is not supported by DeepSeek API" }
    throw UnsupportedOperationException("Moderation is not supported by DeepSeek API.")
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,174 @@
package ai.koog.prompt.executor.clients.deepseek

import ai.koog.prompt.params.LLMParams

/**
 * Widens generic [LLMParams] into [DeepSeekParams], preserving every shared field.
 * Returns the receiver unchanged when it is already a [DeepSeekParams].
 */
internal fun LLMParams.toDeepSeekParams(): DeepSeekParams {
    if (this is DeepSeekParams) return this
    return DeepSeekParams(
        temperature = temperature,
        maxTokens = maxTokens,
        numberOfChoices = numberOfChoices,
        speculation = speculation,
        schema = schema,
        toolChoice = toolChoice,
        user = user,
        includeThoughts = includeThoughts,
        // Bug fix: thinkingBudget was previously dropped during conversion even though
        // LLMParams carries it and DeepSeekParams accepts it.
        thinkingBudget = thinkingBudget,
    )
}

/**
 * DeepSeek chat-completions parameters layered on top of [LLMParams].
 *
 * @property temperature Sampling temperature in [0.0, 2.0]. Higher => more random;
 *  lower => more deterministic. Adjust this **or** [topP], not both.
 * @property maxTokens Maximum number of tokens the model may generate for this response.
 * @property numberOfChoices Number of completions to generate for the prompt (cost scales with N).
 * @property speculation Provider-specific control for speculative decoding / draft acceleration.
 * @property schema JSON Schema to constrain model output (validated when supported).
 * @property toolChoice Controls if/which tool must be called (`none`/`auto`/`required`/specific).
 * @property user not used for DeepSeek
 * @property includeThoughts Request inclusion of model "thoughts"/reasoning traces (model-dependent).
 * @property thinkingBudget Soft cap on tokens spent on internal reasoning (reasoning models).
 * @property frequencyPenalty Number in [-2.0, 2.0] - penalizes frequent tokens to reduce repetition.
 * @property presencePenalty Number in [-2.0, 2.0] - encourages introduction of new tokens/topics.
 * @property logprobs Whether to include log-probabilities for output tokens.
 * @property stop Stop sequences (0-4 items); generation halts before any of these.
 * @property topLogprobs Number of top alternatives per position (0-20). Requires [logprobs] = true.
 * @property topP Nucleus sampling in (0.0, 1.0]; use **instead of** [temperature].
 */
public class DeepSeekParams(
    temperature: Double? = null,
    maxTokens: Int? = null,
    numberOfChoices: Int? = null,
    speculation: String? = null,
    schema: Schema? = null,
    toolChoice: ToolChoice? = null,
    user: String? = null,
    includeThoughts: Boolean? = null,
    thinkingBudget: Int? = null,
    public val frequencyPenalty: Double? = null,
    public val presencePenalty: Double? = null,
    public val logprobs: Boolean? = null,
    public val stop: List<String>? = null,
    public val topLogprobs: Int? = null,
    public val topP: Double? = null,
) : LLMParams(
    temperature, maxTokens, numberOfChoices,
    speculation, schema, toolChoice,
    user, includeThoughts, thinkingBudget
) {
    init {
        // Bug fix: the documented contract (and the error message below) is the
        // half-open interval (0.0, 1.0], but the original check also accepted 0.0.
        require(topP == null || (topP > 0.0 && topP <= 1.0)) {
            "topP must be in (0.0, 1.0], but was $topP"
        }
        if (topLogprobs != null) {
            require(logprobs == true) {
                "`topLogprobs` requires `logprobs=true`."
            }
            require(topLogprobs in 0..20) {
                "topLogprobs must be in [0, 20], but was $topLogprobs"
            }
        }
        require(frequencyPenalty == null || frequencyPenalty in -2.0..2.0) {
            "frequencyPenalty must be in [-2.0, 2.0], but was $frequencyPenalty"
        }
        require(presencePenalty == null || presencePenalty in -2.0..2.0) {
            "presencePenalty must be in [-2.0, 2.0], but was $presencePenalty"
        }

        // --- Stop sequences ---
        if (stop != null) {
            require(stop.isNotEmpty()) { "stop must not be empty when provided." }
            require(stop.size <= 4) { "stop supports at most 4 sequences, but was ${stop.size}" }
            require(stop.all { it.isNotBlank() }) { "stop sequences must not be blank." }
        }
    }

    /**
     * Creates a copy of this instance with the ability to modify any of its properties.
     */
    public fun copy(
        temperature: Double? = this.temperature,
        maxTokens: Int? = this.maxTokens,
        numberOfChoices: Int? = this.numberOfChoices,
        speculation: String? = this.speculation,
        schema: Schema? = this.schema,
        toolChoice: ToolChoice? = this.toolChoice,
        user: String? = this.user,
        includeThoughts: Boolean? = this.includeThoughts,
        thinkingBudget: Int? = this.thinkingBudget,
        frequencyPenalty: Double? = this.frequencyPenalty,
        presencePenalty: Double? = this.presencePenalty,
        logprobs: Boolean? = this.logprobs,
        stop: List<String>? = this.stop,
        topLogprobs: Int? = this.topLogprobs,
        topP: Double? = this.topP,
    ): DeepSeekParams = DeepSeekParams(
        temperature = temperature,
        maxTokens = maxTokens,
        numberOfChoices = numberOfChoices,
        speculation = speculation,
        schema = schema,
        toolChoice = toolChoice,
        user = user,
        includeThoughts = includeThoughts,
        thinkingBudget = thinkingBudget,
        frequencyPenalty = frequencyPenalty,
        presencePenalty = presencePenalty,
        logprobs = logprobs,
        stop = stop,
        topLogprobs = topLogprobs,
        topP = topP,
    )

    // Structural equality over every field (base + DeepSeek-specific).
    override fun equals(other: Any?): Boolean = when {
        this === other -> true
        other !is DeepSeekParams -> false
        else ->
            temperature == other.temperature &&
                maxTokens == other.maxTokens &&
                numberOfChoices == other.numberOfChoices &&
                speculation == other.speculation &&
                schema == other.schema &&
                toolChoice == other.toolChoice &&
                user == other.user &&
                includeThoughts == other.includeThoughts &&
                thinkingBudget == other.thinkingBudget &&
                frequencyPenalty == other.frequencyPenalty &&
                presencePenalty == other.presencePenalty &&
                logprobs == other.logprobs &&
                stop == other.stop &&
                topLogprobs == other.topLogprobs &&
                topP == other.topP
    }

    // Classic 31-based accumulation over the same fields used by equals.
    override fun hashCode(): Int = listOf(
        temperature, maxTokens, numberOfChoices,
        speculation, schema, toolChoice,
        user, includeThoughts, thinkingBudget,
        frequencyPenalty, presencePenalty,
        logprobs, stop, topLogprobs, topP,
    ).fold(0) { acc, element ->
        31 * acc + (element?.hashCode() ?: 0)
    }

    override fun toString(): String = buildString {
        append("DeepSeekParams(")
        append("temperature=$temperature")
        append(", maxTokens=$maxTokens")
        append(", numberOfChoices=$numberOfChoices")
        append(", speculation=$speculation")
        append(", schema=$schema")
        append(", toolChoice=$toolChoice")
        append(", user=$user")
        append(", includeThoughts=$includeThoughts")
        append(", thinkingBudget=$thinkingBudget")
        append(", frequencyPenalty=$frequencyPenalty")
        append(", presencePenalty=$presencePenalty")
        append(", logprobs=$logprobs")
        append(", stop=$stop")
        append(", topLogprobs=$topLogprobs")
        append(", topP=$topP")
        append(")")
    }
}
Loading
Loading