AI rule node: add max output tokens for all providers
This commit is contained in:
parent
bc96c63fcf
commit
d81d41fd7b
@ -63,6 +63,7 @@ class Langchain4jChatModelConfigurerImpl implements Langchain4jChatModelConfigur
|
||||
.topP(modelConfig.topP())
|
||||
.frequencyPenalty(modelConfig.frequencyPenalty())
|
||||
.presencePenalty(modelConfig.presencePenalty())
|
||||
.maxTokens(modelConfig.maxOutputTokens())
|
||||
.timeout(toDuration(modelConfig.timeoutSeconds()))
|
||||
.maxRetries(modelConfig.maxRetries())
|
||||
.build();
|
||||
@ -78,6 +79,7 @@ class Langchain4jChatModelConfigurerImpl implements Langchain4jChatModelConfigur
|
||||
.topP(modelConfig.topP())
|
||||
.frequencyPenalty(modelConfig.frequencyPenalty())
|
||||
.presencePenalty(modelConfig.presencePenalty())
|
||||
.maxTokens(modelConfig.maxOutputTokens())
|
||||
.timeout(toDuration(modelConfig.timeoutSeconds()))
|
||||
.maxRetries(modelConfig.maxRetries())
|
||||
.build();
|
||||
@ -94,6 +96,7 @@ class Langchain4jChatModelConfigurerImpl implements Langchain4jChatModelConfigur
|
||||
.topK(modelConfig.topK())
|
||||
.frequencyPenalty(modelConfig.frequencyPenalty())
|
||||
.presencePenalty(modelConfig.presencePenalty())
|
||||
.maxOutputTokens(modelConfig.maxOutputTokens())
|
||||
.timeout(toDuration(modelConfig.timeoutSeconds()))
|
||||
.maxRetries(modelConfig.maxRetries())
|
||||
.build();
|
||||
@ -165,6 +168,9 @@ class Langchain4jChatModelConfigurerImpl implements Langchain4jChatModelConfigur
|
||||
if (modelConfig.frequencyPenalty() != null) {
|
||||
generationConfigBuilder.setFrequencyPenalty(modelConfig.frequencyPenalty().floatValue()); // REVIEW: original diff called setPresencePenalty here — copy-paste bug; the frequencyPenalty value must go to the frequency-penalty setter, matching every other provider branch in this commit
|
||||
}
|
||||
if (modelConfig.maxOutputTokens() != null) {
|
||||
generationConfigBuilder.setMaxOutputTokens(modelConfig.maxOutputTokens());
|
||||
}
|
||||
var generationConfig = generationConfigBuilder.build();
|
||||
|
||||
// construct generative model instance
|
||||
@ -191,6 +197,7 @@ class Langchain4jChatModelConfigurerImpl implements Langchain4jChatModelConfigur
|
||||
.topP(modelConfig.topP())
|
||||
.frequencyPenalty(modelConfig.frequencyPenalty())
|
||||
.presencePenalty(modelConfig.presencePenalty())
|
||||
.maxTokens(modelConfig.maxOutputTokens())
|
||||
.timeout(toDuration(modelConfig.timeoutSeconds()))
|
||||
.maxRetries(modelConfig.maxRetries())
|
||||
.build();
|
||||
@ -205,6 +212,7 @@ class Langchain4jChatModelConfigurerImpl implements Langchain4jChatModelConfigur
|
||||
.temperature(modelConfig.temperature())
|
||||
.topP(modelConfig.topP())
|
||||
.topK(modelConfig.topK())
|
||||
.maxTokens(modelConfig.maxOutputTokens())
|
||||
.timeout(toDuration(modelConfig.timeoutSeconds()))
|
||||
.maxRetries(modelConfig.maxRetries())
|
||||
.build();
|
||||
@ -227,6 +235,7 @@ class Langchain4jChatModelConfigurerImpl implements Langchain4jChatModelConfigur
|
||||
var defaultChatRequestParams = ChatRequestParameters.builder()
|
||||
.temperature(modelConfig.temperature())
|
||||
.topP(modelConfig.topP())
|
||||
.maxOutputTokens(modelConfig.maxOutputTokens())
|
||||
.build();
|
||||
|
||||
return BedrockChatModel.builder()
|
||||
@ -248,6 +257,7 @@ class Langchain4jChatModelConfigurerImpl implements Langchain4jChatModelConfigur
|
||||
.topP(modelConfig.topP())
|
||||
.frequencyPenalty(modelConfig.frequencyPenalty())
|
||||
.presencePenalty(modelConfig.presencePenalty())
|
||||
.maxTokens(modelConfig.maxOutputTokens())
|
||||
.timeout(toDuration(modelConfig.timeoutSeconds()))
|
||||
.maxRetries(modelConfig.maxRetries())
|
||||
.build();
|
||||
|
||||
@ -31,6 +31,7 @@ public record AmazonBedrockChatModel(
|
||||
String modelId,
|
||||
Double temperature,
|
||||
Double topP,
|
||||
Integer maxOutputTokens,
|
||||
Integer timeoutSeconds,
|
||||
Integer maxRetries
|
||||
) implements AiChatModelConfig<AmazonBedrockChatModel.Config> {}
|
||||
|
||||
@ -32,6 +32,7 @@ public record AnthropicChatModel(
|
||||
Double temperature,
|
||||
Double topP,
|
||||
Integer topK,
|
||||
Integer maxOutputTokens,
|
||||
Integer timeoutSeconds,
|
||||
Integer maxRetries
|
||||
) implements AiChatModelConfig<AnthropicChatModel.Config> {}
|
||||
|
||||
@ -33,6 +33,7 @@ public record AzureOpenAiChatModel(
|
||||
Double topP,
|
||||
Double frequencyPenalty,
|
||||
Double presencePenalty,
|
||||
Integer maxOutputTokens,
|
||||
Integer timeoutSeconds,
|
||||
Integer maxRetries
|
||||
) implements AiChatModelConfig<AzureOpenAiChatModel.Config> {}
|
||||
|
||||
@ -33,6 +33,7 @@ public record GitHubModelsChatModel(
|
||||
Double topP,
|
||||
Double frequencyPenalty,
|
||||
Double presencePenalty,
|
||||
Integer maxOutputTokens,
|
||||
Integer timeoutSeconds,
|
||||
Integer maxRetries
|
||||
) implements AiChatModelConfig<GitHubModelsChatModel.Config> {}
|
||||
|
||||
@ -34,6 +34,7 @@ public record GoogleAiGeminiChatModel(
|
||||
Integer topK,
|
||||
Double frequencyPenalty,
|
||||
Double presencePenalty,
|
||||
Integer maxOutputTokens,
|
||||
Integer timeoutSeconds,
|
||||
Integer maxRetries
|
||||
) implements AiChatModelConfig<GoogleAiGeminiChatModel.Config> {}
|
||||
|
||||
@ -34,6 +34,7 @@ public record GoogleVertexAiGeminiChatModel(
|
||||
Integer topK,
|
||||
Double frequencyPenalty,
|
||||
Double presencePenalty,
|
||||
Integer maxOutputTokens,
|
||||
Integer timeoutSeconds,
|
||||
Integer maxRetries
|
||||
) implements AiChatModelConfig<GoogleVertexAiGeminiChatModel.Config> {}
|
||||
|
||||
@ -33,6 +33,7 @@ public record MistralAiChatModel(
|
||||
Double topP,
|
||||
Double frequencyPenalty,
|
||||
Double presencePenalty,
|
||||
Integer maxOutputTokens,
|
||||
Integer timeoutSeconds,
|
||||
Integer maxRetries
|
||||
) implements AiChatModelConfig<MistralAiChatModel.Config> {}
|
||||
|
||||
@ -33,6 +33,7 @@ public record OpenAiChatModel(
|
||||
Double topP,
|
||||
Double frequencyPenalty,
|
||||
Double presencePenalty,
|
||||
Integer maxOutputTokens,
|
||||
Integer timeoutSeconds,
|
||||
Integer maxRetries
|
||||
) implements AiChatModelConfig<OpenAiChatModel.Config> {}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user