Add presencePenalty and frequencyPenalty settings
afirstenberg committed Jan 5, 2025
1 parent 4b25871 commit f87f22a
Showing 4 changed files with 52 additions and 3 deletions.
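For context, a minimal usage sketch of the new settings. This is not code from the commit: the concrete ChatGoogle subclass (from @langchain/google-gauth) and the model name are illustrative assumptions; any ChatGoogleBase subclass accepts the same params once this lands.

import { ChatGoogle } from "@langchain/google-gauth";

// Example values only; both penalties default to undefined after this commit.
const model = new ChatGoogle({
  model: "gemini-1.5-flash", // hypothetical model name for illustration
  presencePenalty: 0.5, // flat penalty once a token has appeared at all
  frequencyPenalty: 0.3, // penalty that grows with each reuse of a token
});

const response = await model.invoke("Write a short poem about the sea.");
console.log(response.content);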
4 changes: 4 additions & 0 deletions libs/langchain-google-common/src/chat_models.ts
@@ -189,6 +189,10 @@ export abstract class ChatGoogleBase<AuthOptions>

topK = 40;

presencePenalty = undefined;

frequencyPenalty = undefined;

stopSequences: string[] = [];

logprobs: false;
41 changes: 38 additions & 3 deletions libs/langchain-google-common/src/types.ts
@@ -162,6 +162,36 @@ export interface GoogleAIModelParams
*/
topK?: number;

/**
* Presence penalty applied to the next token's logprobs
* if the token has already been seen in the response.
* This penalty is binary on/off and not dependent on the
* number of times the token is used (after the first).
* Use frequencyPenalty for a penalty that increases with each use.
* A positive penalty will discourage the use of tokens that have
* already been used in the response, increasing the vocabulary.
* A negative penalty will encourage the use of tokens that have
* already been used in the response, decreasing the vocabulary.
*/
presencePenalty?: number;

/**
* Frequency penalty applied to the next token's logprobs,
* multiplied by the number of times each token has been seen
* in the response so far.
* A positive penalty will discourage the use of tokens that
* have already been used, proportional to the number of times
* the token has been used:
* The more a token is used, the more difficult it is for the model
* to use that token again, increasing the vocabulary of responses.
* Caution: A _negative_ penalty will encourage the model to reuse
* tokens proportional to the number of times the token has been used.
* Small negative values will reduce the vocabulary of a response.
* Larger negative values will cause the model to start repeating
* a common token until it hits the maxOutputTokens limit.
*/
frequencyPenalty?: number;

stopSequences?: string[];

safetySettings?: GoogleAISafetySetting[];
@@ -187,13 +217,16 @@ export interface GoogleAIModelParams

/**
* Whether to return log probabilities of the output tokens or not.
* If true, returns the log probabilities of each output token returned in the content of message.
* If true, returns the log probabilities of each output token
* returned in the content of the message.
*/
logprobs?: boolean;

/**
* An integer between 0 and 5 specifying the number of most likely tokens to return at each token position,
* each with an associated log probability. logprobs must be set to true if this parameter is used.
* An integer between 0 and 5 specifying the number of
* most likely tokens to return at each token position,
* each with an associated log probability.
* logprobs must be set to true if this parameter is used.
*/
topLogprobs?: number;
}
@@ -478,6 +511,8 @@ export interface GeminiGenerationConfig {
temperature?: number;
topP?: number;
topK?: number;
presencePenalty?: number;
frequencyPenalty?: number;
responseMimeType?: GoogleAIResponseMimeType;
responseLogprobs?: boolean;
logprobs?: number;
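To make the presence/frequency distinction above concrete, here is an illustrative sketch of the logprob adjustment the doc comments describe. The actual computation happens server-side in the Gemini API; this helper is hypothetical and not part of the repo.

// Hypothetical illustration of the adjustment described in the comments above.
function penalizedLogprob(
  logprob: number,
  timesSeen: number, // how often this token already appears in the response
  presencePenalty = 0,
  frequencyPenalty = 0
): number {
  const presenceTerm = timesSeen > 0 ? presencePenalty : 0; // binary on/off
  const frequencyTerm = frequencyPenalty * timesSeen; // scales with reuse
  return logprob - presenceTerm - frequencyTerm;
}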
8 changes: 8 additions & 0 deletions libs/langchain-google-common/src/utils/common.ts
@@ -136,6 +136,14 @@ export function copyAIModelParamsInto(
target.maxOutputTokens;
ret.topP = options?.topP ?? params?.topP ?? target.topP;
ret.topK = options?.topK ?? params?.topK ?? target.topK;
ret.presencePenalty =
options?.presencePenalty ??
params?.presencePenalty ??
target.presencePenalty;
ret.frequencyPenalty =
options?.frequencyPenalty ??
params?.frequencyPenalty ??
target.frequencyPenalty;
ret.stopSequences =
options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
ret.safetySettings =
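The nullish-coalescing chains above give the new settings the same precedence as the existing ones: call-time options override constructor params, which override the instance defaults. A standalone sketch of that resolution order (resolve is a hypothetical helper, not part of this repo):

const resolve = <T>(option?: T, param?: T, fallback?: T): T | undefined =>
  option ?? param ?? fallback;

resolve(undefined, 0.7, 0.2); // => 0.7 (constructor param wins over default)
resolve(0, 0.7, 0.2); // => 0 (?? skips only null/undefined, so 0 is kept)

The second case matters here: 0 and negative penalties are meaningful values, and ?? passes them through rather than falling back.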
2 changes: 2 additions & 0 deletions libs/langchain-google-common/src/utils/gemini.ts
@@ -1084,6 +1084,8 @@ export function getGeminiAPI(config?: GeminiAPIConfig): GoogleAIAPI {
temperature: parameters.temperature,
topK: parameters.topK,
topP: parameters.topP,
presencePenalty: parameters.presencePenalty,
frequencyPenalty: parameters.frequencyPenalty,
maxOutputTokens: parameters.maxOutputTokens,
stopSequences: parameters.stopSequences,
responseMimeType: parameters.responseMimeType,
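With the mapping above, the penalties flow into the request's generationConfig. A hypothetical sketch of the resulting Gemini REST request body (example values; field names follow the public GenerationConfig schema):

const body = {
  contents: [{ role: "user", parts: [{ text: "Hello" }] }],
  generationConfig: {
    temperature: 0.7,
    topK: 40,
    topP: 0.8,
    presencePenalty: 0.5,
    frequencyPenalty: 0.3,
    maxOutputTokens: 1024,
  },
};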
