Merge pull request #76 from pufflyai/441-bug-playground-completion-length-is-longer-than-the-max-token-parameter-in-the-settings

Fix wrong gpt2 model name.
au-re authored Oct 21, 2023
2 parents 79d5da2 + acbb789 commit 6e9909e
Showing 2 changed files with 9 additions and 9 deletions.
4 changes: 2 additions & 2 deletions packages/@pufflig/ps-models/src/models/hf.ts
@@ -11,8 +11,8 @@ export const hf_settings = [
 ];
 
 export const hf_completion: ModelDefinition = {
-  "gpt-2": {
-    modelId: "gpt-2",
+  gpt2: {
+    modelId: "gpt2",
     description: "",
     settings: hf_settings,
     streaming: false,
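
The rename matters because the Hugging Face Hub id for GPT-2 is `gpt2`, with no hyphen; an id of `gpt-2` does not resolve to any hosted model. A minimal sketch of how such a `modelId` is typically consumed, assuming a plain call to the hosted Inference API (the endpoint URL pattern is Hugging Face's; the helper name is hypothetical, not this repository's code):

```ts
// Hypothetical helper: request a completion from the Hugging Face Inference API
// by Hub model id. An unknown id such as "gpt-2" comes back as an HTTP error.
async function hfComplete(modelId: string, inputs: string): Promise<string> {
  const res = await fetch(`https://api-inference.huggingface.co/models/${modelId}`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${process.env.HF_API_TOKEN}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ inputs }),
  });
  if (!res.ok) throw new Error(`HF request failed: ${res.status}`); // "gpt-2" fails here
  const data = (await res.json()) as Array<{ generated_text: string }>;
  return data[0].generated_text;
}

// Usage: the corrected id works, the hyphenated one does not.
// await hfComplete("gpt2", "Hello, world");
```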
14 changes: 7 additions & 7 deletions packages/@pufflig/ps-models/src/models/openai.ts
@@ -30,7 +30,7 @@ export const openai_completion: ModelDefinition = {
         "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
     },
     {
-      id: "max_tokens",
+      id: "maxTokens",
       type: "number",
       name: "Max Tokens",
       max: 4097,
@@ -93,7 +93,7 @@ export const openai_completion: ModelDefinition = {
         "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
     },
     {
-      id: "max_tokens",
+      id: "maxTokens",
       type: "number",
       name: "Max Tokens",
       max: 16384,
@@ -156,7 +156,7 @@ export const openai_completion: ModelDefinition = {
         "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
     },
     {
-      id: "max_tokens",
+      id: "maxTokens",
       type: "number",
       name: "Max Tokens",
       max: 16384,
@@ -222,7 +222,7 @@ export const openai_chat: ModelDefinition = {
         "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
     },
     {
-      id: "max_tokens",
+      id: "maxTokens",
       type: "number",
       name: "Max Tokens",
       // although the documentation says the model should support 8192 tokens, it actually supports 4096
@@ -286,7 +286,7 @@ export const openai_chat: ModelDefinition = {
         "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
     },
     {
-      id: "max_tokens",
+      id: "maxTokens",
       type: "number",
       name: "Max Tokens",
       max: 32768,
@@ -349,7 +349,7 @@ export const openai_chat: ModelDefinition = {
         "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
     },
     {
-      id: "max_tokens",
+      id: "maxTokens",
       type: "number",
       name: "Max Tokens",
       max: 4097,
@@ -412,7 +412,7 @@ export const openai_chat: ModelDefinition = {
         "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
     },
     {
-      id: "max_tokens",
+      id: "maxTokens",
       type: "number",
       name: "Max Tokens",
       max: 16385,
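
The id rename from `max_tokens` to `maxTokens` suggests the playground looks settings up by their camelCase id, while OpenAI's API itself still expects the snake_case `max_tokens` request field. A minimal sketch of that mapping, assuming a settings record keyed by id (the function and type names are hypothetical, not this repository's actual code):

```ts
// Hypothetical glue: read the "maxTokens" setting and forward it to OpenAI,
// whose request body uses the snake_case parameter name "max_tokens".
interface PlaygroundSettings {
  temperature?: number;
  maxTokens?: number; // keyed by the setting id defined in openai.ts
}

function toCompletionRequest(model: string, prompt: string, settings: PlaygroundSettings) {
  return {
    model,
    prompt,
    temperature: settings.temperature,
    max_tokens: settings.maxTokens, // camelCase setting id -> snake_case API field
  };
}

// Usage sketch. With a mismatched setting id, settings.maxTokens would be
// undefined and the API would fall back to its own default, which would match
// the linked bug: completions longer than the configured max-token limit.
// toCompletionRequest("gpt-3.5-turbo-instruct", "Hello", { maxTokens: 256 });
```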
