diff --git a/docs/requests.md b/docs/requests.md
index e185cec54b2..8de78963154 100644
--- a/docs/requests.md
+++ b/docs/requests.md
@@ -73,7 +73,6 @@ For scenarios where you want to receive partial responses or stream data as it's
```python
import requests
import json
-from queue import Queue
def fetch_response(url, model, messages):
"""
@@ -87,7 +86,7 @@ def fetch_response(url, model, messages):
Returns:
requests.Response: The streamed response object.
"""
- payload = {"model": model, "messages": messages}
+ payload = {"model": model, "messages": messages, "stream": True}
headers = {
"Content-Type": "application/json",
"Accept": "text/event-stream",
@@ -99,7 +98,7 @@ def fetch_response(url, model, messages):
)
return response
-def process_stream(response, output_queue):
+def process_stream(response):
"""
Processes the streamed response and extracts messages.
@@ -111,37 +110,31 @@ def process_stream(response, output_queue):
if line:
line = line.decode("utf-8")
if line == "data: [DONE]":
+ print("\n\nConversation completed.")
break
if line.startswith("data: "):
try:
data = json.loads(line[6:])
- message = data.get("message", "")
+ message = data.get("choices", [{}])[0].get("delta", {}).get("content")
if message:
- output_queue.put(message)
- except json.JSONDecodeError:
+ print(message, end="", flush=True)
+ except json.JSONDecodeError as e:
+ print(f"Error decoding JSON: {e}")
continue
# Define the API endpoint
-chat_url = "http://localhost/v1/chat/completions"
+chat_url = "http://localhost:8080/v1/chat/completions"
# Define the payload
-model = "gpt-4o"
-messages = [{"role": "system", "content": "Hello, how are you?"}]
-
-# Initialize the queue to store output messages
-output_queue = Queue()
+model = ""
+messages = [{"role": "user", "content": "Hello, how are you?"}]
try:
# Fetch the streamed response
response = fetch_response(chat_url, model, messages)
# Process the streamed response
- process_stream(response, output_queue)
-
- # Retrieve messages from the queue
- while not output_queue.empty():
- msg = output_queue.get()
- print(msg)
+ process_stream(response)
except Exception as e:
print(f"An error occurred: {e}")
@@ -150,23 +143,36 @@ except Exception as e:
**Explanation:**
- **`fetch_response` Function:**
- Sends a POST request to the streaming chat completions endpoint with the specified model and messages.
- - Sets the `Accept` header to `text/event-stream` to enable streaming.
+  - Sets the `stream` parameter to `true` in the payload to enable streaming.
- Raises an exception if the request fails.
- **`process_stream` Function:**
- Iterates over each line in the streamed response.
- Decodes the line and checks for the termination signal `"data: [DONE]"`.
- Parses lines that start with `"data: "` to extract the message content.
- - Enqueues the extracted messages into `output_queue` for further processing.
- **Main Execution:**
- Defines the API endpoint, model, and messages.
- - Initializes a `Queue` to store incoming messages.
- Fetches and processes the streamed response.
- - Retrieves and prints messages from the queue.
+  - Prints each message chunk as it arrives (output is handled inside `process_stream`).
**Usage Tips:**
-- Ensure your local server supports streaming and the `Accept` header appropriately.
+- Ensure your local server supports streaming.
- Adjust the `chat_url` if your local server runs on a different port or path.
- Use threading or asynchronous programming for handling streams in real-time applications.
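+
+For example, here is a minimal sketch of the threading tip above; it reuses `fetch_response` and `process_stream` from the example, and the thread name and join timeout are illustrative:
+
+```python
+import threading
+
+# Fetch the streamed response, then print it from a background thread
+response = fetch_response(chat_url, model, messages)
+worker = threading.Thread(target=process_stream, args=(response,), name="stream-worker")
+worker.start()
+
+# The main thread is free to do other work here
+
+worker.join(timeout=60)  # wait for the stream to end; the timeout is illustrative
+```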
@@ -286,7 +277,7 @@ async def fetch_response_async(url, model, messages, output_queue):
messages (list): A list of message dictionaries.
output_queue (Queue): A queue to store the extracted messages.
"""
- payload = {"model": model, "messages": messages}
+ payload = {"model": model, "messages": messages, "stream": True}
headers = {
"Content-Type": "application/json",
"Accept": "text/event-stream",
@@ -305,7 +296,7 @@ async def fetch_response_async(url, model, messages, output_queue):
if decoded_line.startswith("data: "):
try:
data = json.loads(decoded_line[6:])
- message = data.get("message", "")
+ message = data.get("choices", [{}])[0].get("delta", {}).get("content")
if message:
output_queue.put(message)
except json.JSONDecodeError:
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index 78e13568cdc..19ba747a868 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -73,7 +73,7 @@ async def create_async_generator(
"Content-Type": "application/json",
},
cookies: dict = None,
- max_retries: int = 3,
+ max_retries: int = 0,
**kwargs
) -> AsyncResult:
if cookies is None and conversation is not None:
diff --git a/g4f/Provider/GizAI.py b/g4f/Provider/GizAI.py
index c33f82df3ee..a8401b43b95 100644
--- a/g4f/Provider/GizAI.py
+++ b/g4f/Provider/GizAI.py
@@ -6,7 +6,6 @@
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-
class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://app.giz.ai/assistant"
api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
diff --git a/g4f/Provider/Mhystical.py b/g4f/Provider/Mhystical.py
index 42a7039623b..570cc85cd5f 100644
--- a/g4f/Provider/Mhystical.py
+++ b/g4f/Provider/Mhystical.py
@@ -18,6 +18,7 @@ class Mhystical(OpenaiAPI):
label = "Mhystical"
url = "https://mhystical.cc"
api_endpoint = "https://api.mhystical.cc/v1/completions"
+ login_url = "https://mhystical.cc/dashboard"
working = True
needs_auth = False
supports_stream = False # Set to False, as streaming is not specified in ChatifyAI
@@ -37,11 +38,12 @@ def create_async_generator(
model: str,
messages: Messages,
stream: bool = False,
+ api_key: str = "mhystical",
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
- "x-api-key": "mhystical",
+ "x-api-key": api_key,
"Content-Type": "application/json",
"accept": "*/*",
"cache-control": "no-cache",
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 3df3fafd7df..4990a86961e 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -7,12 +7,12 @@
from typing import Optional
from aiohttp import ClientSession
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests.raise_for_status import raise_for_status
from ..typing import AsyncResult, Messages
from ..image import ImageResponse
-from .needs_auth.OpenaiAPI import OpenaiAPI
-class PollinationsAI(OpenaiAPI):
+class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Pollinations AI"
url = "https://pollinations.ai"
@@ -21,21 +21,21 @@ class PollinationsAI(OpenaiAPI):
supports_stream = True
supports_system_message = True
supports_message_history = True
-
+
# API endpoints base
api_base = "https://text.pollinations.ai/openai"
-
+
# API endpoints
text_api_endpoint = "https://text.pollinations.ai"
image_api_endpoint = "https://image.pollinations.ai"
-
+
# Models configuration
default_model = "openai"
default_image_model = "flux"
-
+
image_models = []
models = []
-
+
additional_models_image = ["midjourney", "dall-e-3"]
additional_models_text = ["sur", "sur-mistral", "claude"]
model_aliases = {
@@ -100,7 +100,7 @@ async def create_async_generator(
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
-
+
# Check if models
# Image generation
if model in cls.image_models:
@@ -151,7 +151,6 @@ async def _generate_image(
if seed is None:
seed = random.randint(0, 10000)
-
headers = {
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.9',
@@ -177,7 +176,7 @@ async def _generate_image(
async with session.head(url, proxy=proxy) as response:
if response.status == 200:
- image_response = ImageResponse(images=url, alt=messages[-1]["content"])
+ image_response = ImageResponse(images=url, alt=messages[-1]["content"] if prompt is None else prompt)
yield image_response
@classmethod
@@ -195,7 +194,7 @@ async def _generate_text(
) -> AsyncResult:
if api_key is None:
api_key = "dummy" # Default value if api_key is not provided
-
+
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
@@ -215,7 +214,7 @@ async def _generate_text(
"jsonMode": False,
"stream": stream
}
-
+
async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
diff --git a/g4f/Provider/needs_auth/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py
index c5ebac1e0fd..ea114a1b659 100644
--- a/g4f/Provider/needs_auth/DeepInfra.py
+++ b/g4f/Provider/needs_auth/DeepInfra.py
@@ -7,6 +7,7 @@
class DeepInfra(OpenaiAPI):
label = "DeepInfra"
url = "https://deepinfra.com"
+ login_url = "https://deepinfra.com/dash/api_keys"
working = True
api_base = "https://api.deepinfra.com/v1/openai",
needs_auth = True
diff --git a/g4f/Provider/needs_auth/DeepSeek.py b/g4f/Provider/needs_auth/DeepSeek.py
new file mode 100644
index 00000000000..88b187be52a
--- /dev/null
+++ b/g4f/Provider/needs_auth/DeepSeek.py
@@ -0,0 +1,14 @@
+from __future__ import annotations
+
+from .OpenaiAPI import OpenaiAPI
+
+class DeepSeek(OpenaiAPI):
+ label = "DeepSeek"
+ url = "https://platform.deepseek.com"
+ login_url = "https://platform.deepseek.com/api_keys"
+ working = True
+ api_base = "https://api.deepseek.com"
+ needs_auth = True
+ supports_stream = True
+ supports_message_history = True
+ default_model = "deepseek-chat"
diff --git a/g4f/Provider/needs_auth/GlhfChat.py b/g4f/Provider/needs_auth/GlhfChat.py
index c0bf8e7e02b..f3a578af17f 100644
--- a/g4f/Provider/needs_auth/GlhfChat.py
+++ b/g4f/Provider/needs_auth/GlhfChat.py
@@ -5,6 +5,7 @@
class GlhfChat(OpenaiAPI):
label = "GlhfChat"
url = "https://glhf.chat"
+ login_url = "https://glhf.chat/users/settings/api"
api_base = "https://glhf.chat/api/openai/v1"
working = True
model_aliases = {
diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 7ff9b555ed1..75c3574fd28 100644
--- a/g4f/Provider/needs_auth/OpenaiAPI.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -8,7 +8,7 @@
from ...typing import Union, Optional, AsyncResult, Messages, ImagesType
from ...requests import StreamSession, raise_for_status
from ...providers.response import FinishReason, ToolCalls, Usage
-from ...errors import MissingAuthError, ResponseError
+from ...errors import MissingAuthError
from ...image import to_data_uri
from ... import debug
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 0fe7cafe7eb..6fd88782d91 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -106,9 +106,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
@classmethod
async def on_auth_async(cls, **kwargs) -> AsyncIterator:
- if cls.needs_auth:
- async for chunk in cls.login():
- yield chunk
+ async for chunk in cls.login():
+ yield chunk
yield AuthResult(
api_key=cls._api_key,
cookies=cls._cookies or RequestConfig.cookies or {},
@@ -335,11 +334,11 @@ async def create_authed(
cls._update_request_args(auth_result, session)
await raise_for_status(response)
else:
- if cls._headers is None:
+ if cls._headers is None and getattr(auth_result, "cookies", None):
cls._create_request_args(auth_result.cookies, auth_result.headers)
- if not cls._set_api_key(auth_result.api_key):
- raise MissingAuthError("Access token is not valid")
- async with session.get(cls.url, headers=auth_result.headers) as response:
+ if not cls._set_api_key(getattr(auth_result, "api_key", None)):
+ raise MissingAuthError("Access token is not valid")
+ async with session.get(cls.url, headers=cls._headers) as response:
cls._update_request_args(auth_result, session)
await raise_for_status(response)
try:
@@ -349,9 +348,11 @@ async def create_authed(
debug.log(f"{e.__class__.__name__}: {e}")
model = cls.get_model(model)
if conversation is None:
- conversation = Conversation(conversation_id, str(uuid.uuid4()))
+ conversation = Conversation(conversation_id, str(uuid.uuid4()), getattr(auth_result, "cookies", {}).get("oai-did"))
else:
conversation = copy(conversation)
+ if getattr(auth_result, "cookies", {}).get("oai-did") != getattr(conversation, "user_id", None):
+                conversation = Conversation(None, str(uuid.uuid4()), getattr(auth_result, "cookies", {}).get("oai-did"))
if cls._api_key is None:
auto_continue = False
conversation.finish_reason = None
@@ -361,11 +362,11 @@ async def create_authed(
f"{cls.url}/backend-anon/sentinel/chat-requirements"
if cls._api_key is None else
f"{cls.url}/backend-api/sentinel/chat-requirements",
- json={"p": None if not getattr(auth_result, "proof_token") else get_requirements_token(auth_result.proof_token)},
+            json={"p": get_requirements_token(auth_result.proof_token) if getattr(auth_result, "proof_token", None) else None},
headers=cls._headers
) as response:
- if response.status == 401:
- cls._headers = cls._api_key = None
+ if response.status in (401, 403):
+ auth_result.reset()
else:
cls._update_request_args(auth_result, session)
await raise_for_status(response)
@@ -380,14 +381,13 @@ async def create_authed(
# cls._set_api_key(auth_result.access_token)
# if auth_result.arkose_token is None:
# raise MissingAuthError("No arkose token found in .har file")
-
if "proofofwork" in chat_requirements:
- if auth_result.proof_token is None:
+            if getattr(auth_result, "proof_token", None) is None:
auth_result.proof_token = get_config(auth_result.headers.get("user-agent"))
proofofwork = generate_proof_token(
**chat_requirements["proofofwork"],
- user_agent=auth_result.headers.get("user-agent"),
- proof_token=getattr(auth_result, "proof_token")
+ user_agent=getattr(auth_result, "headers", {}).get("user-agent"),
+ proof_token=getattr(auth_result, "proof_token", None)
)
[debug.log(text) for text in (
#f"Arkose: {'False' if not need_arkose else auth_result.arkose_token[:12]+'...'}",
@@ -434,7 +434,7 @@ async def create_authed(
# headers["openai-sentinel-arkose-token"] = RequestConfig.arkose_token
if proofofwork is not None:
headers["openai-sentinel-proof-token"] = proofofwork
- if need_turnstile and auth_result.turnstile_token is not None:
+ if need_turnstile and getattr(auth_result, "turnstile_token", None) is not None:
headers['openai-sentinel-turnstile-token'] = auth_result.turnstile_token
async with session.post(
f"{cls.url}/backend-anon/conversation"
@@ -587,7 +587,7 @@ async def login(
cookies: Cookies = None,
headers: dict = None,
**kwargs
- ) -> AsyncIterator[str]:
+ ) -> AsyncIterator:
if cls._expires is not None and (cls._expires - 60*10) < time.time():
cls._headers = cls._api_key = None
if cls._headers is None or headers is not None:
@@ -653,7 +653,7 @@ def on_request(event: nodriver.cdp.network.RequestWillBeSent):
await page.evaluate("document.getElementById('prompt-textarea').innerText = 'Hello'")
await page.evaluate("document.querySelector('[data-testid=\"send-button\"]').click()")
while True:
- if cls._api_key is not None:
+ if cls._api_key is not None or not cls.needs_auth:
break
body = await page.evaluate("JSON.stringify(window.__remixContext)")
if body:
@@ -689,8 +689,10 @@ def _create_request_args(cls, cookies: Cookies = None, headers: dict = None, use
@classmethod
def _update_request_args(cls, auth_result: AuthResult, session: StreamSession):
- for c in session.cookie_jar if hasattr(session, "cookie_jar") else session.cookies.jar:
- auth_result.cookies[getattr(c, "key", getattr(c, "name", ""))] = c.value
+ if hasattr(auth_result, "cookies"):
+ for c in session.cookie_jar if hasattr(session, "cookie_jar") else session.cookies.jar:
+ auth_result.cookies[getattr(c, "key", getattr(c, "name", ""))] = c.value
+ cls._cookies = auth_result.cookies
cls._update_cookie_header()
@classmethod
@@ -717,12 +719,13 @@ class Conversation(JsonConversation):
"""
Class to encapsulate response fields.
"""
- def __init__(self, conversation_id: str = None, message_id: str = None, finish_reason: str = None, parent_message_id: str = None):
+ def __init__(self, conversation_id: str = None, message_id: str = None, user_id: str = None, finish_reason: str = None, parent_message_id: str = None):
self.conversation_id = conversation_id
self.message_id = message_id
self.finish_reason = finish_reason
self.is_recipient = False
self.parent_message_id = message_id if parent_message_id is None else parent_message_id
+ self.user_id = user_id
def get_cookies(
urls: Optional[Iterator[str]] = None
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 24282f0f3ec..4b800ff45ad 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -5,6 +5,7 @@
from .CopilotAccount import CopilotAccount
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
+from .DeepSeek import DeepSeek
from .Gemini import Gemini
from .GeminiPro import GeminiPro
from .GithubCopilot import GithubCopilot
diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py
index 989a9efc44a..2acd34fbad7 100644
--- a/g4f/Provider/openai/har_file.py
+++ b/g4f/Provider/openai/har_file.py
@@ -32,7 +32,7 @@ class RequestConfig:
arkose_token: str = None
headers: dict = {}
cookies: dict = {}
- data_build: str = "prod-697873d7e78bb14df6e13af3a91fa237cc4db415"
+ data_build: str = "prod-db8e51e8414e068257091cf5003a62d3d4ee6ed0"
class arkReq:
def __init__(self, arkURL, arkBx, arkHeader, arkBody, arkCookies, userAgent):
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 73a9f64e64e..d4a3bd76a77 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -432,7 +432,7 @@ def upload_cookies(files: List[UploadFile]):
HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
})
def read_files(request: Request, bucket_id: str, delete_files: bool = True, refine_chunks_with_spacy: bool = False):
- bucket_dir = os.path.join(get_cookies_dir(), bucket_id)
+ bucket_dir = os.path.join(get_cookies_dir(), "buckets", bucket_id)
event_stream = "text/event-stream" in request.headers.get("accept", "")
if not os.path.isdir(bucket_dir):
return ErrorResponse.from_message("Bucket dir not found", 404)
@@ -443,7 +443,7 @@ def read_files(request: Request, bucket_id: str, delete_files: bool = True, refi
HTTP_200_OK: {"model": UploadResponseModel}
})
def upload_files(bucket_id: str, files: List[UploadFile]):
- bucket_dir = os.path.join(get_cookies_dir(), bucket_id)
+ bucket_dir = os.path.join(get_cookies_dir(), "buckets", bucket_id)
os.makedirs(bucket_dir, exist_ok=True)
filenames = []
for file in files:
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index cca85db4de1..cd2a43feee0 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -63,7 +63,7 @@ def iter_response(
tool_calls = chunk.get_list()
continue
elif isinstance(chunk, Usage):
- usage = chunk.get_dict()
+ usage = chunk
continue
elif isinstance(chunk, BaseConversation):
yield chunk
@@ -90,19 +90,23 @@ def iter_response(
idx += 1
if usage is None:
- usage = Usage(prompt_tokens=0, completion_tokens=idx, total_tokens=idx).get_dict()
+ usage = Usage(prompt_tokens=0, completion_tokens=idx, total_tokens=idx)
+
finish_reason = "stop" if finish_reason is None else finish_reason
if stream:
- yield ChatCompletionChunk.model_construct(None, finish_reason, completion_id, int(time.time()))
+ yield ChatCompletionChunk.model_construct(
+ None, finish_reason, completion_id, int(time.time()),
+ usage=usage.get_dict()
+ )
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
content = filter_json(content)
- yield ChatCompletion.model_construct(content, finish_reason, completion_id, int(time.time()), **filter_none(
- tool_calls=tool_calls,
- usage=usage
- ))
+ yield ChatCompletion.model_construct(
+ content, finish_reason, completion_id, int(time.time()),
+ usage=usage.get_dict(), **filter_none(tool_calls=tool_calls)
+ )
# Synchronous iter_append_model_and_provider function
def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType:
@@ -126,6 +130,8 @@ async def async_iter_response(
finish_reason = None
completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
idx = 0
+ tool_calls = None
+ usage = None
try:
async for chunk in response:
@@ -135,6 +141,12 @@ async def async_iter_response(
elif isinstance(chunk, BaseConversation):
yield chunk
continue
+ elif isinstance(chunk, ToolCalls):
+ tool_calls = chunk.get_list()
+ continue
+ elif isinstance(chunk, Usage):
+ usage = chunk
+ continue
elif isinstance(chunk, SynthesizeData) or not chunk:
continue
@@ -158,13 +170,22 @@ async def async_iter_response(
finish_reason = "stop" if finish_reason is None else finish_reason
+ if usage is None:
+ usage = Usage(prompt_tokens=0, completion_tokens=idx, total_tokens=idx)
+
if stream:
- yield ChatCompletionChunk.model_construct(None, finish_reason, completion_id, int(time.time()))
+ yield ChatCompletionChunk.model_construct(
+ None, finish_reason, completion_id, int(time.time()),
+ usage=usage.get_dict()
+ )
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
content = filter_json(content)
- yield ChatCompletion.model_construct(content, finish_reason, completion_id, int(time.time()))
+ yield ChatCompletion.model_construct(
+ content, finish_reason, completion_id, int(time.time()),
+ usage=usage.get_dict(), **filter_none(tool_calls=tool_calls)
+ )
finally:
await safe_aclose(response)
diff --git a/g4f/client/stubs.py b/g4f/client/stubs.py
index 8f3425de9b4..f1d07fe703d 100644
--- a/g4f/client/stubs.py
+++ b/g4f/client/stubs.py
@@ -36,6 +36,7 @@ class ChatCompletionChunk(BaseModel):
model: str
provider: Optional[str]
choices: List[ChatCompletionDeltaChoice]
+    usage: Optional[dict] = None
@classmethod
def model_construct(
@@ -43,7 +44,8 @@ def model_construct(
content: str,
finish_reason: str,
completion_id: str = None,
- created: int = None
+ created: int = None,
+ usage: Usage = None
):
return super().model_construct(
id=f"chatcmpl-{completion_id}" if completion_id else None,
@@ -54,7 +56,8 @@ def model_construct(
choices=[ChatCompletionDeltaChoice.model_construct(
ChatCompletionDelta.model_construct(content),
finish_reason
- )]
+ )],
+ **filter_none(usage=usage)
)
class ChatCompletionMessage(BaseModel):
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index b06da6fbada..de604a1b18a 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -36,6 +36,7 @@
import llamaTokenizer from "llama-tokenizer-js"
+