diff --git a/g4f/Provider/Jmuz.py b/g4f/Provider/Jmuz.py
index c713398bf5a..a5084fc01fc 100644
--- a/g4f/Provider/Jmuz.py
+++ b/g4f/Provider/Jmuz.py
@@ -5,7 +5,7 @@
 
 class Jmuz(OpenaiAPI):
     label = "Jmuz"
-    url = "https://jmuz.me"
+    url = "https://discord.gg/qXfu24JmsB"
     login_url = None
     api_base = "https://jmuz.me/gpt/api/v2"
     api_key = "prod"
@@ -15,7 +15,7 @@ class Jmuz(OpenaiAPI):
     supports_stream = True
     supports_system_message = False
 
-    default_model = 'gpt-4o'
+    default_model = "gpt-4o"
     model_aliases = {
         "gemini": "gemini-exp",
         "deepseek-chat": "deepseek-2.5",
@@ -29,13 +29,7 @@ def get_models(cls):
         return cls.models
 
     @classmethod
-    def get_model(cls, model: str, **kwargs) -> str:
-        if model in cls.get_models():
-            return model
-        return cls.default_model
-
-    @classmethod
-    def create_async_generator(
+    async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
@@ -52,7 +46,8 @@ def create_async_generator(
             "cache-control": "no-cache",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
         }
-        return super().create_async_generator(
+        started = False
+        async for chunk in super().create_async_generator(
             model=model,
             messages=messages,
             api_base=cls.api_base,
@@ -60,4 +55,11 @@ def create_async_generator(
             stream=cls.supports_stream,
             headers=headers,
             **kwargs
-        )
+        ):
+            if isinstance(chunk, str) and cls.url in chunk:
+                continue
+            if isinstance(chunk, str) and not started:
+                chunk = chunk.lstrip()
+            if chunk:
+                started = True
+                yield chunk
diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py
index 9829e59d1a5..65fffd1ee5a 100644
--- a/g4f/Provider/Pizzagpt.py
+++ b/g4f/Provider/Pizzagpt.py
@@ -10,7 +10,7 @@
 class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.pizzagpt.it"
     api_endpoint = "/api/chatx-completion"
-    working = True
+    working = False
     default_model = 'gpt-4o-mini'
 
     @classmethod
@@ -46,6 +46,6 @@ async def create_async_generator(
             response_json = await response.json()
             content = response_json.get("answer", response_json).get("content")
             if content:
-                if "misuse detected. please get in touch" in content:
+                if "Misuse detected. please get in touch" in content:
                     raise ValueError(content)
                 yield content
diff --git a/g4f/Provider/needs_auth/Custom.py b/g4f/Provider/needs_auth/Custom.py
index d78e5e28b7e..8332394b26c 100644
--- a/g4f/Provider/needs_auth/Custom.py
+++ b/g4f/Provider/needs_auth/Custom.py
@@ -3,9 +3,10 @@
 from .OpenaiAPI import OpenaiAPI
 
 class Custom(OpenaiAPI):
-    label = "Custom"
+    label = "Custom Provider"
     url = None
-    login_url = "http://localhost:8080"
+    login_url = None
     working = True
     api_base = "http://localhost:8080/v1"
-    needs_auth = False
\ No newline at end of file
+    needs_auth = False
+    sort_models = False
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py
index 502fcb5d5d8..89dbf52e35a 100644
--- a/g4f/Provider/needs_auth/GeminiPro.py
+++ b/g4f/Provider/needs_auth/GeminiPro.py
@@ -3,12 +3,14 @@
 import base64
 import json
 import requests
+from typing import Optional
 from aiohttp import ClientSession, BaseConnector
 
 from ...typing import AsyncResult, Messages, ImagesType
 from ...image import to_bytes, is_accepted_format
 from ...errors import MissingAuthError
 from ...requests.raise_for_status import raise_for_status
+from ...providers.response import Usage, FinishReason
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import get_connector
 from ... import debug
@@ -62,6 +64,7 @@ async def create_async_generator(
         api_base: str = api_base,
         use_auth_header: bool = False,
         images: ImagesType = None,
+        tools: Optional[list] = None,
         connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
@@ -104,7 +107,10 @@
                 "maxOutputTokens": kwargs.get("max_tokens"),
                 "topP": kwargs.get("top_p"),
                 "topK": kwargs.get("top_k"),
-            }
+            },
+            "tools": [{
+                "functionDeclarations": tools
+            }] if tools else None
         }
         system_prompt = "\n".join(
             message["content"]
@@ -128,6 +134,15 @@
                             data = b"".join(lines)
                             data = json.loads(data)
                             yield data["candidates"][0]["content"]["parts"][0]["text"]
+                            if "finishReason" in data["candidates"][0]:
+                                yield FinishReason(data["candidates"][0]["finishReason"].lower())
+                            usage = data.get("usageMetadata")
+                            if usage:
+                                yield Usage(
+                                    prompt_tokens=usage.get("promptTokenCount"),
+                                    completion_tokens=usage.get("candidatesTokenCount"),
+                                    total_tokens=usage.get("totalTokenCount")
+                                )
                         except:
                             data = data.decode(errors="ignore") if isinstance(data, bytes) else data
                             raise RuntimeError(f"Read chunk failed: {data}")
diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index 9d4e353847f..02220e78b55 100644
--- a/g4f/Provider/needs_auth/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -23,21 +23,25 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = HuggingChat.default_model
     default_image_model = HuggingChat.default_image_model
     model_aliases = HuggingChat.model_aliases
+    extra_models = [
+        "meta-llama/Llama-3.2-11B-Vision-Instruct",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+        "NousResearch/Hermes-3-Llama-3.1-8B",
+    ]
 
     @classmethod
     def get_models(cls) -> list[str]:
         if not cls.models:
             url = "https://huggingface.co/api/models?inference=warm&pipeline_tag=text-generation"
             models = [model["id"] for model in requests.get(url).json()]
-            models.append("meta-llama/Llama-3.2-11B-Vision-Instruct")
-            models.append("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
+            models.extend(cls.extra_models)
             models.sort()
             if not cls.image_models:
                 url = "https://huggingface.co/api/models?pipeline_tag=text-to-image"
                 cls.image_models = [model["id"] for model in requests.get(url).json() if model["trendingScore"] >= 20]
                 cls.image_models.sort()
             models.extend(cls.image_models)
-            cls.models = models
+            cls.models = list(set(models))
         return cls.models
 
     @classmethod
diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 3cc558bdb89..1ca256d4f2d 100644
--- a/g4f/Provider/needs_auth/OpenaiAPI.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -23,6 +23,7 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
     supports_system_message = True
     default_model = ""
     fallback_models = []
+    sort_models = True
 
     @classmethod
     def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
@@ -36,8 +37,11 @@ def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
                 response = requests.get(f"{api_base}/models", headers=headers)
                 raise_for_status(response)
                 data = response.json()
-                cls.models = [model.get("id") for model in (data.get("data") if isinstance(data, dict) else data)]
-                cls.models.sort()
+                data = data.get("data") if isinstance(data, dict) else data
+                cls.image_models = [model.get("id") for model in data if model.get("image")]
+                cls.models = [model.get("id") for model in data]
+                if cls.sort_models:
+                    cls.models.sort()
             except Exception as e:
                 debug.log(e)
                 cls.models = cls.fallback_models
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index d4a3bd76a77..374d7c6458c 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -215,12 +215,16 @@ async def read_root_v1():
         HTTP_200_OK: {"model": List[ModelResponseModel]},
     })
     async def models():
-        return [{
-            'id': model_id,
-            'object': 'model',
-            'created': 0,
-            'owned_by': model.base_provider
-        } for model_id, model in g4f.models.ModelUtils.convert.items()]
+        return {
+            "object": "list",
+            "data": [{
+                "id": model_id,
+                "object": "model",
+                "created": 0,
+                "owned_by": model.base_provider,
+                "image": isinstance(model, g4f.models.ImageModel),
+            } for model_id, model in g4f.models.ModelUtils.convert.items()]
+        }
 
     @self.app.get("/v1/models/{model_name}", responses={
         HTTP_200_OK: {"model": ModelResponseModel},
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index bbf41314597..5517eca2970 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -143,7 +143,7 @@
 [context lines: "Settings" block — HTML markup lost in extraction]
-[one removed line — markup lost]
+[one added line — markup lost]
@@ -157,6 +157,14 @@
 [context lines: "Settings" block — HTML markup lost in extraction]
 document.getElementById('recognition-language').placeholder = navigator.language;
+[eight added lines — markup lost]