Merge pull request #2590 from hlohaus/16Jan
Support TitleGeneration, Reasoning in HuggingChat
hlohaus authored Jan 24, 2025
2 parents a73c33f + 3c35e6d commit a9fde5b
Showing 19 changed files with 411 additions and 280 deletions.
g4f/Provider/Jmuz.py: 29 additions & 11 deletions
@@ -5,7 +5,7 @@

class Jmuz(OpenaiAPI):
label = "Jmuz"
url = "https://discord.gg/qXfu24JmsB"
url = "https://discord.gg/Ew6JzjA2NR"
login_url = None
api_base = "https://jmuz.me/gpt/api/v2"
api_key = "prod"
@@ -18,12 +18,14 @@ class Jmuz(OpenaiAPI):
default_model = "gpt-4o"
model_aliases = {
"gemini": "gemini-exp",
"deepseek-chat": "deepseek-2.5",
"qwq-32b": "qwq-32b-preview"
"gemini-1.5-pro": "gemini-pro",
"gemini-1.5-flash": "gemini-thinking",
"deepseek-chat": "deepseek-v3",
"qwq-32b": "qwq-32b-preview",
}

@classmethod
def get_models(cls):
def get_models(cls, **kwargs):
if not cls.models:
cls.models = super().get_models(api_key=cls.api_key, api_base=cls.api_base)
return cls.models
@@ -47,6 +49,7 @@ async def create_async_generator(
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
}
started = False
buffer = ""
async for chunk in super().create_async_generator(
model=model,
messages=messages,
@@ -56,10 +59,25 @@
headers=headers,
**kwargs
):
if isinstance(chunk, str) and cls.url in chunk:
continue
if isinstance(chunk, str) and not started:
chunk = chunk.lstrip()
if chunk:
started = True
if isinstance(chunk, str):
buffer += chunk
if "Join for free".startswith(buffer) or buffer.startswith("Join for free"):
if buffer.endswith("\n"):
buffer = ""
continue
if "https://discord.gg/".startswith(buffer) or "https://discord.gg/" in buffer:
if "..." in buffer:
buffer = ""
continue
if "o1-preview".startswith(buffer) or buffer.startswith("o1-preview"):
if "\n" in buffer:
buffer = ""
continue
if not started:
buffer = buffer.lstrip()
if buffer:
started = True
yield buffer
buffer = ""
else:
yield chunk
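
The rewritten Jmuz loop replaces the old one-line URL check with a small prefix buffer: each chunk is appended to `buffer`, and as long as the buffer could still grow into one of the known ad strings ("Join for free", a Discord invite link, an "o1-preview" banner) nothing is yielded; once a full ad line is seen it is discarded, otherwise the buffer is flushed. Below is a minimal, self-contained sketch of that idea; `BLOCKED`, `filter_stream`, and the demo chunks are illustrative names, not part of this diff.

```python
import asyncio

# Hypothetical ad fragments; the real provider checks "Join for free",
# "https://discord.gg/" invites, and an "o1-preview" banner.
BLOCKED = ["Join for free", "https://discord.gg/"]

async def filter_stream(chunks):
    """Hold back text that could still become a blocked string."""
    buffer = ""
    async for chunk in chunks:
        buffer += chunk
        # Still a possible (or confirmed) ad? Keep buffering instead of yielding.
        if any(b.startswith(buffer) or buffer.startswith(b) for b in BLOCKED):
            if buffer.endswith("\n"):
                buffer = ""  # the ad line completed: drop it entirely
            continue
        yield buffer
        buffer = ""

async def demo():
    async def chunks():
        for c in ["Hel", "lo ", "Join ", "for free\n", "world"]:
            yield c
    async for text in filter_stream(chunks()):
        print(repr(text))  # prints 'Hel', 'lo ', 'world'

asyncio.run(demo())
```

The trade-off is a little latency for cleanliness: a chunk that merely starts like an ad is held back until a later chunk disambiguates it.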
g4f/Provider/PollinationsAI.py: 97 additions & 106 deletions
@@ -3,42 +3,45 @@
import json
import random
import requests
from urllib.parse import quote
from urllib.parse import quote_plus
from typing import Optional
from aiohttp import ClientSession

from .helper import filter_none
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages, ImagesType
from ..image import to_data_uri
from ..requests.raise_for_status import raise_for_status
from ..typing import AsyncResult, Messages
from ..image import ImageResponse
from ..requests.aiohttp import get_connector
from ..providers.response import ImageResponse, FinishReason, Usage

DEFAULT_HEADERS = {
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.9',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
}

class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Pollinations AI"
url = "https://pollinations.ai"

working = True
supports_stream = False
supports_system_message = True
supports_message_history = True

# API endpoints base
api_base = "https://text.pollinations.ai/openai"

# API endpoints
text_api_endpoint = "https://text.pollinations.ai/"
text_api_endpoint = "https://text.pollinations.ai/openai"
image_api_endpoint = "https://image.pollinations.ai/"

# Models configuration
default_model = "openai"
default_image_model = "flux"

image_models = []
models = []

additional_models_image = ["midjourney", "dall-e-3"]
additional_models_text = ["claude", "karma", "command-r", "llamalight", "mistral-large", "sur", "sur-mistral"]
default_vision_model = "gpt-4o"
extra_image_models = ["midjourney", "dall-e-3", "flux-pro", "flux-realism", "flux-cablyai", "flux-anime", "flux-3d"]
vision_models = [default_vision_model, "gpt-4o-mini"]
extra_text_models = [*vision_models, "claude", "karma", "command-r", "llamalight", "mistral-large", "sur", "sur-mistral", "any-dark"]
model_aliases = {
"gpt-4o": default_model,
"qwen-2-72b": "qwen",
"qwen-2.5-coder-32b": "qwen-coder",
"llama-3.3-70b": "llama",
@@ -50,30 +53,25 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"deepseek-chat": "deepseek",
"llama-3.2-3b": "llamalight",
}
text_models = []

@classmethod
def get_models(cls, **kwargs):
# Initialize model lists if not exists
if not hasattr(cls, 'image_models'):
cls.image_models = []
if not hasattr(cls, 'text_models'):
cls.text_models = []

# Fetch image models if not cached
if not cls.image_models:
url = "https://image.pollinations.ai/models"
response = requests.get(url)
raise_for_status(response)
cls.image_models = response.json()
cls.image_models.extend(cls.additional_models_image)
cls.image_models.extend(cls.extra_image_models)

# Fetch text models if not cached
if not cls.text_models:
url = "https://text.pollinations.ai/models"
response = requests.get(url)
raise_for_status(response)
cls.text_models = [model.get("name") for model in response.json()]
cls.text_models.extend(cls.additional_models_text)
cls.text_models.extend(cls.extra_text_models)

# Return combined models
return cls.text_models + cls.image_models
@@ -94,22 +92,27 @@ async def create_async_generator(
enhance: bool = False,
safe: bool = False,
# Text specific parameters
temperature: float = 0.5,
presence_penalty: float = 0,
images: ImagesType = None,
temperature: float = None,
presence_penalty: float = None,
top_p: float = 1,
frequency_penalty: float = 0,
stream: bool = False,
frequency_penalty: float = None,
response_format: Optional[dict] = None,
cache: bool = False,
**kwargs
) -> AsyncResult:
if images is not None and not model:
model = cls.default_vision_model
model = cls.get_model(model)
if not cache and seed is None:
seed = random.randint(0, 100000)

# Check if models
# Image generation
if model in cls.image_models:
async for result in cls._generate_image(
yield await cls._generate_image(
model=model,
messages=messages,
prompt=prompt,
prompt=messages[-1]["content"] if prompt is None else prompt,
proxy=proxy,
width=width,
height=height,
Expand All @@ -118,27 +121,28 @@ async def create_async_generator(
private=private,
enhance=enhance,
safe=safe
):
yield result
)
else:
# Text generation
async for result in cls._generate_text(
model=model,
messages=messages,
images=images,
proxy=proxy,
temperature=temperature,
presence_penalty=presence_penalty,
top_p=top_p,
frequency_penalty=frequency_penalty,
stream=stream
response_format=response_format,
seed=seed,
cache=cache,
):
yield result

@classmethod
async def _generate_image(
cls,
model: str,
messages: Messages,
prompt: str,
proxy: str,
width: int,
@@ -148,16 +152,7 @@ async def _generate_image(
private: bool,
enhance: bool,
safe: bool
) -> AsyncResult:
if seed is None:
seed = random.randint(0, 10000)

headers = {
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.9',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
}

) -> ImageResponse:
params = {
"seed": seed,
"width": width,
@@ -168,85 +163,81 @@
"enhance": enhance,
"safe": safe
}
params = {k: v for k, v in params.items() if v is not None}

async with ClientSession(headers=headers) as session:
prompt = messages[-1]["content"] if prompt is None else prompt
param_string = "&".join(f"{k}={v}" for k, v in params.items())
url = f"{cls.image_api_endpoint}/prompt/{quote(prompt)}?{param_string}"

async with session.head(url, proxy=proxy) as response:
if response.status == 200:
image_response = ImageResponse(images=url, alt=prompt)
yield image_response
params = {k: json.dumps(v) if isinstance(v, bool) else v for k, v in params.items() if v is not None}
async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
async with session.head(f"{cls.image_api_endpoint}prompt/{quote_plus(prompt)}", params=params) as response:
await raise_for_status(response)
return ImageResponse(str(response.url), prompt)

@classmethod
async def _generate_text(
cls,
model: str,
messages: Messages,
images: Optional[ImagesType],
proxy: str,
temperature: float,
presence_penalty: float,
top_p: float,
frequency_penalty: float,
stream: bool,
seed: Optional[int] = None
) -> AsyncResult:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
}

if seed is None:
seed = random.randint(0, 10000)

async with ClientSession(headers=headers) as session:
response_format: Optional[dict],
seed: Optional[int],
cache: bool
) -> AsyncResult:
jsonMode = False
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
jsonMode = True

if images is not None and messages:
last_message = messages[-1].copy()
last_message["content"] = [
*[{
"type": "image_url",
"image_url": {"url": to_data_uri(image)}
} for image, _ in images],
{
"type": "text",
"text": messages[-1]["content"]
}
]
messages[-1] = last_message

async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
data = {
"messages": messages,
"model": model,
"temperature": temperature,
"presence_penalty": presence_penalty,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"jsonMode": False,
"stream": stream,
"jsonMode": jsonMode,
"stream": False, # To get more informations like Usage and FinishReason
"seed": seed,
"cache": False
"cache": cache
}

async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk:
decoded_chunk = chunk.decode()

# Skip [DONE].
if "data: [DONE]" in decoded_chunk:
continue

# Processing plain text
if not decoded_chunk.startswith("data:"):
clean_text = decoded_chunk.strip()
if clean_text:
yield clean_text
continue

# Processing JSON format
try:
# Remove the prefix "data: " and parse JSON
json_str = decoded_chunk.replace("data:", "").strip()
json_response = json.loads(json_str)

if "choices" in json_response and json_response["choices"]:
if "delta" in json_response["choices"][0]:
content = json_response["choices"][0]["delta"].get("content")
if content:
# Remove escaped slashes before parentheses
clean_content = content.replace("\\(", "(").replace("\\)", ")")
yield clean_content
except json.JSONDecodeError:
# If JSON could not be parsed, skip
continue
async with session.post(cls.text_api_endpoint, json=filter_none(**data)) as response:
await raise_for_status(response)
async for line in response.content:
decoded_chunk = line.decode(errors="replace")
# Stop at [DONE].
if "data: [DONE]" in decoded_chunk:
break
# Processing JSON format
try:
# Remove the prefix "data: " and parse JSON
json_str = decoded_chunk.replace("data:", "").strip()
data = json.loads(json_str)
choice = data["choices"][0]
if "usage" in data:
yield Usage(**data["usage"])
if "message" in choice and "content" in choice["message"] and choice["message"]["content"]:
yield choice["message"]["content"].replace("\\(", "(").replace("\\)", ")")
elif "delta" in choice and "content" in choice["delta"] and choice["delta"]["content"]:
yield choice["delta"]["content"].replace("\\(", "(").replace("\\)", ")")
if "finish_reason" in choice and choice["finish_reason"] is not None:
yield FinishReason(choice["finish_reason"])
break
except json.JSONDecodeError:
yield decoded_chunk.strip()
continue
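
Even though the rewritten `_generate_text` posts with `"stream": False` (so the response carries `usage` and `finish_reason`), it still reads the body line by line and parses each `data:` line as OpenAI-style JSON, yielding plain text plus `Usage` and `FinishReason` wrappers. Here is a rough standalone sketch of that per-line parsing, assuming OpenAI-compatible payloads; `parse_line` and the `(kind, value)` tuples are illustrative stand-ins for g4f's response wrapper types.

```python
import json

def parse_line(raw: bytes):
    """Turn one response line into (kind, value) events."""
    decoded = raw.decode(errors="replace")
    if "data: [DONE]" in decoded:
        return  # end-of-stream sentinel
    try:
        data = json.loads(decoded.replace("data:", "").strip())
    except json.JSONDecodeError:
        if decoded.strip():
            yield ("text", decoded.strip())  # plain-text fallback
        return
    choice = data["choices"][0]
    if "usage" in data:
        yield ("usage", data["usage"])
    # Non-streamed replies carry "message"; streamed deltas carry "delta".
    content = (choice.get("message") or choice.get("delta") or {}).get("content")
    if content:
        # Unescape LaTeX-style parentheses, as the provider code does.
        yield ("text", content.replace("\\(", "(").replace("\\)", ")"))
    if choice.get("finish_reason"):
        yield ("finish", choice["finish_reason"])

# Example: one non-streamed completion line
line = b'{"choices":[{"message":{"content":"Hi"},"finish_reason":"stop"}],"usage":{"total_tokens":5}}'
for kind, value in parse_line(line):
    print(kind, value)  # usage {...}, then text Hi, then finish stop
```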