Commit

Merge pull request #2555 from hlohaus/10Jan
Add login url to HuggingFace in Web UI
hlohaus authored Jan 10, 2025
2 parents 3784f6e + 92005da commit c159eeb
Showing 14 changed files with 207 additions and 46 deletions.
3 changes: 3 additions & 0 deletions g4f/Provider/Jmuz.py
@@ -5,6 +5,7 @@

class Jmuz(OpenaiAPI):
    label = "Jmuz"
+    login_url = None
    api_base = "https://jmuz.me/gpt/api/v2"
    api_key = "prod"

@@ -33,6 +34,8 @@ def create_async_generator(
        model: str,
        messages: Messages,
        stream: bool = False,
+        api_key: str = None,
+        api_base: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
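For context, a minimal usage sketch for the patched provider. It assumes g4f's AsyncClient API as of this release; the model name is a placeholder:

import asyncio
from g4f.client import AsyncClient
from g4f.Provider import Jmuz

async def main():
    # api_key/api_base are now accepted keyword arguments, but Jmuz still
    # pins its own defaults (api_key="prod", api_base="https://jmuz.me/gpt/api/v2").
    client = AsyncClient(provider=Jmuz)
    response = await client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())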
8 changes: 4 additions & 4 deletions g4f/Provider/PollinationsAI.py
@@ -170,13 +170,13 @@ async def _generate_image(
        params = {k: v for k, v in params.items() if v is not None}

        async with ClientSession(headers=headers) as session:
-            prompt = quote(messages[-1]["content"] if prompt is None else prompt)
+            prompt = messages[-1]["content"] if prompt is None else prompt
            param_string = "&".join(f"{k}={v}" for k, v in params.items())
-            url = f"{cls.image_api_endpoint}/prompt/{prompt}?{param_string}"
+            url = f"{cls.image_api_endpoint}/prompt/{quote(prompt)}?{param_string}"

            async with session.head(url, proxy=proxy) as response:
                if response.status == 200:
-                    image_response = ImageResponse(images=url, alt=messages[-1]["content"] if prompt is None else prompt)
+                    image_response = ImageResponse(images=url, alt=prompt)
                    yield image_response

    @classmethod
@@ -225,4 +225,4 @@ async def _generate_text(
                        content = json_response['choices'][0]['message']['content']
                        yield content
                    except json.JSONDecodeError:
-                        yield decoded_chunk
+                        pass
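The fix defers URL-escaping to the point where the URL is assembled, so the raw prompt stays available for the alt text. A standalone illustration of the difference (the Pollinations host is assumed for the example):

from urllib.parse import quote

prompt = "a cat & a dog"
# Before: the prompt was quoted up front, so the alt text carried the
# escaped form "a%20cat%20%26%20a%20dog".
# After: quote only while building the URL and keep the raw prompt.
url = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width=1024"
print(url)     # .../prompt/a%20cat%20%26%20a%20dog?width=1024
print(prompt)  # unescaped, suitable as alt text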
70 changes: 70 additions & 0 deletions g4f/Provider/hf_space/Qwen_QVQ_72B.py
@@ -0,0 +1,70 @@
from __future__ import annotations

import json
from aiohttp import ClientSession, FormData

from ...typing import AsyncResult, Messages, ImagesType
from ...requests import raise_for_status
from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_random_string
from ...image import to_bytes, is_accepted_format

class Qwen_QVQ_72B(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://qwen-qvq-72b-preview.hf.space"
    api_endpoint = "/gradio_api/call/generate"

    working = True

    default_model = "Qwen/QwQ-32B-Preview"
    models = [default_model]

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: Messages,
        images: ImagesType = None,
        api_key: str = None,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "Accept": "application/json",
        }
        if api_key is not None:
            headers["Authorization"] = f"Bearer {api_key}"
        async with ClientSession(headers=headers) as session:
            if images:
                data = FormData()
                data_bytes = to_bytes(images[0][0])
                data.add_field("files", data_bytes, content_type=is_accepted_format(data_bytes), filename=images[0][1])
                url = f"https://qwen-qvq-72b-preview.hf.space/gradio_api/upload?upload_id={get_random_string()}"
                async with session.post(url, data=data, proxy=proxy) as response:
                    await raise_for_status(response)
                    image = await response.json()
                data = {"data": [{"path": image[0]}, format_prompt(messages)]}
            else:
                data = {"data": [None, format_prompt(messages)]}
            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                await raise_for_status(response)
                event_id = (await response.json()).get("event_id")
                async with session.get(f"{cls.url}{cls.api_endpoint}/{event_id}") as event_response:
                    await raise_for_status(event_response)
                    event = None
                    text_position = 0
                    async for chunk in event_response.content:
                        if chunk.startswith(b"event: "):
                            event = chunk[7:].decode(errors="replace").strip()
                        if chunk.startswith(b"data: "):
                            if event == "error":
                                raise ResponseError(f"GPU token limit exceeded: {chunk.decode(errors='replace')}")
                            if event in ("complete", "generating"):
                                try:
                                    data = json.loads(chunk[6:])
                                except (json.JSONDecodeError, KeyError, TypeError) as e:
                                    raise RuntimeError(f"Failed to read response: {chunk.decode(errors='replace')}", e)
                                if event == "generating":
                                    if isinstance(data[0], str):
                                        yield data[0][text_position:]
                                        text_position = len(data[0])
                                else:
                                    break
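A hedged usage sketch for the new vision provider. The image path is a placeholder, and images entries are (bytes, filename) pairs per g4f's ImagesType:

import asyncio
from g4f.Provider.hf_space.Qwen_QVQ_72B import Qwen_QVQ_72B

async def main():
    with open("photo.jpg", "rb") as f:  # placeholder image file
        images = [(f.read(), "photo.jpg")]
    async for token in Qwen_QVQ_72B.create_async_generator(
        model=Qwen_QVQ_72B.default_model,
        messages=[{"role": "user", "content": "Describe this image."}],
        images=images,
    ):
        print(token, end="")

asyncio.run(main())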
71 changes: 71 additions & 0 deletions g4f/Provider/hf_space/StableDiffusion35Large.py
@@ -0,0 +1,71 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from ...typing import AsyncResult, Messages
from ...image import ImageResponse, ImagePreview
from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin

class StableDiffusion35Large(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://stabilityai-stable-diffusion-3-5-large.hf.space"
    api_endpoint = "/gradio_api/call/infer"

    working = True

    default_model = 'stable-diffusion-3.5-large'
    models = [default_model]
    image_models = [default_model]

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: Messages,
        prompt: str = None,
        negative_prompt: str = None,
        api_key: str = None,
        proxy: str = None,
        width: int = 1024,
        height: int = 1024,
        guidance_scale: float = 4.5,
        num_inference_steps: int = 50,
        seed: int = 0,
        randomize_seed: bool = True,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        if api_key is not None:
            headers["Authorization"] = f"Bearer {api_key}"
        async with ClientSession(headers=headers) as session:
            prompt = messages[-1]["content"] if prompt is None else prompt
            data = {
                "data": [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps]
            }
            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                response.raise_for_status()
                event_id = (await response.json()).get("event_id")
                async with session.get(f"{cls.url}{cls.api_endpoint}/{event_id}") as event_response:
                    event_response.raise_for_status()
                    event = None
                    async for chunk in event_response.content:
                        if chunk.startswith(b"event: "):
                            event = chunk[7:].decode(errors="replace").strip()
                        if chunk.startswith(b"data: "):
                            if event == "error":
                                raise ResponseError(f"GPU token limit exceeded: {chunk.decode(errors='replace')}")
                            if event in ("complete", "generating"):
                                try:
                                    data = json.loads(chunk[6:])
                                    if data is None:
                                        continue
                                    url = data[0]["url"]
                                except (json.JSONDecodeError, KeyError, TypeError) as e:
                                    raise RuntimeError(f"Failed to parse image URL: {chunk.decode(errors='replace')}", e)
                                if event == "generating":
                                    yield ImagePreview(url, prompt)
                                else:
                                    yield ImageResponse(url, prompt)
                                    break
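A similar sketch for the image provider. It should yield ImagePreview items while the Space renders and a final ImageResponse on completion (import paths as in the diff; dimensions are examples):

import asyncio
from g4f.Provider.hf_space.StableDiffusion35Large import StableDiffusion35Large
from g4f.image import ImageResponse, ImagePreview

async def main():
    async for item in StableDiffusion35Large.create_async_generator(
        model=StableDiffusion35Large.default_model,
        messages=[{"role": "user", "content": "a watercolor fox"}],
        width=768, height=768, num_inference_steps=30,
    ):
        if isinstance(item, ImagePreview):    # check the subclass first
            print("preview:", item.images)
        elif isinstance(item, ImageResponse):
            print("final image:", item.images)

asyncio.run(main())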
12 changes: 9 additions & 3 deletions g4f/Provider/hf_space/__init__.py
@@ -1,18 +1,22 @@
from __future__ import annotations

-from ...typing import AsyncResult, Messages
+from ...typing import AsyncResult, Messages, ImagesType
+from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin

from .BlackForestLabsFlux1Dev import BlackForestLabsFlux1Dev
from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell
from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell
+from .StableDiffusion35Large import StableDiffusion35Large
+from .Qwen_QVQ_72B import Qwen_QVQ_72B

class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://huggingface.co/spaces"
    parent = "HuggingFace"
    working = True
    default_model = BlackForestLabsFlux1Dev.default_model
-    providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell]
+    default_vision_model = Qwen_QVQ_72B.default_model
+    providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell, StableDiffusion35Large, Qwen_QVQ_72B]

    @classmethod
    def get_parameters(cls, **kwargs) -> dict:
@@ -33,8 +37,10 @@ def get_models(cls, **kwargs) -> list[str]:

    @classmethod
    async def create_async_generator(
-        cls, model: str, messages: Messages, **kwargs
+        cls, model: str, messages: Messages, images: ImagesType = None, **kwargs
    ) -> AsyncResult:
+        if not model and images is not None:
+            model = cls.default_vision_model
        is_started = False
        for provider in cls.providers:
            if model in provider.model_aliases:
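The net effect of the routing change, shown in isolation (model string taken from the diff):

# With no model requested but images attached, HuggingSpace now falls
# back to its vision model instead of the default image model.
model = ""
images = [(b"<raw image bytes>", "photo.jpg")]  # illustrative payload
if not model and images is not None:
    model = "Qwen/QwQ-32B-Preview"  # Qwen_QVQ_72B.default_model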
7 changes: 4 additions & 3 deletions g4f/Provider/needs_auth/HuggingFace.py
@@ -17,6 +17,7 @@

class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://huggingface.co"
+    login_url = "https://huggingface.co/settings/tokens"
    working = True
    supports_message_history = True
    default_model = HuggingChat.default_model
@@ -149,14 +150,14 @@ async def create_async_generator(
def format_prompt_mistral(messages: Messages, do_continue: bool = False) -> str:
    system_messages = [message["content"] for message in messages if message["role"] == "system"]
    question = " ".join([messages[-1]["content"], *system_messages])
-    history = "".join([
+    history = "\n".join([
        f"<s>[INST]{messages[idx-1]['content']} [/INST] {message['content']}</s>"
        for idx, message in enumerate(messages)
        if message["role"] == "assistant"
    ])
    if do_continue:
        return history[:-len('</s>')]
-    return f"{history}<s>[INST] {question} [/INST]"
+    return f"{history}\n<s>[INST] {question} [/INST]"

def format_prompt_qwen(messages: Messages, do_continue: bool = False) -> str:
    prompt = "".join([
@@ -185,7 +186,7 @@ def format_prompt_custom(messages: Messages, end_token: str = "</s>", do_continu
def get_inputs(messages: Messages, model_data: dict, model_type: str, do_continue: bool = False) -> str:
    if model_type in ("gpt2", "gpt_neo", "gemma", "gemma2"):
        inputs = format_prompt(messages, do_continue=do_continue)
-    elif model_type in ("mistral"):
+    elif model_type == "mistral" and model_data.get("author") == "mistralai":
        inputs = format_prompt_mistral(messages, do_continue)
    elif "config" in model_data and "tokenizer_config" in model_data["config"] and "eos_token" in model_data["config"]["tokenizer_config"]:
        eos_token = model_data["config"]["tokenizer_config"]["eos_token"]
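Worked by hand from the patched formatter, history entries and the final turn are now newline-separated:

messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user", "content": "How are you?"},
]
# format_prompt_mistral(messages) returns:
#   "<s>[INST]Hi [/INST] Hello!</s>\n<s>[INST] How are you? [/INST]"
# With do_continue=True only the history is returned, minus its
# trailing "</s>", so the model continues the last assistant turn.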
4 changes: 2 additions & 2 deletions g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -5,8 +5,8 @@

class HuggingFaceAPI(OpenaiAPI):
    label = "HuggingFace (Inference API)"
-    url = "https://api-inference.huggingface.co"
+    login_url = "https://huggingface.co/settings/tokens"
-    parent = "HuggingFace"
+    url = "https://api-inference.huggingface.com"
    api_base = "https://api-inference.huggingface.co/v1"
    working = True
    default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
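Since the provider is OpenAI-compatible, a direct call needs only a token from the new login_url. A sketch with a placeholder token and the default model (the inherited OpenaiAPI signature is assumed):

import asyncio
from g4f.Provider.needs_auth.HuggingFaceAPI import HuggingFaceAPI

async def main():
    async for chunk in HuggingFaceAPI.create_async_generator(
        model="meta-llama/Llama-3.2-11B-Vision-Instruct",
        messages=[{"role": "user", "content": "Hello"}],
        api_key="hf_xxx",  # create one at https://huggingface.co/settings/tokens
    ):
        print(chunk, end="")

asyncio.run(main())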
6 changes: 5 additions & 1 deletion g4f/gui/client/home.html
@@ -5,6 +5,10 @@
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>G4F GUI</title>
+    <link rel="apple-touch-icon" sizes="180x180" href="/static/img/apple-touch-icon.png">
+    <link rel="icon" type="image/png" sizes="32x32" href="/static/img/favicon-32x32.png">
+    <link rel="icon" type="image/png" sizes="16x16" href="/static/img/favicon-16x16.png">
+    <link rel="manifest" href="/static/img/site.webmanifest">
    <style>
        :root {
            --colour-1: #000000;
@@ -287,7 +291,7 @@

        (async () => {
            const today = new Date().toJSON().slice(0, 10);
-            const max = 100;
+            const max = 5;
            const cache_id = Math.floor(Math.random() * max);
            let prompt;
            if (cache_id % 2 == 0) {
2 changes: 1 addition & 1 deletion g4f/gui/client/index.html
@@ -152,7 +152,7 @@ <h3>Settings</h3>
            </div>
            <div class="field box hidden">
                <label for="BingCreateImages-api_key" class="label" title="">Microsoft Designer in Bing:</label>
-                <textarea id="BingCreateImages-api_key" name="BingCreateImages[api_key]" placeholder="&quot;_U&quot; cookie"></textarea>
+                <input type="text" id="BingCreateImages-api_key" name="BingCreateImages[api_key]" placeholder="&quot;_U&quot; cookie"/>
            </div>
        </div>
        <div class="bottom_buttons">
21 changes: 11 additions & 10 deletions g4f/gui/client/static/js/chat.v1.js
@@ -743,7 +743,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
    message_storage[message_id] = "";
    stop_generating.classList.remove("stop_generating-hidden");
    let scroll = true;
-    if (message_index > 0 && parseInt(message_index, 10) + 1 < conversation.items.length) {
+    if (message_index >= 0 && parseInt(message_index) + 1 < conversation.items.length) {
        scroll = false;
    }

@@ -1110,7 +1110,7 @@ const load_conversation = async (conversation_id, scroll=true) => {
        if (lastLine.endsWith("[aborted]") || lastLine.endsWith("[error]")) {
            reason = "error";
        // Has an even number of start or end code tags
-        } else if (reason = "stop" && buffer.split("```").length - 1 % 2 === 1) {
+        } else if (reason == "stop" && buffer.split("```").length - 1 % 2 === 1) {
            reason = "length";
        }
        if (reason == "length" || reason == "max_tokens" || reason == "error") {
@@ -1724,19 +1724,19 @@ async function on_api() {
        option.dataset.parent = provider.parent;
        providerSelect.appendChild(option);

-        if (provider.login_url) {
+        if (provider.parent) {
+            if (!login_urls[provider.parent]) {
+                login_urls[provider.parent] = [provider.label, provider.login_url, [provider.name]];
+            } else {
+                login_urls[provider.parent][2].push(provider.name);
+            }
+        } else if (provider.login_url) {
            if (!login_urls[provider.name]) {
                login_urls[provider.name] = [provider.label, provider.login_url, []];
            } else {
                login_urls[provider.name][0] = provider.label;
                login_urls[provider.name][1] = provider.login_url;
            }
-        } else if (provider.parent) {
-            if (!login_urls[provider.parent]) {
-                login_urls[provider.parent] = [provider.label, provider.login_url, [provider.name]];
-            } else {
-                login_urls[provider.parent][2].push(provider.name);
-            }
        }
    });
    for (let [name, [label, login_url, childs]] of Object.entries(login_urls)) {
@@ -1746,9 +1746,10 @@
        option = document.createElement("div");
        option.classList.add("field", "box", "hidden");
        childs = childs.map((child)=>`${child}-api_key`).join(" ");
        console.log(childs);
        option.innerHTML = `
            <label for="${name}-api_key" class="label" title="">${label}:</label>
-            <textarea id="${name}-api_key" name="${name}[api_key]" class="${childs}" placeholder="api_key"></textarea>
+            <input type="text" id="${name}-api_key" name="${name}[api_key]" class="${childs}" placeholder="api_key"/>
+            <a href="${login_url}" target="_blank" title="Login to ${label}">Get API key</a>
        `;
        settings.querySelector(".paper").appendChild(option);
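For reference, the shape of the login_urls map that the reordered checks build, written as a Python literal (the child provider names are hypothetical):

login_urls = {
    "HuggingFace": [
        "HuggingFace",                             # label
        "https://huggingface.co/settings/tokens",  # login_url
        ["HuggingChat", "HuggingFaceAPI"],         # hypothetical child names
    ],
}
# Checking provider.parent first groups a child under its parent even when
# the child declares its own login_url, so the settings page renders one
# API-key field (with a "Get API key" link) per provider family.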
6 changes: 3 additions & 3 deletions g4f/gui/server/backend_api.py
@@ -120,7 +120,7 @@ def create():
            tool_calls.append({
                "function": {
                    "name": "search_tool",
-                    "arguments": {"query": web_search, "instructions": ""} if web_search != "true" else {}
+                    "arguments": {"query": web_search, "instructions": "", "max_words": 1000} if web_search != "true" else {}
                },
                "type": "function"
            })
@@ -173,7 +173,7 @@ def list_buckets():
@app.route('/backend-api/v2/files/<bucket_id>', methods=['GET', 'DELETE'])
def manage_files(bucket_id: str):
    bucket_id = secure_filename(bucket_id)
-    bucket_dir = get_bucket_dir(secure_filename(bucket_id))
+    bucket_dir = get_bucket_dir(bucket_id)

    if not os.path.isdir(bucket_dir):
        return jsonify({"error": {"message": "Bucket directory not found"}}), 404
@@ -231,7 +231,7 @@ def upload_file(bucket_id, filename):
    if not file_data:
        return jsonify({"error": {"message": "No file data received"}}), 400

-    with open(str(file_path), 'wb') as f:
+    with file_path.open('wb') as f:
        f.write(file_data)

    return jsonify({"message": f"File '{filename}' uploaded successfully to bucket '{bucket_id}'"}), 201
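The sanitizer is still applied exactly once; a quick illustration of what secure_filename buys here (standard werkzeug behavior):

from werkzeug.utils import secure_filename

print(secure_filename("../../etc/passwd"))  # "etc_passwd" (traversal stripped)
print(secure_filename("my bucket.json"))    # "my_bucket.json" (spaces replaced)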
13 changes: 3 additions & 10 deletions g4f/providers/asyncio.py
@@ -70,15 +70,8 @@ def to_sync_generator(generator: AsyncIterator, stream: bool = True) -> Iterator

# Helper function to convert a synchronous iterator to an async iterator
async def to_async_iterator(iterator: Iterator) -> AsyncIterator:
-    if isinstance(iterator, str):
-        yield iterator
-    elif hasattr(iterator, "__await__"):
-        yield await iterator
-    elif hasattr(iterator, "__aiter__"):
+    try:
        async for item in iterator:
            yield item
-    elif hasattr(iterator, "__iter__"):
-        for item in iterator:
-            yield item
-    else:
-        yield iterator
+    except TypeError:
+        yield await iterator
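A self-contained check of the simplified helper's try-first behavior, with the body copied from the diff. Note that plain synchronous iterables are no longer special-cased:

import asyncio

async def to_async_iterator(iterator):
    try:
        async for item in iterator:
            yield item
    except TypeError:
        # Not async-iterable: assume it is awaitable and yield its result.
        yield await iterator

async def main():
    async def agen():
        yield 1
        yield 2
    async def coro():
        return 3
    print([x async for x in to_async_iterator(agen())])  # [1, 2]
    print([x async for x in to_async_iterator(coro())])  # [3]

asyncio.run(main())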