-rw-r--r-- | README.md                   |  5 |
-rw-r--r-- | etc/tool/readme_table.py    |  2 |
-rw-r--r-- | g4f/Provider/DeepInfra.py   |  3 |
-rw-r--r-- | g4f/Provider/GeminiPro.py   |  9 |
-rw-r--r-- | g4f/Provider/HuggingChat.py | 36 |
-rw-r--r-- | g4f/Provider/Llama.py       |  6 |
-rw-r--r-- | g4f/Provider/Replicate.py   |  3 |
-rw-r--r-- | g4f/cookies.py              | 33 |
-rw-r--r-- | g4f/gui/client/index.html   | 10 |
-rw-r--r-- | g4f/gui/server/api.py       | 28 |
-rw-r--r-- | g4f/models.py               | 17 |
11 files changed, 102 insertions, 50 deletions
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -404,11 +404,12 @@ While we wait for gpt-5, here is a list of new models that are at least better t
 | Label | Provider | Image Model | Vision Model | Website |
 | ----- | -------- | ----------- | ------------ | ------- |
-| Microsoft Copilot in Bing | `g4f.Provider.Bing` | dall-e| gpt-4-vision | [bing.com](https://bing.com/chat) |
+| Microsoft Copilot in Bing | `g4f.Provider.Bing` | dall-e-3 | gpt-4-vision | [bing.com](https://bing.com/chat) |
 | DeepInfra | `g4f.Provider.DeepInfra` | stability-ai/sdxl| llava-1.5-7b-hf | [deepinfra.com](https://deepinfra.com) |
 | Gemini | `g4f.Provider.Gemini` | gemini| gemini | [gemini.google.com](https://gemini.google.com) |
+| Gemini API | `g4f.Provider.GeminiPro` | ❌| gemini-1.5-pro-latest | [ai.google.dev](https://ai.google.dev) |
 | Meta AI | `g4f.Provider.MetaAI` | meta| ❌ | [meta.ai](https://www.meta.ai) |
-| OpenAI ChatGPT | `g4f.Provider.OpenaiChat` | dall-e| gpt-4-vision | [chat.openai.com](https://chat.openai.com) |
+| OpenAI ChatGPT | `g4f.Provider.OpenaiChat` | dall-e-3 | gpt-4-vision | [chat.openai.com](https://chat.openai.com) |
 | Replicate | `g4f.Provider.Replicate` | stability-ai/sdxl| ❌ | [replicate.com](https://replicate.com) |
 | You.com | `g4f.Provider.You` | dall-e| agent | [you.com](https://you.com) |

diff --git a/etc/tool/readme_table.py b/etc/tool/readme_table.py
index 439b17fa..e89f861b 100644
--- a/etc/tool/readme_table.py
+++ b/etc/tool/readme_table.py
@@ -136,6 +136,8 @@ def print_image_models():
         netloc = urlparse(provider_url).netloc.replace("www.", "")
         website = f"[{netloc}]({provider_url})"
         label = image_model["provider"] if image_model["label"] is None else image_model["label"]
+        if image_model["image_model"] is None:
+            image_model["image_model"] = "❌"
         if image_model["vision_model"] is None:
             image_model["vision_model"] = "❌"
         lines.append(f'| {label} | `g4f.Provider.{image_model["provider"]}` | {image_model["image_model"]}| {image_model["vision_model"]} | {website} |')

diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
index 35ff84a1..a74601e8 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
@@ -9,13 +9,14 @@ class DeepInfra(Openai):
     label = "DeepInfra"
     url = "https://deepinfra.com"
     working = True
+    needs_auth = False
     has_auth = True
     supports_stream = True
     supports_message_history = True
     default_model = "meta-llama/Meta-Llama-3-70b-instruct"
     default_vision_model = "llava-hf/llava-1.5-7b-hf"
     model_aliases = {
-        'mixtral-8x22b': 'HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1'
+        'dbrx-instruct': 'databricks/dbrx-instruct',
     }

     @classmethod

diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/GeminiPro.py
index 214b7383..56c211ef 100644
--- a/g4f/Provider/GeminiPro.py
+++ b/g4f/Provider/GeminiPro.py
@@ -11,12 +11,14 @@ from ..errors import MissingAuthError
 from .helper import get_connector

 class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Gemini API"
     url = "https://ai.google.dev"
     working = True
     supports_message_history = True
     needs_auth = True
-    default_model = "gemini-pro"
-    models = ["gemini-pro", "gemini-pro-vision"]
+    default_model = "gemini-1.5-pro-latest"
+    default_vision_model = default_model
+    models = [default_model, "gemini-pro", "gemini-pro-vision"]

     @classmethod
     async def create_async_generator(
@@ -32,11 +34,10 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
         connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
-        model = "gemini-pro-vision" if not model and image is not None else model
         model = cls.get_model(model)

         if not api_key:
-            raise MissingAuthError('Missing "api_key"')
+            raise MissingAuthError('Add a "api_key"')

         headers = params = None
         if use_auth_header:
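For context, here is a minimal sketch of how the reworked `GeminiPro` provider might be called after this change. `g4f.ChatCompletion.create` is the library's public entry point, but the exact keyword usage below (`provider=`, `api_key=`) is an assumption inferred from this diff rather than verified against this revision:

```python
# Sketch only: the provider is now labeled "Gemini API", defaults to
# gemini-1.5-pro-latest, and raises MissingAuthError when no key is passed.
import g4f
from g4f.Provider import GeminiPro

response = g4f.ChatCompletion.create(
    model="gemini-1.5-pro-latest",  # an empty model falls back to default_model
    provider=GeminiPro,
    api_key="YOUR_API_KEY",         # key from ai.google.dev; required
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```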
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 668ce4b1..527f0a56 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -6,12 +6,14 @@ from aiohttp import ClientSession, BaseConnector

 from ..typing import AsyncResult, Messages
 from ..requests.raise_for_status import raise_for_status
+from ..providers.conversation import BaseConversation
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt, get_connector
+from .helper import format_prompt, get_connector, get_cookies

 class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
     working = True
+    needs_auth = True
     default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
     models = [
         "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
@@ -22,9 +24,6 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
         'mistralai/Mistral-7B-Instruct-v0.2',
         'meta-llama/Meta-Llama-3-70B-Instruct'
     ]
-    model_aliases = {
-        "openchat/openchat_3.5": "openchat/openchat-3.5-0106",
-    }

     @classmethod
     def get_models(cls):
@@ -45,9 +44,16 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
         connector: BaseConnector = None,
         web_search: bool = False,
         cookies: dict = None,
+        conversation: Conversation = None,
+        return_conversation: bool = False,
+        delete_conversation: bool = True,
         **kwargs
     ) -> AsyncResult:
         options = {"model": cls.get_model(model)}
+        if cookies is None:
+            cookies = get_cookies("huggingface.co", False)
+        if return_conversation:
+            delete_conversation = False

         system_prompt = "\n".join([message["content"] for message in messages if message["role"] == "system"])
         if system_prompt:
@@ -61,9 +67,14 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
             headers=headers,
             connector=get_connector(connector, proxy)
         ) as session:
-            async with session.post(f"{cls.url}/conversation", json=options) as response:
-                await raise_for_status(response)
-                conversation_id = (await response.json())["conversationId"]
+            if conversation is None:
+                async with session.post(f"{cls.url}/conversation", json=options) as response:
+                    await raise_for_status(response)
+                    conversation_id = (await response.json())["conversationId"]
+                if return_conversation:
+                    yield Conversation(conversation_id)
+            else:
+                conversation_id = conversation.conversation_id
             async with session.get(f"{cls.url}/conversation/{conversation_id}/__data.json") as response:
                 await raise_for_status(response)
                 data: list = (await response.json())["nodes"][1]["data"]
@@ -72,7 +83,7 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
                 message_id: str = data[message_keys["id"]]
                 options = {
                     "id": message_id,
-                    "inputs": format_prompt(messages),
+                    "inputs": format_prompt(messages) if conversation is None else messages[-1]["content"],
                     "is_continue": False,
                     "is_retry": False,
                     "web_search": web_search
@@ -92,5 +103,10 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
                         yield token
                 elif line["type"] == "finalAnswer":
                     break
-            async with session.delete(f"{cls.url}/conversation/{conversation_id}") as response:
-                await raise_for_status(response)
+            if delete_conversation:
+                async with session.delete(f"{cls.url}/conversation/{conversation_id}") as response:
+                    await raise_for_status(response)
+
+class Conversation(BaseConversation):
+    def __init__(self, conversation_id: str) -> None:
+        self.conversation_id = conversation_id
\ No newline at end of file
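The `conversation`, `return_conversation`, and `delete_conversation` parameters added above let callers keep a server-side HuggingChat conversation alive across requests: the first call yields a `Conversation` wrapper (and skips deletion), and follow-up calls send only the last message instead of the fully formatted prompt. A sketch under those assumptions:

```python
# Assumed call pattern for the new conversation-reuse flow.
import asyncio
from g4f.Provider.HuggingChat import HuggingChat, Conversation

async def main():
    conversation = None
    async for chunk in HuggingChat.create_async_generator(
        model="",  # empty model name falls back to default_model
        messages=[{"role": "user", "content": "Remember the number 7."}],
        return_conversation=True,  # first yield is a Conversation object
    ):
        if isinstance(chunk, Conversation):
            conversation = chunk   # deletion is skipped, so the id stays valid
        else:
            print(chunk, end="")

    async for chunk in HuggingChat.create_async_generator(
        model="",
        messages=[{"role": "user", "content": "Which number was it?"}],
        conversation=conversation,  # only the last message is sent upstream
    ):
        print(chunk, end="")

asyncio.run(main())
```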
diff --git a/g4f/Provider/Llama.py b/g4f/Provider/Llama.py
index 8f3e9ea2..f2c78b36 100644
--- a/g4f/Provider/Llama.py
+++ b/g4f/Provider/Llama.py
@@ -11,7 +11,7 @@ class Llama(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.llama2.ai"
     working = True
     supports_message_history = True
-    default_model = "meta/llama-3-70b-chat"
+    default_model = "meta/meta-llama-3-70b-instruct"
     models = [
         "meta/llama-2-7b-chat",
         "meta/llama-2-13b-chat",
@@ -20,8 +20,8 @@ class Llama(AsyncGeneratorProvider, ProviderModelMixin):
         "meta/meta-llama-3-70b-instruct",
     ]
     model_aliases = {
-        "meta-llama/Meta-Llama-3-8b-instruct": "meta/meta-llama-3-8b-instruct",
-        "meta-llama/Meta-Llama-3-70b-instruct": "meta/meta-llama-3-70b-instruct",
+        "meta-llama/Meta-Llama-3-8B-Instruct": "meta/meta-llama-3-8b-instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct": "meta/meta-llama-3-70b-instruct",
         "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
         "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
         "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",

diff --git a/g4f/Provider/Replicate.py b/g4f/Provider/Replicate.py
index 593fd04d..89777cf2 100644
--- a/g4f/Provider/Replicate.py
+++ b/g4f/Provider/Replicate.py
@@ -11,6 +11,9 @@ class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://replicate.com"
     working = True
     default_model = "meta/meta-llama-3-70b-instruct"
+    model_aliases = {
+        "meta-llama/Meta-Llama-3-70B-Instruct": default_model
+    }

     @classmethod
     async def create_async_generator(

diff --git a/g4f/cookies.py b/g4f/cookies.py
index 4f0899b4..3c082abc 100644
--- a/g4f/cookies.py
+++ b/g4f/cookies.py
@@ -26,6 +26,14 @@ from . import debug

 # Global variable to store cookies
 _cookies: Dict[str, Cookies] = {}

+DOMAINS = [
+    ".bing.com",
+    ".meta.ai",
+    ".google.com",
+    "www.whiterabbitneo.com",
+    "huggingface.co"
+]
+
 if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
     _LinuxPasswordManager.get_password = lambda a, b: b"secret"
@@ -88,6 +96,15 @@ def load_cookies_from_browsers(domain_name: str, raise_requirements_error: bool
     return cookies

 def read_cookie_files(dirPath: str = "./har_and_cookies"):
+    def get_domain(v: dict) -> str:
+        host = [h["value"] for h in v['request']['headers'] if h["name"].lower() in ("host", ":authority")]
+        if not host:
+            return
+        host = host.pop()
+        for d in DOMAINS:
+            if d in host:
+                return d
+
     global _cookies
     harFiles = []
     cookieFiles = []
@@ -109,17 +126,15 @@ def read_cookie_files(dirPath: str = "./har_and_cookies"):
                     print("Read .har file:", path)
                 new_cookies = {}
                 for v in harFile['log']['entries']:
+                    domain = get_domain(v)
+                    if domain is None:
+                        continue
                     v_cookies = {}
                     for c in v['request']['cookies']:
-                        if "domain" not in c:
-                            continue
-
-                        if c['domain'] not in v_cookies:
-                            v_cookies[c['domain']] = {}
-                        v_cookies[c['domain']][c['name']] = c['value']
-                    for domain, c in v_cookies.items():
-                        _cookies[domain] = c
-                        new_cookies[domain] = len(c)
+                        v_cookies[c['name']] = c['value']
+                    if len(v_cookies) > 0:
+                        _cookies[domain] = v_cookies
+                        new_cookies[domain] = len(v_cookies)
                 if debug.logging:
                     for domain, new_values in new_cookies.items():
                         print(f"Cookies added: {new_values} from {domain}")
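The HAR handling above now groups cookies by the request's `Host` (or `:authority`) header matched against the new `DOMAINS` whitelist, instead of relying on each cookie's own `domain` attribute, which many HAR exports omit. A self-contained sketch of that matching logic, restated outside the module for clarity:

```python
# Standalone restatement of the new get_domain() helper from g4f/cookies.py.
from typing import Optional

DOMAINS = [".bing.com", ".meta.ai", ".google.com", "www.whiterabbitneo.com", "huggingface.co"]

def get_domain(entry: dict) -> Optional[str]:
    # Collect the Host header (HTTP/1.x) or :authority pseudo-header (HTTP/2).
    host = [h["value"] for h in entry["request"]["headers"]
            if h["name"].lower() in ("host", ":authority")]
    if not host:
        return None
    host = host.pop()
    # Substring match against the whitelist, mirroring the patch.
    for domain in DOMAINS:
        if domain in host:
            return domain
    return None

entry = {"request": {"headers": [{"name": ":authority", "value": "huggingface.co"}]}}
assert get_domain(entry) == "huggingface.co"
```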
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index d84bbbe9..cfca51a9 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -130,11 +130,7 @@
                 <textarea id="DeepInfra-api_key" name="DeepInfra[api_key]" class="DeepInfraImage-api_key" placeholder="api_key"></textarea>
             </div>
             <div class="field box">
-                <label for="Gemini-api_key" class="label" title="">Gemini:</label>
-                <textarea id="Gemini-api_key" name="Gemini[api_key]" placeholder=""__Secure-1PSID" cookie"></textarea>
-            </div>
-            <div class="field box">
-                <label for="GeminiPro-api_key" class="label" title="">GeminiPro API:</label>
+                <label for="GeminiPro-api_key" class="label" title="">Gemini API:</label>
                 <textarea id="GeminiPro-api_key" name="GeminiPro[api_key]" placeholder="api_key"></textarea>
             </div>
             <div class="field box">
@@ -157,6 +153,10 @@
                 <label for="OpenRouter-api_key" class="label" title="">OpenRouter:</label>
                 <textarea id="OpenRouter-api_key" name="OpenRouter[api_key]" placeholder="api_key"></textarea>
             </div>
+            <div class="field box">
+                <label for="Replicate-api_key" class="label" title="">Replicate:</label>
+                <textarea id="Replicate-api_key" name="Replicate[api_key]" class="ReplicateImage-api_key" placeholder="api_key"></textarea>
+            </div>
         </div>
         <div class="bottom_buttons">
             <button onclick="delete_conversations()">

diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 3d9f6a1c..020b2090 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -45,6 +45,7 @@ class Api():
     @staticmethod
     def get_image_models() -> list[dict]:
         image_models = []
+        index = []
         for provider in __providers__:
             if hasattr(provider, "image_models"):
                 if hasattr(provider, "get_models"):
@@ -52,14 +53,25 @@ class Api():
                 parent = provider
                 if hasattr(provider, "parent"):
                     parent = __map__[provider.parent]
-                for model in provider.image_models:
-                    image_models.append({
-                        "provider": parent.__name__,
-                        "url": parent.url,
-                        "label": parent.label if hasattr(parent, "label") else None,
-                        "image_model": model,
-                        "vision_model": parent.default_vision_model if hasattr(parent, "default_vision_model") else None
-                    })
+                if parent.__name__ not in index:
+                    for model in provider.image_models:
+                        image_models.append({
+                            "provider": parent.__name__,
+                            "url": parent.url,
+                            "label": parent.label if hasattr(parent, "label") else None,
+                            "image_model": model,
+                            "vision_model": parent.default_vision_model if hasattr(parent, "default_vision_model") else None
+                        })
+                    index.append(parent.__name__)
+            elif hasattr(provider, "default_vision_model") and provider.__name__ not in index:
+                image_models.append({
+                    "provider": provider.__name__,
+                    "url": provider.url,
+                    "label": provider.label if hasattr(provider, "label") else None,
+                    "image_model": None,
+                    "vision_model": provider.default_vision_model
+                })
+                index.append(provider.__name__)
         return image_models

     @staticmethod
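The reworked `get_image_models` deduplicates providers through the `index` list and now also reports vision-only providers, with `image_model` set to `None`; the `readme_table.py` change earlier in this commit renders that `None` as ❌. An illustrative (hypothetical, not captured) return value, with field values taken from the provider definitions in this diff:

```python
# Hypothetical shape of Api.get_image_models() output after this change.
example = [
    {   # provider with image models: one entry per model, emitted once
        "provider": "DeepInfra",
        "url": "https://deepinfra.com",
        "label": "DeepInfra",
        "image_model": "stability-ai/sdxl",
        "vision_model": "llava-hf/llava-1.5-7b-hf",
    },
    {   # vision-only provider: now listed with image_model=None
        "provider": "GeminiPro",
        "url": "https://ai.google.dev",
        "label": "Gemini API",
        "image_model": None,
        "vision_model": "gemini-1.5-pro-latest",
    },
]
```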
diff --git a/g4f/models.py b/g4f/models.py
index 2b7c69f3..acd61846 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -25,6 +25,7 @@ from .Provider import (
     Llama,
     OpenaiChat,
     PerplexityLabs,
+    Replicate,
     Pi,
     Vercel,
     You,
@@ -137,19 +138,19 @@ llama2_13b = Model(
 llama2_70b = Model(
     name = "meta-llama/Llama-2-70b-chat-hf",
     base_provider = "meta",
-    best_provider = RetryProvider([Llama, DeepInfra, HuggingChat])
+    best_provider = RetryProvider([Llama, DeepInfra])
 )

 llama3_8b_instruct = Model(
-    name = "meta-llama/Meta-Llama-3-8b-instruct",
+    name = "meta-llama/Meta-Llama-3-8B-Instruct",
     base_provider = "meta",
-    best_provider = RetryProvider([Llama])
+    best_provider = RetryProvider([Llama, DeepInfra, Replicate])
 )

 llama3_70b_instruct = Model(
-    name = "meta-llama/Meta-Llama-3-70b-instruct",
+    name = "meta-llama/Meta-Llama-3-70B-Instruct",
     base_provider = "meta",
-    best_provider = RetryProvider([Llama, HuggingChat])
+    best_provider = RetryProvider([Llama, DeepInfra])
 )

 codellama_34b_instruct = Model(
@@ -168,7 +169,7 @@ codellama_70b_instruct = Model(
 mixtral_8x7b = Model(
     name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = RetryProvider([DeepInfra, HuggingChat, HuggingFace, PerplexityLabs])
+    best_provider = RetryProvider([DeepInfra, HuggingFace, PerplexityLabs])
 )

 mistral_7b = Model(
@@ -186,7 +187,7 @@ mistral_7b_v02 = Model(
 mixtral_8x22b = Model(
     name = "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
     base_provider = "huggingface",
-    best_provider = RetryProvider([HuggingChat, DeepInfra])
+    best_provider = DeepInfra
 )

 # Misc models
@@ -211,7 +212,7 @@ airoboros_70b = Model(
 openchat_35 = Model(
     name = "openchat/openchat_3.5",
     base_provider = "huggingface",
-    best_provider = RetryProvider([DeepInfra, HuggingChat])
+    best_provider = DeepInfra
 )

 # Bard
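With the registry updated, the Llama 3 model ids use the canonical Hugging Face capitalization and route through the revised provider chains. A closing usage sketch (assuming `g4f.ChatCompletion.create` resolves registered model names as in other revisions):

```python
# Assumed usage: this id now maps to llama3_70b_instruct, which retries
# across Llama and DeepInfra per the RetryProvider above.
import g4f

response = g4f.ChatCompletion.create(
    model="meta-llama/Meta-Llama-3-70B-Instruct",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response)
```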