author    rkihacker <rkihacker@gmail.com>    2024-11-02 17:07:48 +0100
committer GitHub <noreply@github.com>        2024-11-02 17:07:48 +0100
commit    56d696cf10f0b436e7212cbeb67929ae3639c311 (patch)
tree      858f25d175297d5ad613f4bd3d1762eec82ac9ee
parent    remove model prefix for claude (diff)
parent    Update (docs/providers-and-models.md) (diff)
-rw-r--r--  docs/providers-and-models.md            |  33
-rw-r--r--  g4f/Provider/AIChatFree.py              |   2
-rw-r--r--  g4f/Provider/AIUncensored.py            | 148
-rw-r--r--  g4f/Provider/AiChats.py                 |   2
-rw-r--r--  g4f/Provider/Airforce.py                | 250
-rw-r--r--  g4f/Provider/Allyfy.py                  |  88
-rw-r--r--  g4f/Provider/AmigoChat.py               |   2
-rw-r--r--  g4f/Provider/Blackbox.py                |   3
-rw-r--r--  g4f/Provider/airforce/AirforceChat.py   | 375
-rw-r--r--  g4f/Provider/airforce/AirforceImage.py  |  97
-rw-r--r--  g4f/Provider/airforce/__init__.py       |   2
-rw-r--r--  g4f/models.py                           | 263
12 files changed, 806 insertions, 459 deletions
diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md
index b3dbd9f1..1c56244c 100644
--- a/docs/providers-and-models.md
+++ b/docs/providers-and-models.md
@@ -1,4 +1,5 @@
+
# G4F - Providers and Models
This document provides an overview of various AI providers and models, including text generation, image generation, and vision capabilities. It aims to help users navigate the diverse landscape of AI services and choose the most suitable option for their needs.
@@ -9,6 +10,7 @@ This document provides an overview of various AI providers and models, including
- [Text Models](#text-models)
- [Image Models](#image-models)
- [Vision Models](#vision-models)
+ - [Providers and vision models](#providers-and-vision-models)
- [Conclusion and Usage Tips](#conclusion-and-usage-tips)
---
@@ -16,16 +18,16 @@ This document provides an overview of various AI providers and models, including
| Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth |
|----------|-------------|--------------|---------------|--------|--------|------|
|[ai4chat.co](https://www.ai4chat.co)|`g4f.Provider.Ai4Chat`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[chat.ai365vip.com](https://chat.ai365vip.com)|`g4f.Provider.AI365VIP`|`gpt-3.5-turbo, gpt-4o`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
+|[chat.ai365vip.com](https://chat.ai365vip.com)|`g4f.Provider.AI365VIP`|✔|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[aichatfree.info](https://aichatfree.info)|`g4f.Provider.AIChatFree`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[aichatonline.org](https://aichatonline.org)|`g4f.Provider.AiChatOnline`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[ai-chats.org](https://ai-chats.org)|`g4f.Provider.AiChats`|`gpt-4`|`dalle`|❌|?|![Captcha](https://img.shields.io/badge/Captcha-f48d37)|❌|
|[api.airforce](https://api.airforce)|`g4f.Provider.AiMathGPT`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`gpt-4, gpt-4-turbo, gpt-4o-mini, gpt-3.5-turbo, gpt-4o, claude-3-haiku, claude-3-sonnet, claude-3-5-sonnet, claude-3-opus, llama-3-70b, llama-3-8b, llama-2-13b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, llamaguard-2-8b, llamaguard-7b, llama-3.2-90b, mixtral-8x7b mixtral-8x22b, mistral-7b, qwen-1.5-7b, qwen-1.5-14b, qwen-1.5-72b, qwen-1.5-110b, qwen-2-72b, gemma-2b, gemma-2-9b, gemma-2-27b, gemini-flash, gemini-pro, deepseek, mixtral-8x7b-dpo, yi-34b, wizardlm-2-8x22b, solar-10.7b, mythomax-l2-13b, cosmosrp`|`flux, flux-realism', flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, dalle-3`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`claude-3-haiku, claude-3-sonnet, claude-3-opus, gpt-4, gpt-4-turbo, gpt-4o-mini, gpt-3.5-turbo, llama-3-70b, llama-3-8b, llama-2-13b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, llamaguard-2-8b, llamaguard-7b, llama-3.2-90b, llamaguard-3-8b, llama-3.2-11b, llamaguard-3-11b, llama-3.2-3b, llama-3.2-1b, llama-2-7b, mixtral-8x7b, mixtral-8x22b, mythomax-13b, openchat-3.5, qwen-2-72b, qwen-2-5-7b, qwen-2-5-72b, gemma-2b, gemma-2-9b, gemma-2b-27b, gemini-flash, gemini-pro, dbrx-instruct, deepseek-coder, hermes-2-dpo, hermes-2, openhermes-2.5, wizardlm-2-8x22b, phi-2, solar-10-7b, cosmosrp, lfm-40b, german-7b, zephyr-7b`|`flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, sdxl`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.AIUncensored`|✔|✔|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[allyfy.chat](https://allyfy.chat/)|`g4f.Provider.Allyfy`|`gpt-3.5-turbo`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[amigochat.io/chat](https://amigochat.io/chat/)|`g4f.Provider.AmigoChat`|`gpt-4o, gpt-4o-mini, o1, o1-mini, claude-3.5-sonnet, llama-3.2-90b, llama-3.1-405b, gemini-pro`|`flux-pro, flux-realism, dalle-3`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[openchat.team](https://openchat.team/)|`g4f.Provider.Aura`|✔|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
+|[amigochat.io/chat](https://amigochat.io/chat/)|`g4f.Provider.AmigoChat`|✔|✔|❌|✔|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
+|[openchat.team](https://openchat.team/)|`g4f.Provider.Aura`|✔|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[bing.com](https://bing.com/chat)|`g4f.Provider.Bing`|`gpt-4`|✔|`gpt-4-vision`|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌+✔|
|[bing.com/images](https://www.bing.com/images/create)|`g4f.Provider.BingCreateImages`|❌|✔|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, blackboxai-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, gpt-4o, gemini-pro, claude-3.5-sonnet`|`flux`|✔|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -33,8 +35,8 @@ This document provides an overview of various AI providers and models, including
|[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|`?`|`?`|`?`|?|![Unknown](https://img.shields.io/badge/Unknown-grey) |❌|
|[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4o, gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt4online.org](https://chatgpt4online.org)|`g4f.Provider.Chatgpt4Online`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[chatgpt4o.one](https://chatgpt4o.one)|`g4f.Provider.Chatgpt4o`|✔|❌|❌|❌|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
-|[chatgptfree.ai](https://chatgptfree.ai)|`g4f.Provider.ChatgptFree`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
+|[chatgpt4o.one](https://chatgpt4o.one)|`g4f.Provider.Chatgpt4o`|✔|❌|❌|❌|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
+|[chatgptfree.ai](https://chatgptfree.ai)|`g4f.Provider.ChatgptFree`|`gpt-4o-mini`|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[app.chathub.gg](https://app.chathub.gg)|`g4f.Provider.ChatHub`|`llama-3.1-8b, mixtral-8x7b, gemma-2, sonar-online`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatify-ai.vercel.app](https://chatify-ai.vercel.app)|`g4f.Provider.ChatifyAI`|`llama-3.1-8b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`german-7b, gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-11b, llama-3.2-1b, llama-3.2-3b, mistral-7b, openchat-3.5, phi-2, qwen-1.5-0.5b, qwen-1.5-1.8b, qwen-1.5-14b, qwen-1.5-7b, tinyllama-1.1b, cybertron-7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -48,7 +50,7 @@ This document provides an overview of various AI providers and models, including
|[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chat.chatgpt.org.uk](https://chat.chatgpt.org.uk)|`g4f.Provider.FreeChatgpt`|`qwen-1.5-14b, sparkdesk-v1.1, qwen-2-7b, glm-4-9b, glm-3-6b, yi-1.5-9b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[free.netfly.top](https://free.netfly.top)|`g4f.Provider.FreeNetfly`|✔|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
+|[free.netfly.top](https://free.netfly.top)|`g4f.Provider.FreeNetfly`|✔|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[gemini.google.com](https://gemini.google.com)|`g4f.Provider.Gemini`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[ai.google.dev](https://ai.google.dev)|`g4f.Provider.GeminiPro`|✔|❌|✔|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[app.giz.ai](https://app.giz.ai/assistant/)|`g4f.Provider.GizAI`|`gemini-flash, gemini-pro, gpt-4o-mini, gpt-4o, claude-3.5-sonnet, claude-3-haiku, llama-3.1-70b, llama-3.1-8b, mistral-large`|`sdxl, sd-1.5, sd-3.5, dalle-3, flux-schnell, flux1-pro`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -57,7 +59,7 @@ This document provides an overview of various AI providers and models, including
|[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|✔|❌|❌|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`llama-3.1-70b, command-r-plus, qwen-2-72b, llama-3.2-11b, hermes-3, mistral-nemo, phi-3.5-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[huggingface.co](https://huggingface.co/chat)|`g4f.Provider.HuggingFace`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[koala.sh/chat](https://koala.sh/chat)|`g4f.Provider.Koala`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
+|[koala.sh/chat](https://koala.sh/chat)|`g4f.Provider.Koala`|`gpt-4o-mini`|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`gpt-3.5-turbo, gpt-4o-mini, gpt-4o, gpt-4-turbo, grok-2, grok-2-mini, claude-3-opus, claude-3-sonnet, claude-3-5-sonnet, claude-3-haiku, claude-2.1, gemini-flash, gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[magickpen.com](https://magickpen.com)|`g4f.Provider.MagickPen`|`gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[meta.ai](https://www.meta.ai)|`g4f.Provider.MetaAI`|✔|✔|?|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
@@ -81,7 +83,7 @@ This document provides an overview of various AI providers and models, including
|[chatgpt.com](https://chatgpt.com/)|`g4f.Provider.OpenaiChat`|`gpt-4o, gpt-4o-mini, gpt-4`|❌|✔|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[www.perplexity.ai](https://www.perplexity.ai)|`g4f.Provider.PerplexityAi`|✔|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[perplexity.ai](https://www.perplexity.ai)|`g4f.Provider.PerplexityApi`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
-|[labs.perplexity.ai](https://labs.perplexity.ai)|`g4f.Provider.PerplexityLabs`|`sonar-online, sonar-chat, llama-3.1-8b, llama-3.1-70b`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
+|[labs.perplexity.ai](https://labs.perplexity.ai)|`g4f.Provider.PerplexityLabs`|`sonar-online, sonar-chat, llama-3.1-8b, llama-3.1-70b`|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[pi.ai/talk](https://pi.ai/talk)|`g4f.Provider.Pi`|`pi`|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|❌|
|[]()|`g4f.Provider.Pizzagpt`|`gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[poe.com](https://poe.com)|`g4f.Provider.Poe`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
@@ -131,6 +133,8 @@ This document provides an overview of various AI providers and models, including
|mistral-nemo|Mistral AI|2+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)|
|mistral-large|Mistral AI|2+ Providers|[mistral.ai](https://mistral.ai/news/mistral-large-2407/)|
|mixtral-8x7b-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
+|hermes-2-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
+|hermes-2|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B)|
|yi-34b|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B)|
|hermes-3|NousResearch|2+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B)|
|gemini|Google DeepMind|1+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)|
@@ -170,7 +174,7 @@ This document provides an overview of various AI providers and models, including
|solar-10-7b|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0)|
|solar-pro|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/solar-pro-preview-instruct)|
|pi|Inflection|1+ Providers|[inflection.ai](https://inflection.ai/blog/inflection-2-5)|
-|deepseek|DeepSeek|1+ Providers|[deepseek.com](https://www.deepseek.com/)|
+|deepseek-coder|DeepSeek|1+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct)|
|wizardlm-2-7b|WizardLM|1+ Providers|[huggingface.co](https://huggingface.co/dreamgen/WizardLM-2-7B)|
|wizardlm-2-8x22b|WizardLM|2+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)|
|sh-n-7b|Together|1+ Providers|[huggingface.co](https://huggingface.co/togethercomputer/StripedHyena-Nous-7B)|
@@ -190,6 +194,10 @@ This document provides an overview of various AI providers and models, including
|german-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF)|
|tinyllama-1.1b|TinyLlama|1+ Providers|[huggingface.co](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0)|
|cybertron-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/fblgit/una-cybertron-7b-v2-bf16)|
+|openhermes-2.5|Teknium|1+ Providers|[huggingface.co](https://huggingface.co/datasets/teknium/OpenHermes-2.5)|
+|lfm-40b|Liquid|1+ Providers|[liquid.ai](https://www.liquid.ai/liquid-foundation-models)|
+|zephyr-7b|HuggingFaceH4|1+ Providers|[huggingface.co](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)|
+
### Image Models
| Model | Base Provider | Providers | Website |
@@ -224,6 +232,11 @@ This document provides an overview of various AI providers and models, including
|blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
|minicpm-llama-3-v2.5|OpenBMB|1+ Providers | [huggingface.co](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5)|
+### Providers and vision models
+| Provider | Base Provider | Vision Models | Status | Auth |
+|----------|---------------|---------------|--------|------|
+| `g4f.Provider.Blackbox` | Blackbox AI | `blackboxai, blackboxai-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, gpt-4o, gemini-pro, claude-3.5-sonnet` | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+
## Conclusion and Usage Tips
This document provides a comprehensive overview of various AI providers and models available for text generation, image generation, and vision tasks. **When choosing a provider or model, consider the following factors:**
1. **Availability**: Check the status of the provider to ensure it's currently active and accessible.
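
For readers applying these tips, here is a minimal sketch of pinning a specific provider and model from the table via g4f's documented `ChatCompletion.create` interface (the prompt string is illustrative):

```python
import g4f

# Pin both the model and the provider explicitly; otherwise g4f picks
# from the model's best_provider list defined in g4f/models.py.
response = g4f.ChatCompletion.create(
    model="gpt-4o-mini",
    provider=g4f.Provider.Airforce,
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response)
```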
diff --git a/g4f/Provider/AIChatFree.py b/g4f/Provider/AIChatFree.py
index 71c04681..6f4b8560 100644
--- a/g4f/Provider/AIChatFree.py
+++ b/g4f/Provider/AIChatFree.py
@@ -14,7 +14,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://aichatfree.info/"
- working = True
+ working = False
supports_stream = True
supports_message_history = True
default_model = 'gemini-pro'
diff --git a/g4f/Provider/AIUncensored.py b/g4f/Provider/AIUncensored.py
index d653191c..ce492b38 100644
--- a/g4f/Provider/AIUncensored.py
+++ b/g4f/Provider/AIUncensored.py
@@ -2,33 +2,49 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
+from itertools import cycle
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..image import ImageResponse
+
class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.aiuncensored.info"
+ api_endpoints_text = [
+ "https://twitterclone-i0wr.onrender.com/api/chat",
+ "https://twitterclone-4e8t.onrender.com/api/chat",
+ "https://twitterclone-8wd1.onrender.com/api/chat",
+ ]
+ api_endpoints_image = [
+ "https://twitterclone-4e8t.onrender.com/api/image",
+ "https://twitterclone-i0wr.onrender.com/api/image",
+ "https://twitterclone-8wd1.onrender.com/api/image",
+ ]
+ api_endpoints_cycle_text = cycle(api_endpoints_text)
+ api_endpoints_cycle_image = cycle(api_endpoints_image)
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
- default_model = 'ai_uncensored'
- chat_models = [default_model]
- image_models = ['ImageGenerator']
- models = [*chat_models, *image_models]
-
- api_endpoints = {
- 'ai_uncensored': "https://twitterclone-i0wr.onrender.com/api/chat",
- 'ImageGenerator': "https://twitterclone-4e8t.onrender.com/api/image"
+ default_model = 'TextGenerations'
+ text_models = [default_model]
+ image_models = ['ImageGenerations']
+ models = [*text_models, *image_models]
+
+ model_aliases = {
+ #"": "TextGenerations",
+ "flux": "ImageGenerations",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
else:
return cls.default_model
@@ -38,75 +54,63 @@ class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
- stream: bool = False,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
- if model in cls.chat_models:
- async with ClientSession(headers={"content-type": "application/json"}) as session:
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://www.aiuncensored.info',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://www.aiuncensored.info/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ if model in cls.image_models:
+ prompt = messages[-1]['content']
data = {
- "messages": [
- {"role": "user", "content": format_prompt(messages)}
- ],
- "stream": stream
+ "prompt": prompt,
}
- async with session.post(cls.api_endpoints[model], json=data, proxy=proxy) as response:
+ api_endpoint = next(cls.api_endpoints_cycle_image)
+ async with session.post(api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- if stream:
- async for chunk in cls._handle_streaming_response(response):
- yield chunk
- else:
- yield await cls._handle_non_streaming_response(response)
- elif model in cls.image_models:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "origin": cls.url,
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": f"{cls.url}/",
- "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "cross-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
- }
- async with ClientSession(headers=headers) as session:
- prompt = messages[0]['content']
- data = {"prompt": prompt}
- async with session.post(cls.api_endpoints[model], json=data, proxy=proxy) as response:
+ response_data = await response.json()
+ image_url = response_data['image_url']
+ image_response = ImageResponse(images=image_url, alt=prompt)
+ yield image_response
+ elif model in cls.text_models:
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ]
+ }
+ api_endpoint = next(cls.api_endpoints_cycle_text)
+ async with session.post(api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- result = await response.json()
- image_url = result.get('image_url', '')
- if image_url:
- yield ImageResponse(image_url, alt=prompt)
- else:
- yield "Failed to generate image. Please try again."
-
- @classmethod
- async def _handle_streaming_response(cls, response):
- async for line in response.content:
- line = line.decode('utf-8').strip()
- if line.startswith("data: "):
- if line == "data: [DONE]":
- break
- try:
- json_data = json.loads(line[6:])
- if 'data' in json_data:
- yield json_data['data']
- except json.JSONDecodeError:
- pass
-
- @classmethod
- async def _handle_non_streaming_response(cls, response):
- response_json = await response.json()
- return response_json.get('content', "Sorry, I couldn't generate a response.")
-
- @classmethod
- def validate_response(cls, response: str) -> str:
- return response
+ full_response = ""
+ async for line in response.content:
+ line = line.decode('utf-8')
+ if line.startswith("data: "):
+ try:
+ json_str = line[6:]
+ if json_str != "[DONE]":
+ data = json.loads(json_str)
+ if "data" in data:
+ full_response += data["data"]
+ yield data["data"]
+ except json.JSONDecodeError:
+ continue
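
The rewritten AIUncensored provider load-balances across several mirror endpoints with `itertools.cycle`, keeping one shared cycle per endpoint list. A minimal sketch of that round-robin pattern, with placeholder URLs standing in for the real mirrors:

```python
from itertools import cycle

# Placeholder mirrors; the provider holds one shared cycle as a class attribute.
endpoints = cycle([
    "https://mirror-a.example/api/chat",
    "https://mirror-b.example/api/chat",
])

def next_endpoint() -> str:
    # Each call advances the shared iterator, spreading successive
    # requests evenly across the mirrors.
    return next(endpoints)

print(next_endpoint())  # https://mirror-a.example/api/chat
print(next_endpoint())  # https://mirror-b.example/api/chat
print(next_endpoint())  # back to mirror-a
```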
diff --git a/g4f/Provider/AiChats.py b/g4f/Provider/AiChats.py
index 08492e24..7ff25639 100644
--- a/g4f/Provider/AiChats.py
+++ b/g4f/Provider/AiChats.py
@@ -11,7 +11,7 @@ from .helper import format_prompt
class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://ai-chats.org"
api_endpoint = "https://ai-chats.org/chat/send2/"
- working = True
+ working = False
supports_message_history = True
default_model = 'gpt-4'
models = ['gpt-4', 'dalle']
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 015766f4..b7819f9a 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -1,105 +1,30 @@
from __future__ import annotations
-import random
-import json
-import re
+from typing import Any, Dict
+import inspect
+
from aiohttp import ClientSession
+
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse
-
-def split_long_message(message: str, max_length: int = 4000) -> list[str]:
- return [message[i:i+max_length] for i in range(0, len(message), max_length)]
+from .helper import format_prompt
+from .airforce.AirforceChat import AirforceChat
+from .airforce.AirforceImage import AirforceImage
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.airforce"
- image_api_endpoint = "https://api.airforce/imagine2"
- text_api_endpoint = "https://api.airforce/chat/completions"
+ api_endpoint_completions = AirforceChat.api_endpoint_completions
+ api_endpoint_imagine2 = AirforceImage.api_endpoint_imagine2
working = True
+ supports_stream = AirforceChat.supports_stream
+ supports_system_message = AirforceChat.supports_system_message
+ supports_message_history = AirforceChat.supports_message_history
- default_model = 'llama-3-70b-chat'
-
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- text_models = [
- 'claude-3-haiku-20240307',
- 'claude-3-sonnet-20240229',
- 'claude-3-5-sonnet-20240620',
- 'claude-3-opus-20240229',
- 'chatgpt-4o-latest',
- 'gpt-4',
- 'gpt-4-turbo',
- 'gpt-4o-mini-2024-07-18',
- 'gpt-4o-mini',
- 'gpt-3.5-turbo',
- 'gpt-3.5-turbo-0125',
- 'gpt-3.5-turbo-1106',
- default_model,
- 'llama-3-70b-chat-turbo',
- 'llama-3-8b-chat',
- 'llama-3-8b-chat-turbo',
- 'llama-3-70b-chat-lite',
- 'llama-3-8b-chat-lite',
- 'llama-2-13b-chat',
- 'llama-3.1-405b-turbo',
- 'llama-3.1-70b-turbo',
- 'llama-3.1-8b-turbo',
- 'LlamaGuard-2-8b',
- 'Llama-Guard-7b',
- 'Llama-3.2-90B-Vision-Instruct-Turbo',
- 'Mixtral-8x7B-Instruct-v0.1',
- 'Mixtral-8x22B-Instruct-v0.1',
- 'Mistral-7B-Instruct-v0.1',
- 'Mistral-7B-Instruct-v0.2',
- 'Mistral-7B-Instruct-v0.3',
- 'Qwen1.5-7B-Chat',
- 'Qwen1.5-14B-Chat',
- 'Qwen1.5-72B-Chat',
- 'Qwen1.5-110B-Chat',
- 'Qwen2-72B-Instruct',
- 'gemma-2b-it',
- 'gemma-2-9b-it',
- 'gemma-2-27b-it',
- 'gemini-1.5-flash',
- 'gemini-1.5-pro',
- 'deepseek-llm-67b-chat',
- 'Nous-Hermes-2-Mixtral-8x7B-DPO',
- 'Nous-Hermes-2-Yi-34B',
- 'WizardLM-2-8x22B',
- 'SOLAR-10.7B-Instruct-v1.0',
- 'MythoMax-L2-13b',
- 'cosmosrp',
- ]
-
- image_models = [
- 'flux',
- 'flux-realism',
- 'flux-anime',
- 'flux-3d',
- 'flux-disney',
- 'flux-pixel',
- 'flux-4o',
- 'any-dark',
- ]
-
- models = [
- *text_models,
- *image_models,
- ]
+ default_model = AirforceChat.default_model
+ models = [*AirforceChat.text_models, *AirforceImage.image_models]
model_aliases = {
- "claude-3-haiku": "claude-3-haiku-20240307",
- "claude-3-sonnet": "claude-3-sonnet-20240229",
- "gpt-4o": "chatgpt-4o-latest",
- "llama-3-70b": "llama-3-70b-chat",
- "llama-3-8b": "llama-3-8b-chat",
- "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
- "qwen-1.5-7b": "Qwen1.5-7B-Chat",
- "gemma-2b": "gemma-2b-it",
- "gemini-flash": "gemini-1.5-flash",
- "mythomax-l2-13b": "MythoMax-L2-13b",
- "solar-10.7b": "SOLAR-10.7B-Instruct-v1.0",
+ **AirforceChat.model_aliases,
+ **AirforceImage.model_aliases
}
@classmethod
@@ -107,139 +32,28 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
if model in cls.models:
return model
elif model in cls.model_aliases:
- return cls.model_aliases.get(model, cls.default_model)
+ return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- seed: int = None,
- size: str = "1:1",
- stream: bool = False,
- **kwargs
- ) -> AsyncResult:
+ async def create_async_generator(cls, model: str, messages: Messages, **kwargs) -> AsyncResult:
model = cls.get_model(model)
+
+ provider = AirforceChat if model in AirforceChat.text_models else AirforceImage
- if model in cls.image_models:
- async for result in cls._generate_image(model, messages, proxy, seed, size):
- yield result
- elif model in cls.text_models:
- async for result in cls._generate_text(model, messages, proxy, stream):
- yield result
-
- @classmethod
- async def _generate_image(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- seed: int = None,
- size: str = "1:1",
- **kwargs
- ) -> AsyncResult:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "origin": "https://llmplayground.net",
- "user-agent": "Mozilla/5.0"
- }
-
- if seed is None:
- seed = random.randint(0, 100000)
-
- prompt = messages[-1]['content']
-
- async with ClientSession(headers=headers) as session:
- params = {
- "model": model,
- "prompt": prompt,
- "size": size,
- "seed": seed
- }
- async with session.get(f"{cls.image_api_endpoint}", params=params, proxy=proxy) as response:
- response.raise_for_status()
- content_type = response.headers.get('Content-Type', '').lower()
+ if model not in provider.models:
+ raise ValueError(f"Unsupported model: {model}")
- if 'application/json' in content_type:
- async for chunk in response.content.iter_chunked(1024):
- if chunk:
- yield chunk.decode('utf-8')
- elif 'image' in content_type:
- image_data = b""
- async for chunk in response.content.iter_chunked(1024):
- if chunk:
- image_data += chunk
- image_url = f"{cls.image_api_endpoint}?model={model}&prompt={prompt}&size={size}&seed={seed}"
- alt_text = f"Generated image for prompt: {prompt}"
- yield ImageResponse(images=image_url, alt=alt_text)
-
- @classmethod
- async def _generate_text(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- stream: bool = False,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "authorization": "Bearer missing api key",
- "content-type": "application/json",
- "user-agent": "Mozilla/5.0"
- }
+ # Get the signature of the provider's create_async_generator method
+ sig = inspect.signature(provider.create_async_generator)
+
+ # Filter kwargs to only include parameters that the provider's method accepts
+ filtered_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters}
- async with ClientSession(headers=headers) as session:
- formatted_prompt = cls._format_messages(messages)
- prompt_parts = split_long_message(formatted_prompt)
- full_response = ""
+ # Add model and messages to filtered_kwargs
+ filtered_kwargs['model'] = model
+ filtered_kwargs['messages'] = messages
- for part in prompt_parts:
- data = {
- "messages": [{"role": "user", "content": part}],
- "model": model,
- "max_tokens": 4096,
- "temperature": 1,
- "top_p": 1,
- "stream": stream
- }
- async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- part_response = ""
- if stream:
- async for line in response.content:
- if line:
- line = line.decode('utf-8').strip()
- if line.startswith("data: ") and line != "data: [DONE]":
- json_data = json.loads(line[6:])
- content = json_data['choices'][0]['delta'].get('content', '')
- part_response += content
- else:
- json_data = await response.json()
- content = json_data['choices'][0]['message']['content']
- part_response = content
-
- part_response = re.sub(
- r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
- '',
- part_response
- )
-
- part_response = re.sub(
- r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
- '',
- part_response
- )
-
- full_response += part_response
- yield full_response
-
- @classmethod
- def _format_messages(cls, messages: Messages) -> str:
- return " ".join([msg['content'] for msg in messages])
+ async for result in provider.create_async_generator(**filtered_kwargs):
+ yield result
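
The refactored Airforce entry point delegates to AirforceChat or AirforceImage and uses `inspect.signature` to forward only the keyword arguments the chosen provider declares. A small sketch of that filtering, using a hypothetical target coroutine:

```python
import inspect

# Hypothetical coroutine with the same shape as a provider's
# create_async_generator: named params plus a **kwargs catch-all.
async def create_async_generator(model: str, messages: list, proxy: str = None, **kwargs):
    ...

def filter_kwargs(func, **options):
    # Keep only options the callee names explicitly; anything that would
    # merely fall through to **kwargs is dropped instead of forwarded.
    sig = inspect.signature(func)
    return {k: v for k, v in options.items() if k in sig.parameters}

print(filter_kwargs(create_async_generator, proxy="http://127.0.0.1:8080", seed=42))
# -> {'proxy': 'http://127.0.0.1:8080'}
```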
diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py
index bf607df4..53cf1da1 100644
--- a/g4f/Provider/Allyfy.py
+++ b/g4f/Provider/Allyfy.py
@@ -1,17 +1,28 @@
from __future__ import annotations
-
-from aiohttp import ClientSession
+import aiohttp
+import asyncio
import json
-
+import uuid
+from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-class Allyfy(AsyncGeneratorProvider):
+class Allyfy(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://allyfy.chat"
api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat"
working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-3.5-turbo'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
@classmethod
async def create_async_generator(
@@ -21,50 +32,55 @@ class Allyfy(AsyncGeneratorProvider):
proxy: str = None,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+ client_id = str(uuid.uuid4())
+
headers = {
- "accept": "text/event-stream",
- "accept-language": "en-US,en;q=0.9",
- "content-type": "application/json;charset=utf-8",
- "dnt": "1",
- "origin": "https://www.allyfy.chat",
- "priority": "u=1, i",
- "referer": "https://www.allyfy.chat/",
- "referrer": "https://www.allyfy.chat",
- 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126"',
+ 'accept': 'text/event-stream',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json;charset=utf-8',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f"{cls.url}/",
+ 'referrer': cls.url,
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
}
+
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
- "messages": [{"content": prompt, "role": "user"}],
+ "messages": messages,
"content": prompt,
"baseInfo": {
- "clientId": "q08kdrde1115003lyedfoir6af0yy531",
+ "clientId": client_id,
"pid": "38281",
"channelId": "100000",
"locale": "en-US",
- "localZone": 180,
+ "localZone": 120,
"packageName": "com.cch.allyfy.webh",
}
}
- async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- full_response = []
- async for line in response.content:
- line = line.decode().strip()
- if line.startswith("data:"):
- data_content = line[5:]
- if data_content == "[DONE]":
- break
- try:
- json_data = json.loads(data_content)
- if "content" in json_data:
- full_response.append(json_data["content"])
- except json.JSONDecodeError:
- continue
- yield "".join(full_response)
+ response_text = await response.text()
+
+ filtered_response = []
+ for line in response_text.splitlines():
+ if line.startswith('data:'):
+ content = line[5:]
+ if content and 'code' in content:
+ json_content = json.loads(content)
+ if json_content['content']:
+ filtered_response.append(json_content['content'])
+
+ final_response = ''.join(filtered_response)
+ yield final_response
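
The Allyfy rewrite no longer yields chunk by chunk; it buffers the whole SSE body, then keeps only `data:` lines whose payload parses as JSON. A self-contained sketch of that parsing step, using a made-up response body in the same shape:

```python
import json

# Made-up buffered SSE body in the shape the code above expects.
response_text = (
    'data:{"code":200,"content":"Hel"}\n'
    'data:{"code":200,"content":"lo"}\n'
    'data:[DONE]\n'
)

parts = []
for line in response_text.splitlines():
    if line.startswith("data:"):
        payload = line[5:]
        # The '[DONE]' terminator has no "code" marker, so it is skipped.
        if payload and "code" in payload:
            chunk = json.loads(payload)
            if chunk["content"]:
                parts.append(chunk["content"])

print("".join(parts))  # Hello
```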
diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/AmigoChat.py
index f5027111..b086d5e1 100644
--- a/g4f/Provider/AmigoChat.py
+++ b/g4f/Provider/AmigoChat.py
@@ -13,7 +13,7 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://amigochat.io/chat/"
chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
image_api_endpoint = "https://api.amigochat.io/v1/images/generations"
- working = True
+ working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 0013800e..e2595b02 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -274,7 +274,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"visitFromDelta": False,
"mobileClient": False,
"webSearchMode": web_search,
- "userSelectedModel": cls.userSelectedModel.get(model, model)
+ "userSelectedModel": cls.userSelectedModel.get(model, model),
+ "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc"
}
headers_chat = {
diff --git a/g4f/Provider/airforce/AirforceChat.py b/g4f/Provider/airforce/AirforceChat.py
new file mode 100644
index 00000000..b4b1eca3
--- /dev/null
+++ b/g4f/Provider/airforce/AirforceChat.py
@@ -0,0 +1,375 @@
+from __future__ import annotations
+import re
+from aiohttp import ClientSession
+import json
+from typing import List
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+def clean_response(text: str) -> str:
+ """Clean response from unwanted patterns."""
+ patterns = [
+ r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
+ r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
+ r"Rate limit \(\d+\/hour\) exceeded\. Join our discord for more: https:\/\/discord\.com\/invite\/\S+",
+ r"</s>", # zephyr-7b-beta
+ ]
+
+ for pattern in patterns:
+ text = re.sub(pattern, '', text)
+ return text.strip()
+
+def split_message(message: dict, chunk_size: int = 995) -> List[dict]:
+ """Split a message into chunks of specified size."""
+ content = message.get('content', '')
+ if len(content) <= chunk_size:
+ return [message]
+
+ chunks = []
+ while content:
+ chunk = content[:chunk_size]
+ content = content[chunk_size:]
+ chunks.append({
+ 'role': message['role'],
+ 'content': chunk
+ })
+ return chunks
+
+def split_messages(messages: Messages, chunk_size: int = 995) -> Messages:
+ """Split all messages that exceed chunk_size into smaller messages."""
+ result = []
+ for message in messages:
+ result.extend(split_message(message, chunk_size))
+ return result
+
+class AirforceChat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "AirForce Chat"
+    api_endpoint_completions = "https://api.airforce/chat/completions" # Replace with the real endpoint
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama-3-70b-chat'
+ text_models = [
+ # anthropic
+ 'claude-3-haiku-20240307',
+ 'claude-3-sonnet-20240229',
+ 'claude-3-5-sonnet-20240620',
+ 'claude-3-5-sonnet-20241022',
+ 'claude-3-opus-20240229',
+
+ # openai
+ 'chatgpt-4o-latest',
+ 'gpt-4',
+ 'gpt-4-turbo',
+ 'gpt-4o-2024-05-13',
+ 'gpt-4o-mini-2024-07-18',
+ 'gpt-4o-mini',
+ 'gpt-4o-2024-08-06',
+ 'gpt-3.5-turbo',
+ 'gpt-3.5-turbo-0125',
+ 'gpt-3.5-turbo-1106',
+ 'gpt-4o',
+ 'gpt-4-turbo-2024-04-09',
+ 'gpt-4-0125-preview',
+ 'gpt-4-1106-preview',
+
+ # meta-llama
+ default_model,
+ 'llama-3-70b-chat-turbo',
+ 'llama-3-8b-chat',
+ 'llama-3-8b-chat-turbo',
+ 'llama-3-70b-chat-lite',
+ 'llama-3-8b-chat-lite',
+ 'llama-2-13b-chat',
+ 'llama-3.1-405b-turbo',
+ 'llama-3.1-70b-turbo',
+ 'llama-3.1-8b-turbo',
+ 'LlamaGuard-2-8b',
+ 'llamaguard-7b',
+ 'Llama-Vision-Free',
+ 'Llama-Guard-7b',
+ 'Llama-3.2-90B-Vision-Instruct-Turbo',
+ 'Meta-Llama-Guard-3-8B',
+ 'Llama-3.2-11B-Vision-Instruct-Turbo',
+ 'Llama-Guard-3-11B-Vision-Turbo',
+ 'Llama-3.2-3B-Instruct-Turbo',
+ 'Llama-3.2-1B-Instruct-Turbo',
+ 'llama-2-7b-chat-int8',
+ 'llama-2-7b-chat-fp16',
+ 'Llama 3.1 405B Instruct',
+ 'Llama 3.1 70B Instruct',
+ 'Llama 3.1 8B Instruct',
+
+ # mistral-ai
+ 'Mixtral-8x7B-Instruct-v0.1',
+ 'Mixtral-8x22B-Instruct-v0.1',
+ 'Mistral-7B-Instruct-v0.1',
+ 'Mistral-7B-Instruct-v0.2',
+ 'Mistral-7B-Instruct-v0.3',
+
+ # Gryphe
+ 'MythoMax-L2-13b-Lite',
+ 'MythoMax-L2-13b',
+
+ # openchat
+ 'openchat-3.5-0106',
+
+ # qwen
+    #'Qwen1.5-72B-Chat', # Empty answer
+    #'Qwen1.5-110B-Chat', # Empty answer
+ 'Qwen2-72B-Instruct',
+ 'Qwen2.5-7B-Instruct-Turbo',
+ 'Qwen2.5-72B-Instruct-Turbo',
+
+ # google
+ 'gemma-2b-it',
+ 'gemma-2-9b-it',
+ 'gemma-2-27b-it',
+
+ # gemini
+ 'gemini-1.5-flash',
+ 'gemini-1.5-pro',
+
+ # databricks
+ 'dbrx-instruct',
+
+ # deepseek-ai
+ 'deepseek-coder-6.7b-base',
+ 'deepseek-coder-6.7b-instruct',
+ 'deepseek-math-7b-instruct',
+
+ # NousResearch
+ 'deepseek-math-7b-instruct',
+ 'Nous-Hermes-2-Mixtral-8x7B-DPO',
+ 'hermes-2-pro-mistral-7b',
+
+ # teknium
+ 'openhermes-2.5-mistral-7b',
+
+ # microsoft
+ 'WizardLM-2-8x22B',
+ 'phi-2',
+
+ # upstage
+ 'SOLAR-10.7B-Instruct-v1.0',
+
+ # pawan
+ 'cosmosrp',
+
+ # liquid
+ 'lfm-40b-moe',
+
+ # DiscoResearch
+ 'discolm-german-7b-v1',
+
+ # tiiuae
+ 'falcon-7b-instruct',
+
+ # defog
+ 'sqlcoder-7b-2',
+
+ # tinyllama
+ 'tinyllama-1.1b-chat',
+
+ # HuggingFaceH4
+ 'zephyr-7b-beta',
+ ]
+
+ models = [*text_models]
+
+ model_aliases = {
+ # anthropic
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "claude-3-sonnet": "claude-3-sonnet-20240229",
+ "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
+ "claude-3-opus": "claude-3-opus-20240229",
+
+ # openai
+ "gpt-4o": "chatgpt-4o-latest",
+ #"gpt-4": "gpt-4",
+ #"gpt-4-turbo": "gpt-4-turbo",
+ "gpt-4o": "gpt-4o-2024-05-13",
+ "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+ #"gpt-4o-mini": "gpt-4o-mini",
+ "gpt-4o": "gpt-4o-2024-08-06",
+ "gpt-3.5-turbo": "gpt-3.5-turbo",
+ "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
+ #"gpt-4o": "gpt-4o",
+ "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
+ "gpt-4": "gpt-4-0125-preview",
+ "gpt-4": "gpt-4-1106-preview",
+
+ # meta-llama
+ "llama-3-70b": "llama-3-70b-chat",
+ "llama-3-8b": "llama-3-8b-chat",
+ "llama-3-8b": "llama-3-8b-chat-turbo",
+ "llama-3-70b": "llama-3-70b-chat-lite",
+ "llama-3-8b": "llama-3-8b-chat-lite",
+ "llama-2-13b": "llama-2-13b-chat",
+ "llama-3.1-405b": "llama-3.1-405b-turbo",
+ "llama-3.1-70b": "llama-3.1-70b-turbo",
+ "llama-3.1-8b": "llama-3.1-8b-turbo",
+ "llamaguard-2-8b": "LlamaGuard-2-8b",
+ "llamaguard-7b": "llamaguard-7b",
+ #"llama_vision_free": "Llama-Vision-Free", # Unknown
+ "llamaguard-7b": "Llama-Guard-7b",
+ "llama-3.2-90b": "Llama-3.2-90B-Vision-Instruct-Turbo",
+ "llamaguard-3-8b": "Meta-Llama-Guard-3-8B",
+ "llama-3.2-11b": "Llama-3.2-11B-Vision-Instruct-Turbo",
+ "llamaguard-3-11b": "Llama-Guard-3-11B-Vision-Turbo",
+ "llama-3.2-3b": "Llama-3.2-3B-Instruct-Turbo",
+ "llama-3.2-1b": "Llama-3.2-1B-Instruct-Turbo",
+ "llama-2-7b": "llama-2-7b-chat-int8",
+ "llama-2-7b": "llama-2-7b-chat-fp16",
+ "llama-3.1-405b": "Llama 3.1 405B Instruct",
+ "llama-3.1-70b": "Llama 3.1 70B Instruct",
+ "llama-3.1-8b": "Llama 3.1 8B Instruct",
+
+ # mistral-ai
+ "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
+ "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
+ "mixtral-8x7b": "Mistral-7B-Instruct-v0.1",
+ "mixtral-8x7b": "Mistral-7B-Instruct-v0.2",
+ "mixtral-8x7b": "Mistral-7B-Instruct-v0.3",
+
+ # Gryphe
+ "mythomax-13b": "MythoMax-L2-13b-Lite",
+ "mythomax-13b": "MythoMax-L2-13b",
+
+ # openchat
+ "openchat-3.5": "openchat-3.5-0106",
+
+ # qwen
+ #"qwen-1.5-72b": "Qwen1.5-72B-Chat", # Empty answer
+ #"qwen-1.5-110b": "Qwen1.5-110B-Chat", # Empty answer
+ "qwen-2-72b": "Qwen2-72B-Instruct",
+ "qwen-2-5-7b": "Qwen2.5-7B-Instruct-Turbo",
+ "qwen-2-5-72b": "Qwen2.5-72B-Instruct-Turbo",
+
+ # google
+ "gemma-2b": "gemma-2b-it",
+ "gemma-2-9b": "gemma-2-9b-it",
+ "gemma-2b-27b": "gemma-2-27b-it",
+
+ # gemini
+ "gemini-flash": "gemini-1.5-flash",
+ "gemini-pro": "gemini-1.5-pro",
+
+ # databricks
+ "dbrx-instruct": "dbrx-instruct",
+
+ # deepseek-ai
+ #"deepseek-coder": "deepseek-coder-6.7b-base",
+ "deepseek-coder": "deepseek-coder-6.7b-instruct",
+ #"deepseek-math": "deepseek-math-7b-instruct",
+
+ # NousResearch
+ #"deepseek-math": "deepseek-math-7b-instruct",
+ "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+ "hermes-2": "hermes-2-pro-mistral-7b",
+
+ # teknium
+ "openhermes-2.5": "openhermes-2.5-mistral-7b",
+
+ # microsoft
+ "wizardlm-2-8x22b": "WizardLM-2-8x22B",
+ #"phi-2": "phi-2",
+
+ # upstage
+ "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",
+
+ # pawan
+ #"cosmosrp": "cosmosrp",
+
+ # liquid
+ "lfm-40b": "lfm-40b-moe",
+
+ # DiscoResearch
+ "german-7b": "discolm-german-7b-v1",
+
+ # tiiuae
+ #"falcon-7b": "falcon-7b-instruct",
+
+ # defog
+ #"sqlcoder-7b": "sqlcoder-7b-2",
+
+ # tinyllama
+ #"tinyllama-1b": "tinyllama-1.1b-chat",
+
+ # HuggingFaceH4
+ "zephyr-7b": "zephyr-7b-beta",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ max_tokens: str = 4096,
+ temperature: str = 1,
+ top_p: str = 1,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ chunked_messages = split_messages(messages)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'authorization': 'Bearer missing api key',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://llmplayground.net',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://llmplayground.net/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ data = {
+ "messages": chunked_messages,
+ "model": model,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stream": stream
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint_completions, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ text = ""
+ if stream:
+ async for line in response.content:
+ line = line.decode('utf-8')
+ if line.startswith('data: '):
+ json_str = line[6:]
+ try:
+ chunk = json.loads(json_str)
+ if 'choices' in chunk and chunk['choices']:
+ content = chunk['choices'][0].get('delta', {}).get('content', '')
+                                    text += content # Accumulate the streamed deltas
+ except json.JSONDecodeError as e:
+ print(f"Error decoding JSON: {json_str}, Error: {e}")
+ elif line.strip() == "[DONE]":
+ break
+ yield clean_response(text)
+ else:
+ response_json = await response.json()
+ text = response_json["choices"][0]["message"]["content"]
+ yield clean_response(text)
+
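A quick sanity check of the chunking helper defined above, assuming the new module imports under the path the diff creates: a 2100-character message splits into three chunks of at most 995 characters, all keeping the original role.

```python
from g4f.Provider.airforce.AirforceChat import split_message

long_message = {"role": "user", "content": "x" * 2100}
chunks = split_message(long_message, chunk_size=995)

print(len(chunks))                           # 3
print([len(c["content"]) for c in chunks])   # [995, 995, 110]
print({c["role"] for c in chunks})           # {'user'}
```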
diff --git a/g4f/Provider/airforce/AirforceImage.py b/g4f/Provider/airforce/AirforceImage.py
new file mode 100644
index 00000000..010d1a94
--- /dev/null
+++ b/g4f/Provider/airforce/AirforceImage.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import random
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class AirforceImage(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Airforce Image"
+ #url = "https://api.airforce"
+ api_endpoint_imagine2 = "https://api.airforce/imagine2"
+ #working = True
+
+ default_model = 'flux'
+ image_models = [
+ 'flux',
+ 'flux-realism',
+ 'flux-anime',
+ 'flux-3d',
+ 'flux-disney',
+ 'flux-pixel',
+ 'flux-4o',
+ 'any-dark',
+ 'stable-diffusion-xl-base',
+ 'stable-diffusion-xl-lightning',
+ ]
+ models = [*image_models]
+
+ model_aliases = {
+ "sdxl": "stable-diffusion-xl-base",
+ "sdxl": "stable-diffusion-xl-lightning",
+ }
+
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ size: str = '1:1',
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'authorization': 'Bearer missing api key',
+ 'cache-control': 'no-cache',
+ 'origin': 'https://llmplayground.net',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://llmplayground.net/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = messages[-1]['content']
+ seed = random.randint(0, 4294967295)
+ params = {
+ 'model': model,
+ 'prompt': prompt,
+ 'size': size,
+ 'seed': str(seed)
+ }
+ async with session.get(cls.api_endpoint_imagine2, params=params, proxy=proxy) as response:
+ response.raise_for_status()
+ if response.status == 200:
+ content_type = response.headers.get('Content-Type', '')
+ if 'image' in content_type:
+ image_url = str(response.url)
+ yield ImageResponse(image_url, alt="Airforce generated image")
+ else:
+ content = await response.text()
+ yield f"Unexpected content type: {content_type}\nResponse content: {content}"
+ else:
+ error_content = await response.text()
+ yield f"Error: {error_content}"
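
AirforceImage issues a plain GET with the model, prompt, aspect ratio, and a random 32-bit seed as query parameters. A sketch of the request URL it builds (the prompt text is illustrative):

```python
import random
from urllib.parse import urlencode

params = {
    "model": "flux",
    "prompt": "a lighthouse at dusk",  # illustrative prompt
    "size": "1:1",
    "seed": str(random.randint(0, 4294967295)),  # 32-bit seed, as above
}
print(f"https://api.airforce/imagine2?{urlencode(params)}")
```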
diff --git a/g4f/Provider/airforce/__init__.py b/g4f/Provider/airforce/__init__.py
new file mode 100644
index 00000000..5ffa6d31
--- /dev/null
+++ b/g4f/Provider/airforce/__init__.py
@@ -0,0 +1,2 @@
+from .AirforceChat import AirforceChat
+from .AirforceImage import AirforceImage
diff --git a/g4f/models.py b/g4f/models.py
index bea09f28..944c4e9c 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -8,8 +8,8 @@ from .Provider import (
AIChatFree,
AiMathGPT,
Airforce,
+ AIUncensored,
Allyfy,
- AmigoChat,
Bing,
Blackbox,
ChatGpt,
@@ -104,11 +104,11 @@ default = Model(
ChatHub,
ChatGptEs,
ChatHub,
- AmigoChat,
ChatifyAI,
Cloudflare,
Editee,
AiMathGPT,
+ AIUncensored,
])
)
@@ -130,20 +130,20 @@ gpt_3 = Model(
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'OpenAI',
- best_provider = IterListProvider([Allyfy, NexraChatGPT, Airforce, DarkAI, Liaobots])
+ best_provider = IterListProvider([Allyfy, NexraChatGPT, DarkAI, Airforce, Liaobots])
)
# gpt-4
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'OpenAI',
- best_provider = IterListProvider([NexraChatGPT, Blackbox, ChatGptEs, AmigoChat, DarkAI, Editee, GizAI, Airforce, Liaobots, OpenaiChat])
+ best_provider = IterListProvider([NexraChatGPT, Blackbox, ChatGptEs, DarkAI, Editee, GizAI, Airforce, Liaobots, OpenaiChat])
)
gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'OpenAI',
- best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, AmigoChat, RubiksAI, Liaobots, Airforce, GizAI, ChatgptFree, Koala, OpenaiChat, ChatGpt])
+ best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, RubiksAI, Liaobots, Airforce, GizAI, ChatgptFree, Koala, OpenaiChat, ChatGpt])
)
gpt_4_turbo = Model(
@@ -162,13 +162,13 @@ gpt_4 = Model(
o1 = Model(
name = 'o1',
base_provider = 'OpenAI',
- best_provider = AmigoChat
+ best_provider = None
)
o1_mini = Model(
name = 'o1-mini',
base_provider = 'OpenAI',
- best_provider = IterListProvider([AmigoChat, GizAI])
+ best_provider = IterListProvider([GizAI])
)
@@ -191,7 +191,7 @@ meta = Model(
llama_2_7b = Model(
name = "llama-2-7b",
base_provider = "Meta Llama",
- best_provider = Cloudflare
+ best_provider = IterListProvider([Cloudflare, Airforce])
)
llama_2_13b = Model(
@@ -217,44 +217,44 @@ llama_3_70b = Model(
llama_3_1_8b = Model(
name = "llama-3.1-8b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, Airforce, GizAI, PerplexityLabs])
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, GizAI, Airforce, PerplexityLabs])
)
llama_3_1_70b = Model(
name = "llama-3.1-70b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, AiMathGPT, RubiksAI, GizAI, HuggingFace, PerplexityLabs])
+ best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, AiMathGPT, RubiksAI, GizAI, Airforce, HuggingFace, PerplexityLabs])
)
llama_3_1_405b = Model(
name = "llama-3.1-405b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([DeepInfraChat, Blackbox, AmigoChat, DarkAI, Airforce])
+ best_provider = IterListProvider([DeepInfraChat, Blackbox, DarkAI, Airforce])
)
# llama 3.2
llama_3_2_1b = Model(
name = "llama-3.2-1b",
base_provider = "Meta Llama",
- best_provider = Cloudflare
+ best_provider = IterListProvider([Cloudflare, Airforce])
)
llama_3_2_3b = Model(
name = "llama-3.2-3b",
base_provider = "Meta Llama",
- best_provider = Cloudflare
+ best_provider = IterListProvider([Cloudflare, Airforce])
)
llama_3_2_11b = Model(
name = "llama-3.2-11b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Cloudflare, HuggingChat, HuggingFace])
+ best_provider = IterListProvider([Cloudflare, HuggingChat, Airforce, HuggingFace])
)
llama_3_2_90b = Model(
name = "llama-3.2-90b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([AmigoChat, Airforce])
+ best_provider = IterListProvider([Airforce])
)
@@ -271,6 +271,18 @@ llamaguard_2_8b = Model(
best_provider = Airforce
)
+llamaguard_3_8b = Model(
+ name = "llamaguard-3-8b",
+ base_provider = "Meta Llama",
+ best_provider = Airforce
+)
+
+llamaguard_3_11b = Model(
+ name = "llamaguard-3-11b",
+ base_provider = "Meta Llama",
+ best_provider = Airforce
+)
+
### Mistral ###
mistral_7b = Model(
@@ -305,14 +317,14 @@ mistral_large = Model(
### NousResearch ###
-mixtral_8x7b_dpo = Model(
- name = "mixtral-8x7b-dpo",
+hermes_2 = Model(
+ name = "hermes-2",
base_provider = "NousResearch",
best_provider = Airforce
)
-yi_34b = Model(
- name = "yi-34b",
+hermes_2_dpo = Model(
+ name = "hermes-2-dpo",
base_provider = "NousResearch",
best_provider = Airforce
)
@@ -328,7 +340,7 @@ hermes_3 = Model(
phi_2 = Model(
name = "phi-2",
base_provider = "Microsoft",
- best_provider = Cloudflare
+ best_provider = IterListProvider([Cloudflare, Airforce])
)
phi_3_medium_4k = Model(
@@ -348,7 +360,7 @@ phi_3_5_mini = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google DeepMind',
- best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, NexraGeminiPro, AmigoChat, Editee, GizAI, Airforce, Liaobots])
+ best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, NexraGeminiPro, Editee, GizAI, Airforce, Liaobots])
)
gemini_flash = Model(
@@ -364,10 +376,10 @@ gemini = Model(
)
# gemma
-gemma_2b_9b = Model(
- name = 'gemma-2b-9b',
+gemma_2b = Model(
+ name = 'gemma-2b',
base_provider = 'Google',
- best_provider = Airforce
+ best_provider = IterListProvider([ReplicateHome, Airforce])
)
gemma_2b_27b = Model(
@@ -376,12 +388,6 @@ gemma_2b_27b = Model(
best_provider = IterListProvider([DeepInfraChat, Airforce])
)
-gemma_2b = Model(
- name = 'gemma-2b',
- base_provider = 'Google',
- best_provider = IterListProvider([ReplicateHome, Airforce])
-)
-
gemma_7b = Model(
name = 'gemma-7b',
base_provider = 'Google',
@@ -389,18 +395,18 @@ gemma_7b = Model(
)
# gemma 2
-gemma_2_27b = Model(
- name = 'gemma-2-27b',
- base_provider = 'Google',
- best_provider = Airforce
-)
-
gemma_2 = Model(
name = 'gemma-2',
base_provider = 'Google',
best_provider = ChatHub
)
+gemma_2_9b = Model(
+ name = 'gemma-2-9b',
+ base_provider = 'Google',
+ best_provider = Airforce
+)
+
### Anthropic ###
claude_2_1 = Model(
@@ -413,26 +419,26 @@ claude_2_1 = Model(
claude_3_opus = Model(
name = 'claude-3-opus',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Airforce, Liaobots])
+ best_provider = IterListProvider([Liaobots])
)
claude_3_sonnet = Model(
name = 'claude-3-sonnet',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Airforce, Liaobots])
+ best_provider = IterListProvider([Liaobots])
)
claude_3_haiku = Model(
name = 'claude-3-haiku',
base_provider = 'Anthropic',
- best_provider = IterListProvider([DDG, Airforce, GizAI, Liaobots])
+ best_provider = IterListProvider([DDG, GizAI, Liaobots])
)
# claude 3.5
claude_3_5_sonnet = Model(
name = 'claude-3.5-sonnet',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Blackbox, Editee, AmigoChat, Airforce, GizAI, Liaobots])
+ best_provider = IterListProvider([Blackbox, Editee, GizAI, Liaobots])
)
@@ -493,40 +499,34 @@ qwen_1_5_0_5b = Model(
qwen_1_5_7b = Model(
name = 'qwen-1.5-7b',
base_provider = 'Qwen',
- best_provider = IterListProvider([Cloudflare, Airforce])
+ best_provider = IterListProvider([Cloudflare])
)
qwen_1_5_14b = Model(
name = 'qwen-1.5-14b',
base_provider = 'Qwen',
- best_provider = IterListProvider([FreeChatgpt, Cloudflare, Airforce])
+ best_provider = IterListProvider([FreeChatgpt, Cloudflare])
)
-qwen_1_5_72b = Model(
- name = 'qwen-1.5-72b',
+# qwen 2
+qwen_2_72b = Model(
+ name = 'qwen-2-72b',
base_provider = 'Qwen',
- best_provider = Airforce
+ best_provider = IterListProvider([DeepInfraChat, HuggingChat, Airforce, HuggingFace])
)
-qwen_1_5_110b = Model(
- name = 'qwen-1.5-110b',
+qwen_2_5_7b = Model(
+ name = 'qwen-2-5-7b',
base_provider = 'Qwen',
best_provider = Airforce
)
-qwen_1_5_1_8b = Model(
- name = 'qwen-1.5-1.8b',
+qwen_2_5_72b = Model(
+ name = 'qwen-2-5-72b',
base_provider = 'Qwen',
best_provider = Airforce
)
-# qwen 2
-qwen_2_72b = Model(
- name = 'qwen-2-72b',
- base_provider = 'Qwen',
- best_provider = IterListProvider([DeepInfraChat, HuggingChat, Airforce, HuggingFace])
-)
-
qwen = Model(
name = 'qwen',
base_provider = 'Qwen',
@@ -556,18 +556,18 @@ yi_1_5_9b = Model(
)
### Upstage ###
-solar_1_mini = Model(
- name = 'solar-1-mini',
- base_provider = 'Upstage',
- best_provider = Upstage
-)
-
solar_10_7b = Model(
name = 'solar-10-7b',
base_provider = 'Upstage',
best_provider = Airforce
)
+solar_mini = Model(
+ name = 'solar-mini',
+ base_provider = 'Upstage',
+ best_provider = Upstage
+)
+
solar_pro = Model(
name = 'solar-pro',
base_provider = 'Upstage',
@@ -583,8 +583,8 @@ pi = Model(
)
### DeepSeek ###
-deepseek = Model(
- name = 'deepseek',
+deepseek_coder = Model(
+ name = 'deepseek-coder',
base_provider = 'DeepSeek',
best_provider = Airforce
)
@@ -630,7 +630,7 @@ lzlv_70b = Model(
openchat_3_5 = Model(
name = 'openchat-3.5',
base_provider = 'OpenChat',
- best_provider = Cloudflare
+ best_provider = IterListProvider([Cloudflare])
)
openchat_3_6_8b = Model(
@@ -683,23 +683,6 @@ sonar_chat = Model(
best_provider = PerplexityLabs
)
-
-### Gryphe ###
-mythomax_l2_13b = Model(
- name = 'mythomax-l2-13b',
- base_provider = 'Gryphe',
- best_provider = Airforce
-)
-
-
-### Pawan ###
-cosmosrp = Model(
- name = 'cosmosrp',
- base_provider = 'Pawan',
- best_provider = Airforce
-)
-
-
### TheBloke ###
german_7b = Model(
name = 'german-7b',
@@ -708,14 +691,6 @@ german_7b = Model(
)
-### Tinyllama ###
-tinyllama_1_1b = Model(
- name = 'tinyllama-1.1b',
- base_provider = 'Tinyllama',
- best_provider = Cloudflare
-)
-
-
### Fblgit ###
cybertron_7b = Model(
name = 'cybertron-7b',
@@ -723,6 +698,7 @@ cybertron_7b = Model(
best_provider = Cloudflare
)
+
### Nvidia ###
nemotron_70b = Model(
name = 'nemotron-70b',
@@ -731,6 +707,46 @@ nemotron_70b = Model(
)
+### Teknium ###
+openhermes_2_5 = Model(
+ name = 'openhermes-2.5',
+ base_provider = 'Teknium',
+ best_provider = Airforce
+)
+
+
+### Pawan ###
+cosmosrp = Model(
+ name = 'cosmosrp',
+ base_provider = 'Pawan',
+ best_provider = Airforce
+)
+
+
+### Liquid ###
+lfm_40b = Model(
+ name = 'lfm-40b',
+ base_provider = 'Liquid',
+ best_provider = Airforce
+)
+
+
+### DiscoResearch ###
+german_7b = Model(
+ name = 'german-7b',
+ base_provider = 'DiscoResearch',
+ best_provider = Airforce
+)
+
+
+### HuggingFaceH4 ###
+zephyr_7b = Model(
+ name = 'zephyr-7b',
+ base_provider = 'HuggingFaceH4',
+ best_provider = Airforce
+)
+
+
#############
### Image ###
@@ -754,7 +770,7 @@ sdxl_lora = Model(
sdxl = Model(
name = 'sdxl',
base_provider = 'Stability AI',
- best_provider = IterListProvider([ReplicateHome])
+ best_provider = IterListProvider([ReplicateHome, Airforce])
)
@@ -792,21 +808,21 @@ playground_v2_5 = Model(
flux = Model(
name = 'flux',
base_provider = 'Flux AI',
- best_provider = IterListProvider([Blackbox, Airforce])
+ best_provider = IterListProvider([Blackbox, AIUncensored, Airforce])
)
flux_pro = Model(
name = 'flux-pro',
base_provider = 'Flux AI',
- best_provider = IterListProvider([NexraFluxPro, AmigoChat])
+ best_provider = IterListProvider([NexraFluxPro])
)
flux_realism = Model(
name = 'flux-realism',
base_provider = 'Flux AI',
- best_provider = IterListProvider([Airforce, AmigoChat])
+ best_provider = IterListProvider([Airforce])
)
@@ -947,6 +963,8 @@ class ModelUtils:
# llamaguard
'llamaguard-7b': llamaguard_7b,
'llamaguard-2-8b': llamaguard_2_8b,
+'llamaguard-3-8b': llamaguard_3_8b,
+'llamaguard-3-11b': llamaguard_3_11b,
### Mistral ###
@@ -958,17 +976,17 @@ class ModelUtils:
### NousResearch ###
-'mixtral-8x7b-dpo': mixtral_8x7b_dpo,
+'hermes-2': hermes_2,
+'hermes-2-dpo': hermes_2_dpo,
'hermes-3': hermes_3,
-
-'yi-34b': yi_34b,
-
-
+
+
### Microsoft ###
'phi-2': phi_2,
'phi_3_medium-4k': phi_3_medium_4k,
'phi-3.5-mini': phi_3_5_mini,
+
### Google ###
# gemini
'gemini': gemini,
@@ -977,13 +995,12 @@ class ModelUtils:
# gemma
'gemma-2b': gemma_2b,
-'gemma-2b-9b': gemma_2b_9b,
'gemma-2b-27b': gemma_2b_27b,
'gemma-7b': gemma_7b,
# gemma-2
'gemma-2': gemma_2,
-'gemma-2-27b': gemma_2_27b,
+'gemma-2-9b': gemma_2_9b,
### Anthropic ###
@@ -1028,10 +1045,9 @@ class ModelUtils:
'qwen-1.5-0.5b': qwen_1_5_0_5b,
'qwen-1.5-7b': qwen_1_5_7b,
'qwen-1.5-14b': qwen_1_5_14b,
-'qwen-1.5-72b': qwen_1_5_72b,
-'qwen-1.5-110b': qwen_1_5_110b,
-'qwen-1.5-1.8b': qwen_1_5_1_8b,
'qwen-2-72b': qwen_2_72b,
+'qwen-2-5-7b': qwen_2_5_7b,
+'qwen-2-5-72b': qwen_2_5_72b,
### Zhipu AI ###
@@ -1044,16 +1060,17 @@ class ModelUtils:
### Upstage ###
-'solar-mini': solar_1_mini,
'solar-10-7b': solar_10_7b,
+'solar-mini': solar_mini,
'solar-pro': solar_pro,
### Inflection ###
'pi': pi,
+
### DeepSeek ###
-'deepseek': deepseek,
+'deepseek-coder': deepseek_coder,
### Yorickvp ###
@@ -1094,30 +1111,38 @@ class ModelUtils:
### Perplexity AI ###
'sonar-online': sonar_online,
'sonar-chat': sonar_chat,
-
-
-### Gryphe ###
-'mythomax-l2-13b': sonar_chat,
-
-
-### Pawan ###
-'cosmosrp': cosmosrp,
-
+
### TheBloke ###
'german-7b': german_7b,
-### Tinyllama ###
-'tinyllama-1.1b': tinyllama_1_1b,
-
-
### Fblgit ###
'cybertron-7b': cybertron_7b,
### Nvidia ###
'nemotron-70b': nemotron_70b,
+
+
+### Teknium ###
+'openhermes-2.5': openhermes_2_5,
+
+
+### Pawan ###
+'cosmosrp': cosmosrp,
+
+
+### Liquid ###
+'lfm-40b': lfm_40b,
+
+
+### DiscoResearch ###
+'german-7b': german_7b,
+
+
+### HuggingFaceH4 ###
+'zephyr-7b': zephyr_7b,
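
After this commit, the renamed entries resolve through the registry shown above. Assuming the mapping is `ModelUtils.convert`, as elsewhere in `g4f/models.py`, a lookup sketch:

```python
from g4f.models import ModelUtils

# 'deepseek' was renamed to 'deepseek-coder' in this commit.
model = ModelUtils.convert["deepseek-coder"]
print(model.name, model.base_provider)  # deepseek-coder DeepSeek
```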