Skip to content

Commit

Permalink
Add GeminiPro API provider
Browse files Browse the repository at this point in the history
Set min version for undetected-chromedriver
Add api_key to the new client
  • Loading branch information
hlohaus committed Feb 23, 2024
1 parent 51b4aaa commit 51264fe
Show file tree
Hide file tree
Showing 11 changed files with 223 additions and 68 deletions.
19 changes: 9 additions & 10 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ or set the api base in your client to: [http://localhost:1337/v1](http://localho
1. [Download and install Python](https://www.python.org/downloads/) (Version 3.10+ is recommended).
2. [Install Google Chrome](https://www.google.com/chrome/) for providers with webdriver

##### Install using pypi:
##### Install using PyPI package:

```
pip install -U g4f[all]
Expand All @@ -113,35 +113,33 @@ Or use partial requirements.

See: [/docs/requirements](/docs/requirements.md)

##### Install from source:
##### Install from source using git:

See: [/docs/git](/docs/git.md)


##### Install using Docker
##### Install using Docker for Developers:

See: [/docs/docker](/docs/docker.md)


## 💡 Usage

#### Text Generation
**with Python**

```python
from g4f.client import Client

client = Client()
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Say this is a test"}],
messages=[{"role": "user", "content": "Hello"}],
...
)
print(response.choices[0].message.content)
```

#### Image Generation
**with Python**

```python
from g4f.client import Client
Expand All @@ -154,14 +152,15 @@ response = client.images.generate(
)
image_url = response.data[0].url
```
Result:

**Result:**

[![Image with cat](/docs/cat.jpeg)](/docs/client.md)

**See also for Python:**
**See also:**

- [Documentation for new Client](/docs/client.md)
- [Documentation for leagcy API](/docs/leagcy.md)
- Documentation for the new Client: [/docs/client](/docs/client.md)
- Documentation for the legacy API: [docs/legacy](/docs/leagcy.md)


#### Web UI
Expand Down
33 changes: 29 additions & 4 deletions docs/client.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,12 +37,16 @@ client = Client(
)
```

You also have the option to define a proxy in the client for all outgoing requests:
## Configuration

You can set an `api_key` for your provider in the client.
You also have the option to define a proxy for all outgoing requests:

```python
from g4f.client import Client

client = Client(
api_key="...",
proxies="http://user:pass@host",
...
)
Expand Down Expand Up @@ -74,7 +78,7 @@ stream = client.chat.completions.create(
)
for chunk in stream:
if chunk.choices[0].delta.content:
print(chunk.choices[0].delta.content, end="")
print(chunk.choices[0].delta.content or "", end="")
```

**Image Generation:**
Expand Down Expand Up @@ -109,7 +113,28 @@ image_url = response.data[0].url

Original / Variant:

[![Original Image](/docs/cat.jpeg)](/docs/client.md)
[![Variant Image](/docs/cat.webp)](/docs/client.md)
[![Original Image](/docs/cat.jpeg)](/docs/client.md) [![Variant Image](/docs/cat.webp)](/docs/client.md)

#### Advanced example using GeminiProVision

```python
from g4f.client import Client
from g4f.Provider.GeminiPro import GeminiPro

client = Client(
api_key="...",
provider=GeminiPro
)
response = client.chat.completions.create(
model="gemini-pro-vision",
messages=[{"role": "user", "content": "What are on this image?"}],
image=open("docs/cat.jpeg", "rb")
)
print(response.choices[0].message.content)
```
**Question:** What are on this image?
```
A cat is sitting on a window sill looking at a bird outside the window.
```

[Return to Home](/)
86 changes: 86 additions & 0 deletions g4f/Provider/GeminiPro.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
from __future__ import annotations

import base64
import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages, ImageType
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import to_bytes, is_accepted_format


class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for Google's Gemini Pro REST API.

    Talks to ``generativelanguage.googleapis.com`` and requires a Google
    AI Studio ``api_key``.  Supports the text model ``gemini-pro`` and the
    multimodal ``gemini-pro-vision`` (selected automatically when an image
    is supplied and no model name is given).
    """
    url = "https://ai.google.dev"
    working = True
    supports_message_history = True
    default_model = "gemini-pro"
    models = ["gemini-pro", "gemini-pro-vision"]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = False,
        proxy: str = None,
        api_key: str = None,
        image: ImageType = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response text from the Gemini API.

        Args:
            model: Model name; defaults to ``gemini-pro-vision`` when an
                image is supplied and no model is named.
            messages: Chat history in OpenAI message format.
            stream: Use the streaming endpoint and yield partial chunks.
            proxy: Optional proxy URL for the outgoing request.
            api_key: Google API key (the legacy ``access_token`` kwarg is
                accepted as an alias).
            image: Optional image attached to the last message.
            **kwargs: Optional generation settings: ``stop``,
                ``temperature``, ``max_tokens``, ``top_p``, ``top_k``.

        Raises:
            ValueError: If no API key was provided.
            RuntimeError: If the API reports an error or a streamed chunk
                cannot be parsed.
        """
        model = "gemini-pro-vision" if not model and image else model
        model = cls.get_model(model)
        # Accept the legacy "access_token" kwarg as an alias for api_key.
        api_key = api_key if api_key else kwargs.get("access_token")
        if not api_key:
            # Fail fast with a clear message instead of a confusing
            # HTTP 4xx from Google further down.
            raise ValueError(f'Add an "api_key" to use {cls.__name__}')
        headers = {
            "Content-Type": "application/json",
        }
        async with ClientSession(headers=headers) as session:
            method = "streamGenerateContent" if stream else "generateContent"
            url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:{method}"
            # Map OpenAI-style roles onto Gemini roles ("assistant" -> "model").
            contents = [
                {
                    "role": "model" if message["role"] == "assistant" else message["role"],
                    "parts": [{"text": message["content"]}]
                }
                for message in messages
            ]
            if image:
                # Attach the image to the last message as inline base64 data.
                image = to_bytes(image)
                contents[-1]["parts"].append({
                    "inline_data": {
                        "mime_type": is_accepted_format(image),
                        "data": base64.b64encode(image).decode()
                    }
                })
            data = {"contents": contents}
            # Only send generationConfig keys the caller actually set;
            # the API rejects explicit nulls.
            generation_config = {
                api_field: kwargs[arg]
                for arg, api_field in (
                    ("stop", "stopSequences"),
                    ("temperature", "temperature"),
                    ("max_tokens", "maxOutputTokens"),
                    ("top_p", "topP"),
                    ("top_k", "topK"),
                )
                if kwargs.get(arg) is not None
            }
            if generation_config:
                data["generationConfig"] = generation_config
            async with session.post(url, params={"key": api_key}, json=data, proxy=proxy) as response:
                if not response.ok:
                    data = await response.json()
                    # generateContent errors arrive as a single object;
                    # streamGenerateContent errors arrive as a list.
                    error = data[0] if isinstance(data, list) else data
                    raise RuntimeError(error["error"]["message"])
                if stream:
                    # The streaming endpoint emits a JSON array split across
                    # chunks: "[{\n" opens, ",\r\n" / "]" close one object.
                    lines = []
                    async for chunk in response.content:
                        if chunk == b"[{\n":
                            lines = [b"{\n"]
                        elif chunk == b",\r\n" or chunk == b"]":
                            try:
                                data = b"".join(lines)
                                data = json.loads(data)
                                yield data["candidates"][0]["content"]["parts"][0]["text"]
                            except (json.JSONDecodeError, KeyError, IndexError) as e:
                                data = data.decode() if isinstance(data, bytes) else data
                                raise RuntimeError(f"Read text failed. data: {data}") from e
                            lines = []
                        else:
                            lines.append(chunk)
                else:
                    data = await response.json()
                    yield data["candidates"][0]["content"]["parts"][0]["text"]
1 change: 1 addition & 0 deletions g4f/Provider/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
from .GeekGpt import GeekGpt
from .GeminiPro import GeminiPro
from .GeminiProChat import GeminiProChat
from .Gpt6 import Gpt6
from .GPTalk import GPTalk
Expand Down
Loading

0 comments on commit 51264fe

Please sign in to comment.