-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathapp.py
80 lines (62 loc) · 2.55 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import streamlit as st
import os
import google.generativeai as genai
from PIL import Image
from dotenv import load_dotenv

# Load environment variables (e.g. from a local .env file) BEFORE reading any
# of them — in the original the key was assigned first, so load_dotenv() had
# no effect on it.
load_dotenv()

# Read the API key from the environment instead of hard-coding a placeholder
# string in source; committing real keys to version control is a security risk.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Configure the generative AI client library with the key.
genai.configure(api_key=GOOGLE_API_KEY)

# Instantiate the two Gemini model handles used by the rest of the app:
# a text-only model and a multimodal (text + image) model.
pro_model = genai.GenerativeModel("gemini-pro")
pro_vision_model = genai.GenerativeModel("gemini-pro-vision")
# Text-only helper: identical questions hit Streamlit's data cache instead of
# re-calling the Gemini API.
@st.cache_data
def get_gemini_response(question):
    """Send *question* to the text-only Gemini Pro model and return its reply text."""
    result = pro_model.generate_content(question)
    return result.text
# Multimodal helper: prompt text plus one or more PIL images.
@st.cache_data
def get_gemini_vision_response(_images, prompt_text):
    """Return the Gemini Pro Vision model's text reply for *_images*.

    Parameters
    ----------
    _images : list
        PIL images to describe. The leading underscore tells st.cache_data to
        skip hashing this argument (PIL images are not hashable) — presumably
        intentional; verify against Streamlit caching docs.
    prompt_text : str
        Optional instruction; renamed from ``input``, which shadowed the
        builtin of the same name. When falsy, only the images are sent.
    """
    # Prepend the text prompt only when one was provided.
    prompt = [prompt_text] + _images if prompt_text else _images
    response = pro_vision_model.generate_content(prompt)
    return response.text
# Set page configuration
# Wide layout; must be the first Streamlit call rendered on the page.
st.set_page_config(page_title="Gemini PRO Model QnA", layout="wide")
# Sidebar
# Model picker: the selection drives which branch of the main UI renders below.
st.sidebar.title("Model Selection")
selected_model = st.sidebar.selectbox(
    "Which Model would you like to select",
    ("Gemini Pro Model", "Gemini Pro Vision Model")
)
# Toggle switch for st.cache
# NOTE(review): the label "Log data" does not match the variable name
# `use_cache`, and downstream both branches of the `use_cache` check run the
# same code — confirm what this toggle was meant to control.
use_cache = st.sidebar.checkbox("Log data", value=True)
# Display different content based on the selected model.
if selected_model == "Gemini Pro Model":
    # --- Text-only Q&A flow ---
    st.header("Gemini Pro LLM Application")
    input_question = st.text_input("Prompt Here:", key="input")
    submit_button = st.button("Ask the Question")
    if submit_button:
        # BUG FIX: the original `if use_cache: / else:` executed identical
        # code in both branches; caching is always applied by @st.cache_data
        # on the helper, so the dead duplication is collapsed here.
        response = get_gemini_response(input_question)
        st.subheader("The Response is:")
        st.write(response)
elif selected_model == "Gemini Pro Vision Model":
    # --- Multimodal (image description) flow ---
    st.header("Gemini Pro Vision LLM Application for multiple images")
    input_question = st.text_input("Prompt Here:", key="input")
    uploaded_files = st.file_uploader("Choose Images(Upto 4 images)", type=["jpg", "png", "jpeg"], accept_multiple_files=True)
    images = []
    if uploaded_files:
        # Enforce the 4-image limit the uploader label promises; the original
        # accepted any number of files silently.
        if len(uploaded_files) > 4:
            st.warning("Only the first 4 images will be used.")
        for i, uploaded_file in enumerate(uploaded_files[:4]):
            image = Image.open(uploaded_file)
            st.image(image, caption=f"Uploaded Image {i + 1}", use_column_width=False, width=100)
            images.append(image)
    submit_button = st.button("Describe the given Images")
    if submit_button:
        if not images:
            # Guard: calling the model with an empty image list would fail.
            st.warning("Please upload at least one image first.")
        else:
            response = get_gemini_vision_response(images, input_question)
            st.subheader("Image Descriptions:")
            st.write(response)