Demo UI fix debug info (deepset-ai#1846)
* Fix debug info

* Make enter to run work better

* Reintroduce default question in the eval dataset

* Output valid JSON instead of a Python dict
ZanSara authored Dec 6, 2021
1 parent 160f81a commit 983b20f
Showing 4 changed files with 26 additions and 21 deletions.
3 changes: 2 additions & 1 deletion rest_api/controller/search.py
@@ -1,5 +1,6 @@
import logging
import time
import json
from pathlib import Path

from fastapi import APIRouter
@@ -72,7 +73,7 @@ def _process_request(pipeline, request) -> QueryResponse:

result = pipeline.run(query=request.query, params=params,debug=request.debug)
end_time = time.time()
logger.info({"request": request.dict(), "response": result, "time": f"{(end_time - start_time):.2f}"})
logger.info(json.dumps({"request": request, "response": result, "time": f"{(end_time - start_time):.2f}"}, default=str))

return result

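The logging change above is the "valid JSON" part of the commit: logger.info previously received a Python dict, whose repr (single quotes, datetime(...) objects) is not parseable JSON, while json.dumps(..., default=str) falls back to str() for anything it cannot encode. A minimal sketch of the difference, using a hypothetical payload rather than a real pipeline result:

    import json
    from datetime import datetime

    payload = {"request": {"query": "What's the capital of France?"},
               "time": datetime(2021, 12, 6)}

    # Printing the dict logs its Python repr: single quotes and datetime(...) are not valid JSON.
    print(payload)

    # default=str converts anything json cannot encode natively (here the datetime),
    # so the resulting log line can be parsed back with json.loads().
    print(json.dumps(payload, default=str))
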
2 changes: 1 addition & 1 deletion ui/eval_labels_example.csv
@@ -1,5 +1,5 @@
"Question Text";"Answer"
"What is the capital of the Netherlands?";"Amsterdam"
"What is the capital of France?";"Paris"
"What's the tallest mountain in Africa?";"Mount Kilimanjaro"
"What's the climate of Beijing?";"monsoon-influenced humid continental"
"What's the longest river of Europe?";"The Volga"
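This evaluation set backs the UI's "Random question" button; ui/webapp.py samples rows from a DataFrame built from it (see the df.sample(1) calls below). A minimal loading sketch, assuming pandas and the path from the diff header:

    import pandas as pd

    # Semicolon-delimited file with a "Question Text";"Answer" header row.
    df = pd.read_csv("ui/eval_labels_example.csv", sep=";")

    # Pick one labelled question at random, as the "Random question" button does.
    row = df.sample(1)
    question = row["Question Text"].values[0]
    gold_answer = row["Answer"].values[0]
    print(question, "->", gold_answer)
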
6 changes: 3 additions & 3 deletions ui/utils.py
Expand Up @@ -83,7 +83,7 @@ def query(query, filters={}, top_k_reader=5, top_k_retriever=5) -> Tuple[List[Di
"_raw": answer,
}
)
return results, response_raw
return results, response


def send_feedback(query, answer_obj, is_correct_answer, is_correct_document, document) -> None:
Expand All @@ -108,8 +108,8 @@ def send_feedback(query, answer_obj, is_correct_answer, is_correct_document, doc
def upload_doc(file):
url = f"{API_ENDPOINT}/{DOC_UPLOAD}"
files = [("files", file)]
response_raw = requests.post(url, files=files).json()
return response_raw
response = requests.post(url, files=files).json()
return response


def get_backlink(result) -> Tuple[str, str]:
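The rename from response_raw to response keeps query() returning the parsed answers alongside the raw reply from the REST API. A hedged sketch of how a caller such as the Streamlit app might consume it; the keys read from each result dict (other than "_raw", which appears in the hunk above) are assumptions:

    from utils import query

    # query() returns (list of per-answer dicts, raw JSON response from the REST API).
    results, raw_json = query("What's the capital of France?",
                              top_k_reader=5, top_k_retriever=5)

    for result in results:
        # "_raw" holds the unmodified answer object; "answer"/"context" are assumed keys.
        print(result.get("answer"), "|", result.get("context"))

    # The raw response is useful for a debug/JSON view in the UI.
    print(raw_json)
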
36 changes: 20 additions & 16 deletions ui/webapp.py
Expand Up @@ -13,11 +13,12 @@
# and every value gets lost. To keep track of our feedback state we use the official streamlit gist mentioned
# here https://gist.github.com/tvst/036da038ab3e999a64497f42de966a92
import SessionState
from utils import HS_VERSION, haystack_is_ready, query, send_feedback, upload_doc, haystack_version, get_backlink
from utils import haystack_is_ready, query, send_feedback, upload_doc, haystack_version, get_backlink


# Adjust to a question that you would like users to see in the search bar when they load the UI:
DEFAULT_QUESTION_AT_STARTUP = os.getenv("DEFAULT_QUESTION_AT_STARTUP", "What's the capital of France?")
DEFAULT_ANSWER_AT_STARTUP = os.getenv("DEFAULT_ANSWER_AT_STARTUP", "Paris")

# Sliders
DEFAULT_DOCS_FROM_RETRIEVER = int(os.getenv("DEFAULT_DOCS_FROM_RETRIEVER", 3))
Expand All @@ -36,15 +37,16 @@ def main():

# Persistent state
state = SessionState.get(
random_question=DEFAULT_QUESTION_AT_STARTUP,
random_answer="",
last_question=DEFAULT_QUESTION_AT_STARTUP,
question=DEFAULT_QUESTION_AT_STARTUP,
answer=DEFAULT_ANSWER_AT_STARTUP,
results=None,
raw_json=None,
random_question_requested=False
)

# Small callback to reset the interface in case the text of the question changes
def reset_results(*args):
state.answer = None
state.results = None
state.raw_json = None

Expand Down Expand Up @@ -131,7 +133,7 @@ def reset_results(*args):

# Search bar
question = st.text_input("",
value=state.random_question,
value=state.question,
max_chars=100,
on_change=reset_results
)
Expand All @@ -141,22 +143,24 @@ def reset_results(*args):

# Run button
run_pressed = col1.button("Run")
run_query = run_pressed or question != state.last_question

# Get next random question from the CSV
#state.get_next_question = col2.button("Random question")
if col2.button("Random question"):
reset_results()
new_row = df.sample(1)
while new_row["Question Text"].values[0] == state.random_question: # Avoid picking the same question twice (the change is not visible on the UI)
while new_row["Question Text"].values[0] == state.question: # Avoid picking the same question twice (the change is not visible on the UI)
new_row = df.sample(1)
state.random_question = new_row["Question Text"].values[0]
state.random_answer = new_row["Answer"].values[0]

state.question = new_row["Question Text"].values[0]
state.answer = new_row["Answer"].values[0]
state.random_question_requested = True
# Re-runs the script setting the random question as the textbox value
# Unfortunately necessary as the Random Question button is _below_ the textbox
raise st.script_runner.RerunException(st.script_request_queue.RerunData(None))

else:
state.random_question_requested = False

run_query = (run_pressed or question != state.question) and not state.random_question_requested

# Check the connection
with st.spinner("⌛️    Haystack is starting..."):
if not haystack_is_ready():
Expand All @@ -167,7 +171,7 @@ def reset_results(*args):
# Get results for query
if run_query and question:
reset_results()
state.last_question = question
state.question = question
with st.spinner(
"🧠    Performing neural search on documents... \n "
"Do you want to optimize speed or accuracy? \n"
Expand All @@ -189,9 +193,9 @@ def reset_results(*args):
if state.results:

# Show the gold answer if we use a question of the given set
if question == state.random_question and eval_mode and state.random_answer:
st.write("## Correct answers:")
st.write(state.random_answer)
if eval_mode and state.answer:
st.write("## Correct answer:")
st.write(state.answer)

st.write("## Results:")

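Most of this file's changes consolidate the session state (question/answer replace random_question/random_answer/last_question) and fix "enter to run": a query now runs on a Run click or a committed text change, but not on the rerun that the Random question button triggers to refresh the textbox. The decision boils down to the run_query expression above; a hypothetical helper restating it, with the parameter names invented for illustration:

    def should_run_query(run_pressed: bool,
                         current_question: str,
                         stored_question: str,
                         random_question_requested: bool) -> bool:
        # Run on an explicit button press or when the committed text differs from the
        # stored question (pressing Enter commits st.text_input), but skip the rerun
        # that the "Random question" button itself causes.
        user_triggered = run_pressed or current_question != stored_question
        return user_triggered and not random_question_requested

    assert should_run_query(True, "q1", "q1", False)       # Run clicked
    assert should_run_query(False, "q2", "q1", False)      # Enter committed a new question
    assert not should_run_query(False, "q2", "q1", True)   # rerun from "Random question"
    assert not should_run_query(False, "q1", "q1", False)  # nothing changed
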
