
Commit: fix double-quote-string

FangXinyu-0913 committed Jul 22, 2024
1 parent 9e5a62b commit 171378b
Showing 1 changed file with 4 additions and 4 deletions.
8 changes: 4 additions & 4 deletions vlmeval/vlm/video_llm/video_llava.py
@@ -19,7 +19,7 @@ def read_video_pyav(container, indices):
             break
         if i >= start_index and i in indices:
             frames.append(frame)
-    return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+    return np.stack([x.to_ndarray(format='rgb24') for x in frames])


 class VideoLLaVA_HF(BaseModel):
@@ -56,8 +56,8 @@ def generate_inner(self, message, dataset=None):
         indices = np.arange(0, total_frames, total_frames / self.nframe).astype(int)
         clip = read_video_pyav(container, indices)

-        prompt = f"USER: <video>\n{question} ASSISTANT:"
-        inputs = self.processor(text=prompt, videos=clip, return_tensors="pt").to(self.model.device)
+        prompt = f'USER: <video>\n{question} ASSISTANT:'
+        inputs = self.processor(text=prompt, videos=clip, return_tensors='pt').to(self.model.device)

         # Generate args -- deperecated
         generation_args = {
@@ -117,7 +117,7 @@ def get_model_output(self, model, video_processor, tokenizer, video, qs):
         else:
             qs = ''.join([DEFAULT_IMAGE_TOKEN] * 8) + '\n' + qs

-        conv_mode = "llava_v1"
+        conv_mode = 'llava_v1'
         device = torch.device('cuda')
         conv = conv_templates[conv_mode].copy()
         conv.append_message(conv.roles[0], qs)
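For context, the changed lines are exercised roughly as in the minimal sketch below. It is a hypothetical usage sketch, not code from the repository: the file name, the nframe value, and the question text are assumptions; the index sampling and the single-quoted prompt format mirror the diff above.

    import av
    import numpy as np

    nframe = 8                                     # assumed frame budget
    container = av.open('demo.mp4')                # hypothetical local video file
    total_frames = container.streams.video[0].frames

    # Uniformly spaced frame indices, as computed in generate_inner above
    indices = np.arange(0, total_frames, total_frames / nframe).astype(int)
    clip = read_video_pyav(container, indices)     # (nframe, H, W, 3) uint8 array

    # Prompt built with the single-quote style this commit enforces
    question = 'What is happening in this video?'  # hypothetical question
    prompt = f'USER: <video>\n{question} ASSISTANT:'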
