Unfreeze projector when finetuning with LoRA.
haotian-liu committed Oct 26, 2023
1 parent: 9ad7265 · commit: 9e3f3d0
Showing 1 changed file with 4 additions and 0 deletions.
llava/model/llava_arch.py: 4 additions & 0 deletions
@@ -69,6 +69,10 @@ def initialize_vision_modules(self, model_args, fsdp=None):

         if getattr(self, 'mm_projector', None) is None:
             self.mm_projector = build_vision_projector(self.config)
+        else:
+            # In case it is frozen by LoRA
+            for p in self.mm_projector.parameters():
+                p.requires_grad = True

         if pretrain_mm_mlp_adapter is not None:
             mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
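
For context, here is a minimal sketch of why the added else branch is needed. It assumes LoRA is applied through Hugging Face's peft library, whose get_peft_model call sets requires_grad = False on every base-model parameter, so any module that should keep training (such as the projector) has to be unfrozen afterwards. The ToyModel class and module names below are illustrative, not the repository's code:

import torch.nn as nn
from peft import LoraConfig, get_peft_model

class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.backbone = nn.Linear(16, 16)      # stands in for the language model
        self.mm_projector = nn.Linear(16, 16)  # stands in for the vision projector

model = ToyModel()
model = get_peft_model(model, LoraConfig(r=8, lora_alpha=16, target_modules=["backbone"]))

# After get_peft_model, every base parameter (including the projector's) has
# requires_grad == False; only the injected LoRA weights remain trainable.
for p in model.get_base_model().mm_projector.parameters():
    p.requires_grad = True  # same fix as the commit: unfreeze the projector

Without the unfreeze, the projector would receive no gradient updates during LoRA fine-tuning, even though its weights are expected to adapt alongside the LoRA adapters.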
