Skip to content

Commit

Permalink
EfficientSAM
Browse files Browse the repository at this point in the history
  • Loading branch information
yformer committed Dec 12, 2023
1 parent 7ebc25f commit ce556d0
Show file tree
Hide file tree
Showing 3 changed files with 30 additions and 27 deletions.
Binary file modified .DS_Store
Binary file not shown.
12 changes: 6 additions & 6 deletions EfficientSAM_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,14 @@


models = {}
# Build the VIT-tiny model.
models['vitt'] = build_efficient_sam_vitt()
# Build the EfficientSAM-Ti model.
models['efficientsam_ti'] = build_efficient_sam_vitt()

# Since VIT-small is >100MB, we store the zip file.
# Since EfficientSAM-S checkpoint is >100MB, we store the zip file.
with zipfile.ZipFile("weights/efficient_sam_vits.pt.zip", 'r') as zip_ref:
zip_ref.extractall("weights")
# # Build the VIT-small model.
models['vits'] = build_efficient_sam_vits()
# # Build the EfficientSAM-S model.
models['efficientsam_s'] = build_efficient_sam_vits()

# load an image
sample_image_np = np.array(Image.open("figs/examples/dogs.jpg"))
Expand All @@ -25,7 +25,7 @@
input_points = torch.tensor([[[[580, 350], [650, 350]]]])
input_labels = torch.tensor([[[1, 1]]])

# Run inference for both vitt and vits based models.
# Run inference for both EfficientSAM-Ti and EfficientSAM-S based models.
for model_name, efficient_sam in models.items():
print('Running inference using ', model_name)
predicted_logits, predicted_iou = efficient_sam(
Expand Down
45 changes: 24 additions & 21 deletions notebooks/EfficientSAM_example.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
"id": "AIrAUKnLClPD"
},
"source": [
"This script provides example for how to get visualization result from EfficientSAM using ready-to-use torchscript, part of the code is borrow from MobileSAM project, many thanks!"
"This script provides an example of how to get a visualization result from EfficientSAM using a checkpoint (ready-to-use torchscript can also be used); part of the code is borrowed from the MobileSAM project, many thanks!"
]
},
{
Expand Down Expand Up @@ -332,7 +332,6 @@
}
],
"source": [
"\n",
"from efficient_sam import build_efficient_sam_vitt, build_efficient_sam_vits\n",
"\n",
"efficient_sam = build_efficient_sam_vitt()\n",
Expand All @@ -359,13 +358,6 @@
"prepare your own image here"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 25,
Expand Down Expand Up @@ -487,19 +479,30 @@
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"machine_shape": "hm",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
"custom": {
"cells": [],
"metadata": {
"accelerator": "GPU",
"colab": {
"machine_shape": "hm",
"provenance": []
},
"fileHeader": "",
"fileUid": "f337ddbb-4ec7-4bc4-8c8b-f31305249752",
"isAdHoc": false,
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
},
"language_info": {
"name": "python"
}
"indentAmount": 2
},
"nbformat": 4,
"nbformat_minor": 0
"nbformat_minor": 2
}

0 comments on commit ce556d0

Please sign in to comment.