Replace deprecated ops.fb with ops.fbgemm in fbgemm_gpu Python source.
Reviewed By: jianyuh

Differential Revision: D33303039

fbshipit-source-id: 038ed37141ae392174cdec8a033940dd630ce370
Rick Weyrauch authored and facebook-github-bot committed Jan 5, 2022
1 parent 58545e7 commit ac69edd
Showing 3 changed files with 13 additions and 13 deletions.
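
For call sites outside this diff, the mechanical change is `torch.ops.fb.<op>` → `torch.ops.fbgemm.<op>`. Below is a minimal sketch of a transition shim, assuming `fbgemm_gpu` is importable and that a given op may still be registered under either namespace during the migration; the `resolve_op` helper is purely illustrative and not part of this commit:

```python
import torch
import fbgemm_gpu  # noqa: F401  # importing registers the torch.ops.fbgemm operators


def resolve_op(name: str):
    # Hypothetical transition helper, not part of this commit: prefer the
    # new torch.ops.fbgemm namespace and fall back to the deprecated
    # torch.ops.fb namespace if the op is only registered there.
    for ns in (torch.ops.fbgemm, torch.ops.fb):
        try:
            return getattr(ns, name)
        except (AttributeError, RuntimeError):
            continue
    raise RuntimeError(f"operator {name!r} not found under fbgemm or fb")


pruned_hashmap_lookup = resolve_op("pruned_hashmap_lookup")
```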
8 changes: 4 additions & 4 deletions fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py
@@ -1896,7 +1896,7 @@ def hashtable( # noqa C901
     assert hash_table.numel() * 4 < 2 ** 32
     # initialize
     hash_table[:, :] = -1
-    torch.ops.fb.pruned_hashmap_insert(
+    torch.ops.fbgemm.pruned_hashmap_insert(
         chosen_indices, dense_indices, offsets, hash_table, hash_table_offsets
     )
 
@@ -1919,7 +1919,7 @@ def hashtable( # noqa C901
 
     empirical_hit_rate = np.mean(
         [
-            torch.ops.fb.pruned_hashmap_lookup(
+            torch.ops.fbgemm.pruned_hashmap_lookup(
                 indices, offsets, hash_table, hash_table_offsets
             )
             .ne(-1)
@@ -1932,7 +1932,7 @@ def hashtable( # noqa C901
 
     time_per_iter = benchmark_requests(
         requests,
-        lambda indices, offsets, _: torch.ops.fb.pruned_hashmap_lookup(
+        lambda indices, offsets, _: torch.ops.fbgemm.pruned_hashmap_lookup(
             indices, offsets, hash_table, hash_table_offsets
         ),
     )
@@ -2010,7 +2010,7 @@ def pruned_array( # noqa C901
 
     time_per_iter = benchmark_requests(
         requests,
-        lambda indices, offsets, _: torch.ops.fb.pruned_array_lookup(
+        lambda indices, offsets, _: torch.ops.fbgemm.pruned_array_lookup(
             indices,
             offsets,
             index_remappings,
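
As a usage sketch, the renamed hashmap ops round-trip as follows. The shapes and dtypes here are assumptions inferred from the call sites above (the benchmark builds its tensors from generated requests), not from the op schemas:

```python
import torch
import fbgemm_gpu  # noqa: F401  # registers the torch.ops.fbgemm operators

# One table with 1024 slots; -1 marks an empty slot, matching the
# `hash_table[:, :] = -1` initialization above. Shapes/dtypes are assumptions.
hash_table = torch.full((1024, 2), -1, dtype=torch.int32)
hash_table_offsets = torch.tensor([0, hash_table.shape[0]]).long()

indices = torch.tensor([3, 7, 42], dtype=torch.int32)
dense_indices = torch.tensor([0, 1, 2], dtype=torch.int32)
offsets = torch.tensor([0, 3], dtype=torch.int32)  # one batch of 3 lookups

# Insert the sparse->dense remapping, then look it back up under the new namespace.
torch.ops.fbgemm.pruned_hashmap_insert(
    indices, dense_indices, offsets, hash_table, hash_table_offsets
)
dense = torch.ops.fbgemm.pruned_hashmap_lookup(
    indices, offsets, hash_table, hash_table_offsets
)
hit_rate = dense.ne(-1).float().mean()  # mirrors the empirical_hit_rate computation above
```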
6 changes: 3 additions & 3 deletions fbgemm_gpu/test/split_table_batched_embeddings_test.py
@@ -3330,7 +3330,7 @@ def next_power_of_2(x: int) -> int:
         hash_table[:, :] = -1
         hash_table_offsets = torch.tensor([0] + np.cumsum(capacities).tolist()).long()
 
-        torch.ops.fb.pruned_hashmap_insert(
+        torch.ops.fbgemm.pruned_hashmap_insert(
             indices, dense_indices, offsets, hash_table, hash_table_offsets
         )
 
@@ -3349,7 +3349,7 @@ def next_power_of_2(x: int) -> int:
         if use_cpu_hashtable:
             dense_indices_ = ht.lookup(indices, offsets)
         else:
-            dense_indices_ = torch.ops.fb.pruned_hashmap_lookup(
+            dense_indices_ = torch.ops.fbgemm.pruned_hashmap_lookup(
                 indices, offsets, hash_table, hash_table_offsets
             )
 
@@ -3362,7 +3362,7 @@ def next_power_of_2(x: int) -> int:
         if use_cpu_hashtable:
             dense_indices_ = ht.lookup(indices, offsets)
         else:
-            dense_indices_ = torch.ops.fb.pruned_hashmap_lookup(
+            dense_indices_ = torch.ops.fbgemm.pruned_hashmap_lookup(
                 indices, offsets, hash_table, hash_table_offsets
             )
 
12 changes: 6 additions & 6 deletions fbgemm_gpu/test/uvm_test.py
@@ -202,18 +202,18 @@ def test_uvm_memadviceDontFork(self, sizes: List[int], vanilla: bool) -> None:
     @settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
     def test_uvm_to_cpu_clone(self, sizes: List[int], vanilla: bool) -> None:
         op = (
-            torch.ops.fb.new_managed_tensor
+            torch.ops.fbgemm.new_managed_tensor
             if not vanilla
-            else torch.ops.fb.new_vanilla_managed_tensor
+            else torch.ops.fbgemm.new_vanilla_managed_tensor
         )
         uvm_t = op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
-        assert torch.ops.fb.is_uvm_tensor(uvm_t)
-        assert torch.ops.fb.uvm_storage(uvm_t)
+        assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
+        assert torch.ops.fbgemm.uvm_storage(uvm_t)
 
         cpu_clone = torch.ops.fb.uvm_to_cpu_clone(uvm_t)
 
-        assert not torch.ops.fb.is_uvm_tensor(cpu_clone)
-        assert not torch.ops.fb.uvm_storage(cpu_clone)
+        assert not torch.ops.fbgemm.is_uvm_tensor(cpu_clone)
+        assert not torch.ops.fbgemm.uvm_storage(cpu_clone)
 
 
 if __name__ == "__main__":
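
A minimal end-to-end sketch of the UVM ops this test touches, assuming a CUDA device and an fbgemm_gpu build with UVM support; note that the diff above leaves `uvm_to_cpu_clone` itself in the deprecated `fb` namespace:

```python
import torch
import fbgemm_gpu  # noqa: F401  # registers the torch.ops.fbgemm operators

# Allocate a 16x32 float tensor backed by CUDA unified (managed) memory.
uvm_t = torch.ops.fbgemm.new_managed_tensor(
    torch.empty(0, device="cuda:0", dtype=torch.float), [16, 32]
)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)

# Clone into ordinary CPU memory; this call site is still under the old
# namespace in the diff above.
cpu_clone = torch.ops.fb.uvm_to_cpu_clone(uvm_t)
assert not torch.ops.fbgemm.is_uvm_tensor(cpu_clone)
assert not torch.ops.fbgemm.uvm_storage(cpu_clone)
```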
