# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
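
"""
Benchmark driver for IVF index variants, built on the bench_fw framework.
Each experiment (sift1M, bigann, ssnpp) assembles a list of index factory
descriptors and runs k=1 nearest-neighbor search under the L2 metric.
"""
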
import argparse
import logging
import os

from bench_fw.benchmark import Benchmark
from bench_fw.benchmark_io import BenchmarkIO
from bench_fw.descriptors import DatasetDescriptor, IndexDescriptor

logging.basicConfig(level=logging.INFO)
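

# SIFT1M: sweep IVF{nlist},Flat with the number of lists ranging 2**8 .. 2**14.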
def sift1M(bio):
    benchmark = Benchmark(
        num_threads=32,
        training_vectors=DatasetDescriptor(
            namespace="std_d", tablename="sift1M"
        ),
        database_vectors=DatasetDescriptor(
            namespace="std_d", tablename="sift1M"
        ),
        query_vectors=DatasetDescriptor(
            namespace="std_q", tablename="sift1M"
        ),
        index_descs=[
            IndexDescriptor(
                factory=f"IVF{2 ** nlist},Flat",
            )
            for nlist in range(8, 15)
        ],
        k=1,
        distance_metric="L2",
    )
    benchmark.set_io(bio)
    benchmark.benchmark(
        result_file="result.json",
        local=False,
        train=True,
        reconstruct=False,
        knn=True,
        range=False,
    )
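

# BigANN: for each database scale (1M to 50M vectors), compare IVF,Flat against
# IVF with an HNSW32 coarse quantizer, with the number of lists ranging
# 2**11 .. 2**18.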
def bigann(bio):
    for scale in [1, 2, 5, 10, 20, 50]:
        benchmark = Benchmark(
            num_threads=32,
            training_vectors=DatasetDescriptor(
                namespace="std_t", tablename="bigann1M"
            ),
            database_vectors=DatasetDescriptor(
                namespace="std_d", tablename=f"bigann{scale}M"
            ),
            query_vectors=DatasetDescriptor(
                namespace="std_q", tablename="bigann1M"
            ),
            index_descs=[
                IndexDescriptor(
                    factory=f"IVF{2 ** nlist},Flat",
                )
                for nlist in range(11, 19)
            ] + [
                IndexDescriptor(
                    factory=f"IVF{2 ** nlist}_HNSW32,Flat",
                    construction_params=[
                        None,
                        {"efConstruction": 200, "efSearch": 40},
                    ],
                )
                for nlist in range(11, 19)
            ],
            k=1,
            distance_metric="L2",
        )
        benchmark.set_io(bio)
        benchmark.benchmark(
            f"result{scale}.json",
            local=False,
            train=True,
            reconstruct=False,
            knn=True,
            range=False,
        )
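

# SSNPP: compare IVF with PQ256x4fs plus SQfp16 refinement, plain IVF,Flat
# (number of lists 2**9 .. 2**15), and two non-IVF baselines
# (PQ256x4fs,Refine(SQfp16) and HNSW32).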
def ssnpp(bio):
    benchmark = Benchmark(
        num_threads=32,
        training_vectors=DatasetDescriptor(
            tablename="ssnpp_training_5M.npy"
        ),
        database_vectors=DatasetDescriptor(
            tablename="ssnpp_database_5M.npy"
        ),
        query_vectors=DatasetDescriptor(
            tablename="ssnpp_queries_10K.npy"
        ),
        index_descs=[
            IndexDescriptor(
                factory=f"IVF{2 ** nlist},PQ256x4fs,Refine(SQfp16)",
            )
            for nlist in range(9, 16)
        ] + [
            IndexDescriptor(
                factory=f"IVF{2 ** nlist},Flat",
            )
            for nlist in range(9, 16)
        ] + [
            IndexDescriptor(
                factory="PQ256x4fs,Refine(SQfp16)",
            ),
            IndexDescriptor(
                factory="HNSW32",
            ),
        ],
        k=1,
        distance_metric="L2",
    )
    benchmark.set_io(bio)
    benchmark.benchmark(
        "result.json",
        local=False,
        train=True,
        reconstruct=False,
        knn=True,
        range=False,
    )
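

# Command-line entry point: the first argument selects the experiment, the
# second names an existing base directory; a per-experiment subdirectory is
# created and handed to BenchmarkIO.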
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("experiment")
    parser.add_argument("path")
    args = parser.parse_args()
    assert os.path.exists(args.path)
    path = os.path.join(args.path, args.experiment)
    if not os.path.exists(path):
        os.mkdir(path)
    bio = BenchmarkIO(
        path=path,
    )
    if args.experiment == "sift1M":
        sift1M(bio)
    elif args.experiment == "bigann":
        bigann(bio)
    elif args.experiment == "ssnpp":
        ssnpp(bio)
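
# Example invocation (the data path below is a placeholder; the referenced
# datasets must already be accessible to BenchmarkIO under that directory):
#
#   python bench_fw_ivf.py sift1M /path/to/benchmark/data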