-
Notifications
You must be signed in to change notification settings - Fork 8
/
Copy pathgenerate_perlmutter_scripts.py
149 lines (126 loc) · 5.49 KB
/
generate_perlmutter_scripts.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
import os
import shutil
from itertools import product
import glob
import re
import pandas as pd
# For reference
"""NWQ-VQE Options
REQUIRED
--hamiltonian, -f Path to the input Hamiltonian file (formatted as a sum of Fermionic operators, see examples)
--nparticles, -n Number of electrons in molecule
--backend, -b Simulation backend. Defaults to CPU
--list-backends, -l List available backends and exit.
OPTIONAL
--seed Random seed for initial point and empirical gradient estimation. Defaults to time(NULL)
--config Path to NWQ-Sim config file. Defaults to "../default_config.json"
--opt-config Path to config file for NLOpt optimizer parameters
--optimizer NLOpt optimizer name. Defaults to LN_COBYLA
--reltol Relative tolerance termination criterion. Defaults to -1 (off)
--abstol Relative tolerance termination criterion. Defaults to -1 (off)
--maxeval Maximum number of function evaluations for optimizer. Defaults to 200
--maxtime Maximum optimizer time (seconds). Defaults to -1.0 (off)
--stopval Cutoff function value for optimizer. Defaults to -MAXFLOAT (off)
--xacc Use XACC indexing scheme, otherwise uses DUCC scheme. (Deprecated, true by default)
--ducc Use DUCC indexing scheme, otherwise uses XACC scheme. (Defaults to true)
ADAPT-VQE OPTIONS
--adapt Use ADAPT-VQE for dynamic ansatz construction. Defaults to false
--adapt-maxeval Set a maximum iteration count for ADAPT-VQE. Defaults to 100
--adapt-gradtol Cutoff absolute tolerance for operator gradient norm. Defaults to 1e-3
--adapt-fvaltol Cutoff absolute tolerance for function value. Defaults to 1e-6
--qubit Uses Qubit instead of Fermionic operators for ADAPT-VQE. Defaults to false
--adapt-pool Sets the pool size for Qubit operators. Defaults to -1"""
# Root directory (or directories) containing the DUCC Hamiltonian files to scan.
hamilpath = ["/global/homes/m/mxburns/benchmarks/DUCC-Hamiltonians/H6"]
# Orbital-treatment subfolders to include in the recursive search.
orbitals = ["Bare", 'DUCC3']
# Experiment name: used for the output directory tree and SLURM job names.
name = 'adapt_vqe_h6_bare_comp'
# Solver flags passed to every nwq_vqe invocation (flag -> value).
# An empty-string value denotes a boolean switch that takes no argument.
args = {
    'optimizer': 'LN_COBYLA',
    'maxeval': 500,
    'abstol': 1e-3,
    'backend': 'NVGPU',
    'xacc': '',
    'adapt': '',
    # 'qubit': '',
    # 'adapt-pool': 25,
    'adapt-gradtol': 1e-6,
    'adapt-fvaltol': 1e-10,
    'adapt-maxeval': 1000
}
def filterfunc(val):
    """Return True if *val* encodes an orbital count of at most 11.

    The count is taken from the first '<N>-Orbitals' component found in the
    path string; paths with no such component are excluded.
    """
    match = re.search(r'(\d+)-Orbitals', val)
    if match is None:
        # No orbital count in this path: filter it out.
        return False
    return int(match.group(1)) <= 11
# SLURM batch-script template for NERSC Perlmutter's GPU partition.
# Placeholders ({nodes}, {gpus}, {name}, ...) are filled per job by
# write_sbatchfile via str.format.
script_template = """#!/bin/bash
# Base script generated by NERSC Batch Script Generator on https://iris.nersc.gov/jobscript.html
#SBATCH -N {nodes}
#SBATCH -C gpu
#SBATCH -G {gpus}
#SBATCH -q regular
#SBATCH -J nwq_vqe_{name}
#SBATCH -t {time}
#SBATCH [email protected]
#SBATCH --mail-type=ALL
#SBATCH -A m4243
#SBATCH -o {outpath}
#SBATCH -e {errpath}
# OpenMP settings:
export OMP_NUM_THREADS={threads}
export OMP_PLACES=threads
export OMP_PROC_BIND=spread
#run the application:
# applications may perform better with --gpu-bind=none instead of --gpu-bind=single:1
srun -n {cpus} -c {threads} --cpu_bind=cores -G {gpus} --gpu-bind=single:1 {execpath} {args}"""
# Path to the compiled nwq_vqe solver binary; copied next to the job scripts.
_exec = "/global/homes/m/mxburns/development/NWQ-Sim/build/vqe/nwq_vqe"
# Per-job SLURM resource request: one task, one thread, one GPU, one node.
threads = 1
cpus = 1
gpus = 1
# Wall-clock limit per job (hh:mm:ss).
time = "12:00:00"
nodes = 1
def find_directories(base_path, subfolder):
    """Return the names of all immediate subdirectories of base_path/subfolder.

    Only directory entries are included; plain files are skipped. Returns an
    empty list when the folder contains no subdirectories.
    """
    root = f'{base_path}/{subfolder}'
    result = []
    for entry in os.listdir(root):
        if os.path.isdir(f'{root}/{entry}'):
            result.append(entry)
    # BUG FIX: the original returned `i` (the last entry listed, whether or
    # not it was a directory) instead of the accumulated list, and raised
    # NameError when the folder was empty.
    return result
# Collect every '*-xacc' Hamiltonian file under each (root, orbital) pair,
# keeping only those that filterfunc accepts (<= 11 orbitals), flattened
# into a single list.
problems = {
    'hamiltonian': sum([list(filter(filterfunc, glob.glob(f'{h}/**/{o}/**/*-xacc', recursive=True))) for h, o in product(hamilpath, orbitals)], []),
}
# Electron count is parsed from the '<N>-electrons' component of each path.
problems['nparticles'] = [int(re.findall(r'(\d+)-electrons', i)[0]) for i in problems['hamiltonian']]
# Flatten the shared solver options into one CLI argument string.
arglist = ' '.join([f'--{i} {j}' for i, j in args.items()])
def make_dir(basepath, name):
    """Ensure the output tree basepath/name/{jobs,err,out} exists.

    ROBUSTNESS FIX: the original skipped creation entirely whenever
    basepath/name already existed, so a partially created tree (e.g. from an
    interrupted earlier run) was never completed. exist_ok makes each
    makedirs call idempotent.
    """
    for sub in ('jobs', 'err', 'out'):
        os.makedirs(f'{basepath}/{name}/{sub}', exist_ok=True)
def write_sbatchfile(dirpath, name, index, hamil, part, args):
    """Render the SBATCH template for one (hamiltonian, particle-count) job
    and write it to {dirpath}/jobs/{name}_{index}.sh.

    Relies on module-level globals: script_template, basepath, threads,
    cpus, gpus, nodes, time.
    """
    job_tag = f'{name}_{index}'
    # Per-problem flags come first, followed by the shared option string.
    full_args = f'--hamiltonian {hamil} --nparticles {part} ' + args
    rendered = script_template.format(
        name=job_tag,
        args=full_args,
        execpath=f'{basepath}/{name}/nwq_vqe',
        outpath=f'{dirpath}/out/{job_tag}.out',
        errpath=f'{dirpath}/err/{job_tag}.err',
        threads=threads,
        cpus=cpus,
        gpus=gpus,
        nodes=nodes,
        time=time,
    )
    with open(f'{dirpath}/jobs/{job_tag}.sh', 'w') as script_file:
        script_file.write(rendered)
# All generated artifacts live under $SCRATCH/jobs/vqe/<name>.
basepath = os.environ['SCRATCH']+'/jobs/vqe'
make_dir(basepath, name)
# Snapshot this generator script and the solver binary next to the jobs so
# the run stays reproducible even if the sources change later.
shutil.copyfile(__file__, f'{basepath}/{name}/generate_perlmutter_scripts_{name}.py')
shutil.copy2(_exec, f'{basepath}/{name}/nwq_vqe')
data = []
# Emit one SBATCH script per (hamiltonian, particle-count) pair.
for index, (hamil, part) in enumerate(zip(problems['hamiltonian'], problems['nparticles'])):
    write_sbatchfile(os.path.join(basepath, name), name, index, hamil, part, arglist)
    data.append((index, hamil, part, *args.values()))
# Record job metadata (index, inputs, solver options) for later analysis.
df = pd.DataFrame(data, columns=['job', 'hamiltonian', 'particles', *args.keys()])
df.to_csv(f'{basepath}/{name}/jobs.csv', index=False)
# launch.sh submits every generated job script with sbatch.
with open(f'{basepath}/{name}/launch.sh', 'w') as launchfile:
    for index in range(len(problems['hamiltonian'])):
        launchfile.write(f'sbatch {basepath}/{name}/jobs/{name}_{index}.sh\n')
print(f"Done writing {len(problems['hamiltonian'])} jobs to {basepath}/{name}\nLaunch script at '{basepath}/{name}/launch.sh'")