Commit f010d6e
Tigran Sedrakyan committed May 2, 2024 · 0 parents
Showing 23 changed files with 11,026 additions and 0 deletions.
@@ -0,0 +1,5 @@
__pycache__/
notebooks/digits
notebooks/ideal
notebooks/noisy
notebooks/backup
@@ -0,0 +1,15 @@
cff-version: 1.2.0
title: Photonic QGAN
message: >-
  If you use this software, please cite it using the
  metadata from this file.
type: software
authors:
  - given-names: Tigran
    family-names: Sedrakyan
    email: [email protected]
    affiliation: Quandela
repository-code: 'https://github.com/tigran-sedrakyan/photonic-qgan'
abstract: >-
  Photonic implementation of quantum generative
  adversarial networks.
Empty file.
@@ -0,0 +1,34 @@
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset


class DigitsDataset(Dataset):
    """Dataset of 8x8 digit images loaded from a CSV file and filtered by label."""

    def __init__(self, csv_file, label=0, transform=None):
        self.label = label
        self.csv_file = csv_file
        self.transform = transform
        self.df = self.filter_by_label(label)

    def filter_by_label(self, label):
        # Keep only the rows whose last column matches the requested label
        df = pd.read_csv(self.csv_file)
        df = df.loc[df.iloc[:, -1] == label]
        return df

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()

        # Pixel values lie in [0, 16]; normalise to [0, 1] and reshape to 8x8
        image = self.df.iloc[idx, :-1] / 16
        image = np.array(image)
        image = image.astype(np.float32).reshape(8, 8)

        if self.transform:
            image = self.transform(image)

        # Return the image together with its label
        return image, self.label
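
A minimal usage sketch (not from this commit): it assumes a CSV file in which each row holds the 64 pixel values of an 8x8 digit followed by its label in the last column, e.g. a dump of the sklearn digits dataset. The path "notebooks/digits/digits.csv" is purely illustrative.

from torch.utils.data import DataLoader

# hypothetical CSV path; any file with 64 pixel columns plus a trailing label column works
dataset = DigitsDataset(csv_file="notebooks/digits/digits.csv", label=0)
loader = DataLoader(dataset, batch_size=8, shuffle=True)

images, labels = next(iter(loader))
print(images.shape)  # expected: torch.Size([8, 8, 8]), a batch of eight 8x8 images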
Large diffs are not rendered by default.

@@ -0,0 +1,62 @@
import numpy as np
import perceval as pcvl


def state_to_int(state, pnr):
    """Encode an output Fock state as an integer, either with photon-number
    resolution (pnr) or with threshold (click/no-click) detection."""
    m = state.m
    res = 0
    for i in range(m):
        if pnr:
            # photon-number-resolving: base-(m + 1) encoding of the photon counts
            res += state[i] * (m + 1) ** (m - i)
        elif state[i] != 0:
            # threshold detection: binary encoding of which modes clicked
            res += 2 ** (m - i)
    return res


# generates a mapping dictionary from output Fock states to integers
def get_output_map(circuit, input_state, pnr=True, lossy=False):
    proc = pcvl.Processor("SLOS")
    proc.set_circuit(circuit)
    proc.with_input(input_state)
    sampler = pcvl.algorithm.Sampler(proc)

    rev_map = {}
    possible_outputs = []

    all_states = sampler.probs()["results"].keys()
    if pnr or not lossy:
        possible_states_list = list(all_states)
    else:
        # threshold detectors on a lossy circuit: keep only states with at most one photon per mode
        possible_states_list = [
            key for key in all_states if all(i < 2 for i in key)
        ]

    for key in possible_states_list:
        int_state = state_to_int(key, pnr)
        if int_state in rev_map:
            rev_map[int_state].append(key)
        else:
            rev_map[int_state] = [key]
        if int_state not in possible_outputs:
            possible_outputs.append(int_state)

    out_map = {}
    for index, int_state in enumerate(sorted(possible_outputs)):
        for basic_state in rev_map[int_state]:
            out_map[basic_state] = index
    return out_map


# maps the output of a generator circuit to image pixels
def map_generator_output(gen_out, expected_len):
    gen_out_len = len(gen_out)
    surplus_half = np.abs((gen_out_len - expected_len) // 2)

    if gen_out_len > expected_len:
        # crop the centre of the generator output
        return gen_out[surplus_half : surplus_half + expected_len]
    else:
        # pad with zeros on both sides
        ret = np.zeros(expected_len)
        ret[surplus_half : surplus_half + gen_out_len] = gen_out
        return ret
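
As a rough illustration of how these helpers combine (a sketch, not from this commit; the 3-mode circuit, input state, and probability vector are arbitrary):

import numpy as np
import perceval as pcvl
from perceval.components.unitary_components import BS

# arbitrary 3-mode example circuit and input state
circuit = pcvl.Circuit(3)
circuit.add((0, 1), BS())
circuit.add((1, 2), BS())
input_state = pcvl.BasicState([1, 0, 1])

# each reachable output Fock state is mapped to an integer pixel bin
out_map = get_output_map(circuit, input_state, pnr=True)
print(len(out_map), "output states ->", len(set(out_map.values())), "pixel bins")

# crop or zero-pad a per-bin probability vector to a 64-pixel image
probs = np.ones(len(set(out_map.values())))
pixels = map_generator_output(probs, expected_len=64)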
@@ -0,0 +1,226 @@
import re

import numpy as np
import perceval as pcvl
from perceval.components.unitary_components import BS, PS


# a parametrized quantum circuit with variational and encoding layers
class ParametrizedQuantumCircuit:
    def __init__(self, m=3, arch=["var", "var", "var", "enc", "var", "var", "var"], use_clements=False):
        self.m = m
        self.arch = arch
        if use_clements:
            self.circuit = self.get_circuit_clements_based()
        else:
            self.circuit = self.get_circuit()

        self.var_params, self.enc_params = self.get_params()
        self.var_param_names = [p.name for p in self.var_params]
        self.enc_param_names = [p.name for p in self.enc_params]

    def get_variational_layer(self, l):
        modes = self.m
        var = pcvl.Circuit(modes, name="var_" + str(l))
        # add phase shifters
        for m in range(modes):
            var.add(
                m,
                PS(
                    pcvl.P(
                        "phi_" + str(m) + "_" + str(l + 1),
                        min_v=0,
                        max_v=2 * np.pi,
                        periodic=True,
                    )
                ),
            )

        # add beam splitters in a brick pattern: even pairs, then odd pairs
        for m in range(0, modes - 1, 2):
            var.add(
                (m, m + 1),
                BS(
                    pcvl.P(
                        "theta_" + str(m) + "_" + str(l + 1),
                        min_v=0,
                        max_v=2 * np.pi,
                        periodic=True,
                    ),
                    pcvl.P(
                        "psi_" + str(m) + "_" + str(l + 1),
                        min_v=0,
                        max_v=2 * np.pi,
                        periodic=True,
                    ),
                ),
            )
        for m in range(1, modes - 1, 2):
            var.add(
                (m, m + 1),
                BS(
                    pcvl.P(
                        "theta_" + str(m) + "_" + str(l + 1),
                        min_v=0,
                        max_v=2 * np.pi,
                        periodic=True,
                    ),
                    pcvl.P(
                        "psi_" + str(m) + "_" + str(l + 1),
                        min_v=0,
                        max_v=2 * np.pi,
                        periodic=True,
                    ),
                ),
            )
        return var

    def get_variational_clements_layer(self, l):
        modes = self.m
        mode_range = tuple(range(modes))
        var_clem = pcvl.Circuit(modes, name="var_clem_" + str(l))

        # add a generic (Clements-based) interferometer
        var_clem.add(
            mode_range,
            pcvl.Circuit.generic_interferometer(
                modes,
                lambda i: BS(
                    theta=pcvl.P("theta_" + str(i) + "_" + str(l + 1)),
                    phi_tr=pcvl.P("psi_" + str(i) + "_" + str(l + 1)),
                ),
            ),
        )

        return var_clem

    def get_encoding_layer(self, l, mode_range):
        modes = self.m
        enc = pcvl.Circuit(modes, name="enc_" + str(l))

        # add phase shifters on the active modes
        for m in mode_range:
            enc.add(
                m,
                PS(
                    pcvl.P(
                        "enc_" + str(m) + "_" + str(l + 1),
                        min_v=0,
                        max_v=2 * np.pi,
                        periodic=True,
                    )
                ),
            )
        return enc

    def get_circuit(self):
        modes = self.m
        mode_range = tuple(range(modes))
        active_modes = mode_range
        arch = self.arch
        c = pcvl.Circuit(modes)

        var_layer_num = 0
        enc_layer_num = 0
        for layer_name in arch:
            # layer names look like "var", "enc", "enc[1:3]" or "enc[0,2]"
            split = re.split(r"\[|\]", layer_name)
            if len(split) == 1:
                layer_type = split[0]
            else:
                layer_type, modes_type = split[:-1]
                if ":" in modes_type:
                    start, end = modes_type.split(":")
                    active_modes = np.arange(int(start), int(end))
                else:
                    active_modes = np.array(modes_type.split(","), dtype=int)
                active_modes = tuple(int(val) for val in active_modes)

            if layer_type == "var":
                c.add(mode_range, self.get_variational_layer(var_layer_num))
                var_layer_num += 1
            else:
                c.add(mode_range, self.get_encoding_layer(enc_layer_num, active_modes))
                enc_layer_num += 1

        return c

    def get_circuit_clements_based(self):
        modes = self.m
        mode_range = tuple(range(modes))
        active_modes = mode_range
        arch = self.arch
        c = pcvl.Circuit(modes)

        var_layer_num = 0
        enc_layer_num = 0
        for layer_name in arch:
            split = re.split(r"\[|\]", layer_name)
            if len(split) == 1:
                layer_type = split[0]
            else:
                layer_type, modes_type = split[:-1]
                if ":" in modes_type:
                    start, end = modes_type.split(":")
                    active_modes = np.arange(int(start), int(end))
                else:
                    active_modes = np.array(modes_type.split(","), dtype=int)
                active_modes = tuple(int(val) for val in active_modes)

            if layer_type == "var":
                c.add(mode_range, self.get_variational_clements_layer(var_layer_num))
                var_layer_num += 1
            else:
                c.add(mode_range, self.get_encoding_layer(enc_layer_num, active_modes))
                enc_layer_num += 1

        return c

    def get_params(self):
        params = self.circuit.get_parameters()
        var_params = []
        enc_params = []
        for p in params:
            if "enc" in p.name:
                enc_params.append(p)
            else:
                var_params.append(p)
        return var_params, enc_params

    def init_params(self, red_factor=1, init_var_params=None):
        if init_var_params is None:
            var_param_map = self.update_var_params(
                np.random.normal(0, 2 * red_factor * np.pi, len(self.var_param_names))
            )
        else:
            var_param_map = self.update_var_params(init_var_params)

        enc_param_map = self.encode_feature(np.zeros(len(self.enc_param_names)))

        for var_p in self.var_params:
            var_p.set_value(var_param_map[var_p.name])
        for enc_p in self.enc_params:
            enc_p.set_value(enc_param_map[enc_p.name])
        return list(self.var_param_map.values())

    def update_var_params(self, updated):
        updated_dict = {}
        for i, p in enumerate(self.var_params):
            new_val = updated[i]
            updated_dict[p.name] = new_val
            p.set_value(new_val)
        self.var_param_map = updated_dict
        return updated_dict

    def encode_feature(self, feature):
        updated_dict = {}
        for i, p in enumerate(self.enc_params):
            new_val = feature[i]
            updated_dict[p.name] = new_val
            p.set_value(new_val)
        self.enc_param_map = updated_dict
        return updated_dict
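
A brief sketch of how this class might be driven (illustrative only; the architecture list and feature values below are made up). Each "var" entry adds a variational layer, and each "enc[a:b]" entry adds encoding phase shifters on modes a to b-1.

import numpy as np

# 4 modes, two variational layers on each side of an encoding layer on modes 0-2 (hypothetical architecture)
pqc = ParametrizedQuantumCircuit(m=4, arch=["var", "var", "enc[0:3]", "var", "var"])

# random initial variational parameters; encoding parameters start at zero
init_values = pqc.init_params()
print(len(pqc.var_param_names), "variational parameters,", len(pqc.enc_param_names), "encoding parameters")

# load a (made-up) feature vector into the encoding phase shifters
pqc.encode_feature(np.random.uniform(0, 2 * np.pi, len(pqc.enc_param_names)))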
@@ -0,0 +1,42 @@
import numpy as np


class SPSA:
    """
    SPSA wrapper class used for optimising the models.

    :param init_params: values of the initial parameters over which the optimisation is performed.
    :param grad_fun: method returning the pseudo-gradient vector w.r.t. the parameters.
    :param iter_num: the total number of SPSA iterations.
    """

    def __init__(self, init_params, grad_fun, iter_num=5000):
        self.params = init_params
        self.grad_fun = grad_fun

        # standard SPSA gain-sequence exponents
        self.gamma = 0.101
        self.alpha = 0.602

        self.iter_num = iter_num
        self.k = 0
        self.c = 0.1
        self.A = 0.1 * iter_num

        # calibrate the step size from the magnitude of an initial gradient estimate
        mag_g0 = np.abs(np.array(self.grad_fun(init_params, self.c)).mean())
        self.a = 0.001 * ((self.A + 1) ** self.alpha) / mag_g0

    def step(self, iter_count=20):
        """Perform a step of optimisation consisting of a given number of iterations.

        :param iter_count: the length of the step, i.e. the number of iterations it comprises.
        :return: the updated parameters.
        """
        params = self.params.copy()

        for _ in range(iter_count):
            self.k += 1
            # decaying gain sequences a_k and c_k
            ak = self.a / ((self.k + self.A) ** self.alpha)
            ck = self.c / (self.k ** self.gamma)
            gk = np.array(self.grad_fun(params, ck))
            params -= ak * gk

        self.params = params
        return params
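
The class expects grad_fun(params, ck) to return an SPSA-style pseudo-gradient. A minimal sketch of such a function for a generic loss (the quadratic loss below is a placeholder, not the GAN objective from this commit):

import numpy as np

def make_spsa_grad(loss_fun):
    """Return a grad_fun(params, ck) implementing the standard two-point SPSA estimator."""
    def grad_fun(params, ck):
        # random +/-1 perturbation direction
        delta = np.random.choice([-1.0, 1.0], size=len(params))
        loss_plus = loss_fun(params + ck * delta)
        loss_minus = loss_fun(params - ck * delta)
        return (loss_plus - loss_minus) / (2 * ck * delta)
    return grad_fun

# placeholder loss, purely for illustration
loss = lambda p: np.sum(np.asarray(p) ** 2)
opt = SPSA(init_params=np.random.uniform(-1, 1, 10), grad_fun=make_spsa_grad(loss), iter_num=1000)
params = opt.step(iter_count=50)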
Empty file.