Commit 87115cb: Various improvements for gradients

danclaudino committed Mar 17, 2022 (1 parent: 577cef4)
Signed-off-by: Daniel Claudino <[email protected]>
Showing 14 changed files with 152 additions and 52 deletions.
@@ -54,7 +54,7 @@ class BackwardDifferenceGradient : public AlgorithmGradientStrategy {
obs = parameters.get<std::shared_ptr<Observable>>("observable");

// Default step size
- step = 1.0e-7;
+ step = 1.0e-7;
// Change step size if need be
if (parameters.keyExists<double>("step")) {
step = parameters.get<double>("step");
@@ -110,15 +110,16 @@ class BackwardDifferenceGradient : public AlgorithmGradientStrategy {
}
}
} else {
- kernels = obs->observe(circuit);
+ auto evaled = circuit->operator()(tmpX);
+ kernels = obs->observe(evaled);

// loop over circuit instructions
// and gather coefficients/instructions
for (auto &f : kernels) {
if (containMeasureGates(f)) {
- auto evaled = f->operator()(tmpX);

coefficients.push_back(std::real(f->getCoefficient()));
- gradientInstructions.push_back(evaled);
+ gradientInstructions.push_back(f);
}
}
}
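This reordering is the template for the whole commit: the parameterized circuit is bound to the shifted parameters before the observable appends its measurement bases, and the observed kernels are then collected as-is instead of being re-evaluated term by term. A minimal sketch of the pattern, assuming the members of the enclosing strategy class (circuit, obs, containMeasureGates, coefficients, gradientInstructions) and XACC's CompositeInstruction/Observable API; the same change repeats in the central-difference, forward-difference, and parameter-shift strategies below:

// Sketch only; all identifiers except tmpX come from the enclosing class.
std::vector<double> tmpX = {0.5, -0.3};  // parameters shifted by +/- step
auto evaled = circuit->operator()(tmpX); // 1) bind the parameter values
auto kernels = obs->observe(evaled);     // 2) then attach measurement bases
for (auto &f : kernels) {
  if (containMeasureGates(f)) {          // skip identity-only terms
    coefficients.push_back(std::real(f->getCoefficient()));
    gradientInstructions.push_back(f);   // already evaluated; no re-binding
  }
}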
@@ -171,9 +172,7 @@ class BackwardDifferenceGradient : public AlgorithmGradientStrategy {
return;
}

- const std::string name() const override {
- return "backward";
- }
+ const std::string name() const override { return "backward"; }
const std::string description() const override { return ""; }
};

@@ -106,15 +106,16 @@ class CentralDifferenceGradient : public AlgorithmGradientStrategy {
}
}
} else {
- kernels = obs->observe(circuit);
+ auto evaled = circuit->operator()(tmpX);
+ kernels = obs->observe(evaled);

// loop over circuit instructions
// and gather coefficients/instructions
for (auto &f : kernels) {
if (containMeasureGates(f)) {
- auto evaled = f->operator()(tmpX);

coefficients.push_back(std::real(f->getCoefficient()));
- gradientInstructions.push_back(evaled);
+ gradientInstructions.push_back(f);
}
}
}
@@ -54,7 +54,7 @@ class ForwardDifferenceGradient : public AlgorithmGradientStrategy {
obs = parameters.get<std::shared_ptr<Observable>>("observable");

// Default step size
- step = 1.0e-7;
+ step = 1.0e-7;
// Change step size if need be
if (parameters.keyExists<double>("step")) {
step = parameters.get<double>("step");
@@ -113,15 +113,16 @@ class ForwardDifferenceGradient : public AlgorithmGradientStrategy {
}
}
} else {
- kernels = obs->observe(circuit);
+ auto evaled = circuit->operator()(tmpX);
+ kernels = obs->observe(evaled);

// loop over circuit instructions
// and gather coefficients/instructions
for (auto &f : kernels) {
if (containMeasureGates(f)) {
- auto evaled = f->operator()(tmpX);

coefficients.push_back(std::real(f->getCoefficient()));
- gradientInstructions.push_back(evaled);
+ gradientInstructions.push_back(f);
}
}
}
@@ -31,7 +31,7 @@ class ParameterShiftGradient : public AlgorithmGradientStrategy {
std::shared_ptr<Observable> obs; // Hamiltonian (or any) observable
std::function<std::shared_ptr<CompositeInstruction>(std::vector<double>)>
kernel_evaluator;
- double shiftScalar = 0.25;
+ double shiftScalar = 0.5;

public:
bool initialize(const HeterogeneousMap parameters) override {
@@ -51,7 +51,7 @@
}
// Default shiftScalar (this is not clonable, hence need to be
// reinitialized)
- shiftScalar = 0.25;
+ shiftScalar = 0.5;
if (parameters.keyExists<double>("shift-scalar")) {
shiftScalar = parameters.get<double>("shift-scalar");
}
@@ -104,15 +104,16 @@ class ParameterShiftGradient : public AlgorithmGradientStrategy {
}
}
} else {
- kernels = obs->observe(circuit);

+ // CompositeInstruction::operator()() must be called
+ // before Observable::observe()
+ auto evaled = circuit->operator()(tmpX);
+ kernels = obs->observe(evaled);
// loop over circuit instructions
// and gather coefficients/instructions
for (auto &f : kernels) {
if (containMeasureGates(f)) {
- auto evaled = f->operator()(tmpX);
coefficients.push_back(std::real(f->getCoefficient()));
- gradientInstructions.push_back(evaled);
+ gradientInstructions.push_back(f);
}
}
}
@@ -157,8 +158,8 @@ class ParameterShiftGradient : public AlgorithmGradientStrategy {
coefficients[instElement + nInstructionsElement[gradTerm] + shift];
}

- // gradient is (<+> - <->)
- dx[gradTerm] = (plusGradElement - minusGradElement);
+ // gradient is (<+> - <->) / 2
+ dx[gradTerm] = (plusGradElement - minusGradElement) / 2.0;
shift += 2 * nInstructionsElement[gradTerm];
}
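Both edits in this file implement the standard parameter-shift rule. For a rotation generated by a Pauli word, the derivative of the expectation value is exact at shifts of ±π/2:

dE/dθ = ( E(θ + π/2) - E(θ - π/2) ) / 2

Assuming the applied shift is shiftScalar * π (the updated test expectations below are consistent with this), the new default shiftScalar = 0.5 produces the required ±π/2 shifts, and the raw difference must then be halved, hence the / 2.0 above.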

@@ -386,8 +386,9 @@ std::vector<double> Autodiff::computeDerivative(
"vector size.");
}

+ auto varNames = getOrderedVector(CompositeInstruction->getVariables());
for (size_t i = 0; i < vars.size(); ++i) {
- varMap.emplace(CompositeInstruction->getVariables()[i], vars[i]);
+ varMap.emplace(varNames[i], vars[i]);
}

AutodiffCircuitVisitor visitor(nbQubits, varMap);
@@ -34,6 +34,13 @@ using cxdual = std::complex<autodiff::dual>;
typedef Eigen::Matrix<cxdual, -1, 1, 0> VectorXcdual;
typedef Eigen::Matrix<cxdual, -1, -1, 0> MatrixXcdual;


+ template <typename T>
+ std::vector<T> getOrderedVector(const std::vector<T> unorderedVector) {
+ std::set<T> s(unorderedVector.begin(), unorderedVector.end());
+ std::vector<T> orderedVector(s.begin(), s.end());
+ return orderedVector;
+ }

namespace xacc {
namespace quantum {
class Autodiff : public AlgorithmGradientStrategy {
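Because getOrderedVector deduplicates through a std::set, the names come back in the set's sorted (lexicographic) order, regardless of the order in which the variables appear in the circuit. computeDerivative above now binds values against that sorted order, which is also why the AutodiffTester kernels below swap their gate and parameter ordering. A small hypothetical driver, not part of the commit (assumes the helper above is in scope, together with <set>):

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> vars{"theta1", "theta0", "theta1"};
  auto ordered = getOrderedVector(vars); // duplicate dropped, names sorted
  assert((ordered == std::vector<std::string>{"theta0", "theta1"}));
  return 0;
}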
@@ -54,8 +54,8 @@ TEST(AutodiffTester, checkGates) {
.parameters theta0, theta1
X 0
H 1
- Ry(theta0) 1
- Rx(theta1) 0
+ Rx(theta0) 0
+ Ry(theta1) 1
CNOT 1 0
)");
auto ansatz = xacc::getCompiled("test1");
@@ -139,8 +139,8 @@ TEST(AutodiffTester, checkGradientH3) {
.parameters t0, t1
.qbit q
X(q[0]);
- exp_i_theta(q, t0, {{"pauli", "X0 Y1 - Y0 X1"}});
- exp_i_theta(q, t1, {{"pauli", "X0 Z1 Y2 - X2 Z1 Y0"}});
+ exp_i_theta(q, t1, {{"pauli", "X0 Y1 - Y0 X1"}});
+ exp_i_theta(q, t0, {{"pauli", "X0 Z1 Y2 - X2 Z1 Y0"}});
)");
auto ansatz = xacc::getCompiled("ansatz_h3");
auto autodiff = std::make_shared<xacc::quantum::Autodiff>();
@@ -11,6 +11,7 @@
* Daniel Claudino - initial API and implementation
*******************************************************************************/
#include <gtest/gtest.h>
+ #include <string>

#include "xacc.hpp"
#include "xacc_service.hpp"
@@ -47,7 +48,54 @@ TEST(GradientStrategiesTester, checkParameterShift) {

std::vector<double> dx(1);
parameterShift->compute(dx, buffer->getChildren());
- EXPECT_NEAR(dx[0], std::sqrt(2), 1e-4);
+ EXPECT_NEAR(dx[0], 1, 1e-10);
}
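The expected derivative changes from √2 to 1 because the shift and the normalization changed together. Assuming the measured expectation behaves like E(θ) = sin θ near the test point (an assumption consistent with the old and new constants), the old defaults (±π/4 shift, no halving) gave

E(θ + π/4) - E(θ - π/4) = 2 sin(π/4) cos θ = √2 cos θ,

i.e. √2 where cos θ = 1, while the corrected rule returns the exact derivative:

( E(θ + π/2) - E(θ - π/2) ) / 2 = cos θ = 1.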

+ TEST(GradientStrategiesTester, checkParameterShiftXanadu) {
+ auto accelerator = xacc::getAccelerator("qpp");
+ auto buffer = xacc::qalloc(3);
+
+ std::shared_ptr<Observable> observable =
+ std::make_shared<xacc::quantum::PauliOperator>();
+ observable->fromString("Y0 Z2");
+
+ auto provider = xacc::getIRProvider("quantum");
+ auto ansatz = provider->createComposite("testCircuit");
+ std::vector<std::string> varNames = {"x0", "x1", "x2", "x3", "x4", "x5"};
+ ansatz->addVariables(varNames);
+ ansatz->addInstruction(provider->createInstruction("Rx", {0}, {"x0"}));
+ ansatz->addInstruction(provider->createInstruction("Ry", {1}, {"x1"}));
+ ansatz->addInstruction(provider->createInstruction("Rz", {2}, {"x2"}));
+ ansatz->addInstruction(provider->createInstruction("CNOT", {0, 1}));
+ ansatz->addInstruction(provider->createInstruction("CNOT", {1, 2}));
+ ansatz->addInstruction(provider->createInstruction("CNOT", {2, 0}));
+ ansatz->addInstruction(provider->createInstruction("Rx", {0}, {"x3"}));
+ ansatz->addInstruction(provider->createInstruction("Ry", {1}, {"x4"}));
+ ansatz->addInstruction(provider->createInstruction("Rz", {2}, {"x5"}));
+ ansatz->addInstruction(provider->createInstruction("CNOT", {0, 1}));
+ ansatz->addInstruction(provider->createInstruction("CNOT", {1, 2}));
+ ansatz->addInstruction(provider->createInstruction("CNOT", {2, 0}));
+
+ std::vector<double> params{0.37454012, 0.95071431, 0.73199394, 0.59865848, 0.15601864, 0.15599452};
+ auto opt = xacc::getOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}, {"initial-parameters", params}, {"maxeval", 1}});
+ auto vqe = xacc::getAlgorithm("vqe", {{"ansatz", ansatz}, {"accelerator", accelerator}, {"observable", observable}, {"optimizer", opt}, {"gradient_strategy", "parameter-shift"}});
+ vqe->execute(buffer);
+ EXPECT_NEAR(buffer->getInformation("opt-val").as<double>(), -0.1197136570687156, 1e-5);
+
+ auto parameterShift = xacc::getGradient("parameter-shift", {{"observable", observable}, {"shift-scalar", 0.5}});
+ auto gradientInstructions =
+ parameterShift->getGradientExecutions(ansatz, params);
+ auto tmpbuffer = xacc::qalloc(3);
+ accelerator->execute(tmpbuffer, gradientInstructions);
+ std::vector<double> dx(6);
+
+ parameterShift->compute(dx, tmpbuffer->getChildren());
+
+ // check gradient
+ std::vector<double> expectedGradient = {-0.0651888, -0.0272892, 0, -0.0933935, -0.761068, 0};
+ for(int i = 0; i < 6; i++) {
+ EXPECT_NEAR(dx[i], expectedGradient[i], 1e-5);
+ }
+ }

TEST(GradientStrategiesTester, checkCentralDifference) {
@@ -223,7 +271,7 @@ TEST(GradientStrategiesTester, checkParameterShiftShots) {
accelerator->execute(buffer, gradientInstructions);
std::vector<double> dx(1);
parameterShift->compute(dx, buffer->getChildren());
- EXPECT_NEAR(dx[0], std::sqrt(2), 0.1);
+ EXPECT_NEAR(dx[0], 1, 0.1);
}

TEST(GradientStrategiesTester, checkCentralDifferenceShots) {
24 changes: 12 additions & 12 deletions quantum/plugins/algorithms/qaoa/qaoa.cpp
@@ -170,24 +170,25 @@ void QAOA::execute(const std::shared_ptr<AcceleratorBuffer> buffer) const {
// commuting terms such as the maxcut Hamiltonian.
// Limitation: this grouping cannot handle gradient strategy at the moment.
// Observe the cost Hamiltonian:
- auto kernels = [&] {
+ auto getObservedKernels = [&] (const std::vector<double> x = {}) {
+ auto evaled = x.empty() ? kernel : kernel->operator()(x);
if (dynamic_cast<xacc::quantum::PauliOperator *>(m_costHamObs)) {
- return m_costHamObs->observe(kernel, {{"accelerator", m_qpu}});
+ return m_costHamObs->observe(evaled, {{"accelerator", m_qpu}});
} else {
- return m_costHamObs->observe(kernel);
+ return m_costHamObs->observe(evaled);
}
- }();
+ };

// Grouping is possible (no gradient strategy)
// TODO: Gradient strategy to handle grouping as well.
int iterCount = 0;
if (m_costHamObs->getNonIdentitySubTerms().size() > 1 &&
- kernels.size() == 1 && !gradientStrategy) {
+ getObservedKernels().size() == 1 && !gradientStrategy) {
OptFunction f(
[&, this](const std::vector<double> &x, std::vector<double> &dx) {
auto tmpBuffer = xacc::qalloc(buffer->size());
std::vector<std::shared_ptr<CompositeInstruction>> fsToExec{
- kernels[0]->operator()(x)};
+ getObservedKernels()[0]->operator()(x)};
if (m_irTransformation) {
for (auto &composite : fsToExec) {
m_irTransformation->apply(
@@ -241,7 +242,7 @@ void QAOA::execute(const std::shared_ptr<AcceleratorBuffer> buffer) const {

double identityCoeff = 0.0;
int nInstructionsEnergy = 0, nInstructionsGradient = 0;
- for (auto &f : kernels) {
+ for (auto &f : getObservedKernels(x)) {
kernelNames.push_back(f->name());
std::complex<double> coeff = f->getCoefficient();

@@ -254,8 +255,7 @@ void QAOA::execute(const std::shared_ptr<AcceleratorBuffer> buffer) const {
}

if (nFunctionInstructions > kernel->nInstructions()) {
- auto evaled = f->operator()(x);
- fsToExec.push_back(evaled);
+ fsToExec.push_back(f);
coefficients.push_back(std::real(coeff));
} else {
identityCoeff += std::real(coeff);
@@ -399,6 +399,7 @@ QAOA::execute(const std::shared_ptr<AcceleratorBuffer> buffer,

// Observe the cost Hamiltonian, with the input Accelerator:
// i.e. perform grouping (e.g. max-cut QAOA, Pauli) if possible:
+ kernel = kernel->operator()(x);
auto kernels = [&] {
if (dynamic_cast<xacc::quantum::PauliOperator *>(m_costHamObs)) {
return m_costHamObs->observe(kernel, {{"accelerator", m_qpu}});
@@ -412,7 +413,7 @@
// Grouping was done:
// just execute the single observed kernel:
std::vector<std::shared_ptr<CompositeInstruction>> fsToExec{
- kernels[0]->operator()(x)};
+ kernels[0]};
if (m_irTransformation) {
for (auto &composite : fsToExec) {
m_irTransformation->apply(
@@ -442,8 +443,7 @@
}

if (nFunctionInstructions > kernel->nInstructions()) {
- auto evaled = f->operator()(x);
- fsToExec.push_back(evaled);
+ fsToExec.push_back(f);
coefficients.push_back(std::real(coeff));
} else {
identityCoeff += std::real(coeff);
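The qaoa.cpp changes apply the same evaluate-then-observe rule: the old code observed the unevaluated kernel once via an immediately-invoked lambda, while the new getObservedKernels callable binds the current parameters before observation on every call. A condensed sketch of how the callable is used, reusing names from the diff above (not a complete method):

// Grouping check: no parameters bound yet, so call with the default empty x.
bool grouping = m_costHamObs->getNonIdentitySubTerms().size() > 1 &&
                getObservedKernels().size() == 1 && !gradientStrategy;

// Objective evaluation: parameters are bound before observation, and each
// observed kernel f is queued directly; the per-term f->operator()(x) is gone.
for (auto &f : getObservedKernels(x)) {
  // gather f->getCoefficient() and f->name(), push f into fsToExec
}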
5 changes: 3 additions & 2 deletions quantum/plugins/algorithms/qaoa/tests/QAOATester.cpp
@@ -102,7 +102,7 @@ TEST(QAOATester, checkP1TriangleGraph) {
for (auto beta : all_betas) {
auto buffer = xacc::qalloc(3);
auto cost =
- qaoa->execute(buffer, std::vector<double>{gamma, beta})[0];
+ qaoa->execute(buffer, std::vector<double>{beta, gamma})[0];
auto d = 1;
auto e = 1;
auto f = 1;
@@ -117,6 +117,7 @@
}
}


// Making sure that a set of Hadamards can be passed
// as the "initial-state" to the QAOA algorithm and
// the proper result is returned
@@ -296,7 +297,7 @@ TEST(QAOATester, checkP1TriangleGraphGroupingExpVal) {
for (auto gamma : all_gammas) {
for (auto beta : all_betas) {
auto buffer = xacc::qalloc(3);
- auto cost = qaoa->execute(buffer, std::vector<double>{gamma, beta})[0];
+ auto cost = qaoa->execute(buffer, std::vector<double>{beta, gamma})[0];
auto d = 1;
auto e = 1;
auto f = 1;