Dynamic CVRP + Integration Layer added
udeshmg committed Jul 1, 2021
1 parent a16d3cf commit 6d91c90
Showing 21 changed files with 608 additions and 311 deletions.
13 changes: 12 additions & 1 deletion eval.py
@@ -177,7 +177,7 @@ def _eval_dataset(model, dataset, width, softmax_temp, opts, device):
if __name__ == "__main__":

parser = argparse.ArgumentParser()
parser.add_argument("datasets", nargs='+', help="Filename of the dataset(s) to evaluate")
parser.add_argument("--datasets", type=str, default=None, help="Filename of the dataset(s) to evaluate")
parser.add_argument("-f", action='store_true', help="Set true to overwrite")
parser.add_argument("-o", default=None, help="Name of the results file to write")
parser.add_argument('--val_size', type=int, default=10000,
@@ -206,6 +206,17 @@ def _eval_dataset(model, dataset, width, softmax_temp, opts, device):

opts = parser.parse_args()

# Hardcoded evaluation settings: `dynamic` switches between the dynamic TSP-20
# checkpoint and the pretrained static TSP-20 model.
dynamic = True
if dynamic:
    opts.datasets = ["data/dynamic_tsp/dynamic_tsp20_test_seed1234.pkl"]
    opts.model = "outputs/order/dynamic_tsp_20/run_10/"
else:
    opts.datasets = ["data/tsp/tsp20_test_seed1234.pkl"]
    opts.model = "pretrained/tsp_20/"

opts.decode_strategy = "bs"
opts.width = [12]

assert opts.o is None or (len(opts.datasets) == 1 and len(opts.width) <= 1), \
"Cannot specify result filename with more than one dataset or more than one width"

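Note: with dynamic = True the overrides above take precedence over anything passed on the command line. The effective configuration can be reproduced with a plain namespace (a sketch; attribute names follow eval.py, and "bs" is the beam-search decode strategy):

from argparse import Namespace

# Effective eval.py settings after the hardcoded block with dynamic = True.
opts = Namespace(
    datasets=["data/dynamic_tsp/dynamic_tsp20_test_seed1234.pkl"],
    model="outputs/order/dynamic_tsp_20/run_10/",
    decode_strategy="bs",   # beam search
    width=[12],             # single beam width
    o=None,
)

# The assert above still passes: one dataset and at most one width.
assert opts.o is None or (len(opts.datasets) == 1 and len(opts.width) <= 1)
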
16 changes: 15 additions & 1 deletion generate_data.py
@@ -7,6 +7,17 @@
def generate_tsp_data(dataset_size, tsp_size):
    return np.random.uniform(size=(dataset_size, tsp_size, 2)).tolist()

def generate_dynamic_tsp_data(dataset_size, num_nodes, threshold=0.1):
    # Node coordinates follow a bounded random walk: each time step perturbs every
    # coordinate by at most `threshold` and clips the result back into [0, 1].
    stack = []
    init = np.random.uniform(0, 1, (dataset_size, num_nodes, 2))
    for i in range(num_nodes):
        stack.append(init)
        init = np.clip(init + np.random.uniform(-threshold, threshold, (dataset_size, num_nodes, 2)), 0, 1)

    # Resulting shape: (dataset_size, num_time_steps, num_nodes, 2), with as many time steps as nodes.
    np_stack = np.stack(stack, axis=1)

    return np_stack


def generate_vrp_data(dataset_size, vrp_size):
CAPACITIES = {
@@ -104,7 +115,7 @@ def generate_pctsp_data(dataset_size, pctsp_size, penalty_factor=3):
parser.add_argument('--data_distribution', type=str, default='all',
help="Distributions to generate for problem, default 'all'.")

parser.add_argument("--dataset_size", type=int, default=10000, help="Size of the dataset")
parser.add_argument("--dataset_size", type=int, default=100, help="Size of the dataset")
parser.add_argument('--graph_sizes', type=int, nargs='+', default=[20, 50, 100],
help="Sizes of problem instances (default 20, 50, 100)")
parser.add_argument("-f", action='store_true', help="Set true to overwrite")
@@ -117,6 +128,7 @@ def generate_pctsp_data(dataset_size, pctsp_size, penalty_factor=3):

distributions_per_problem = {
'tsp': [None],
'dynamic_tsp' : [None],
'vrp': [None],
'pctsp': [None],
'op': ['const', 'unif', 'dist']
@@ -152,6 +164,8 @@ def generate_pctsp_data(dataset_size, pctsp_size, penalty_factor=3):
np.random.seed(opts.seed)
if problem == 'tsp':
    dataset = generate_tsp_data(opts.dataset_size, graph_size)
elif problem == 'dynamic_tsp':
    dataset = generate_dynamic_tsp_data(opts.dataset_size, graph_size)
elif problem == 'vrp':
    dataset = generate_vrp_data(
        opts.dataset_size, graph_size)
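
A short usage sketch of the new dynamic TSP generator (assuming generate_data.py is importable as a module; the shape follows from np.stack(..., axis=1)):

import numpy as np
from generate_data import generate_dynamic_tsp_data

data = generate_dynamic_tsp_data(dataset_size=4, num_nodes=20, threshold=0.1)
print(data.shape)  # (4, 20, 20, 2): instance x time step x node x (x, y)

# Each step moves every coordinate by at most `threshold` before clipping to [0, 1],
# so consecutive snapshots of the node locations differ only slightly.
assert np.abs(data[:, 1:] - data[:, :-1]).max() <= 0.1
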
104 changes: 0 additions & 104 deletions gurobi/tsp.py

This file was deleted.

27 changes: 24 additions & 3 deletions nets/attention_model.py
@@ -52,7 +52,8 @@ def __init__(self,
normalization='batch',
n_heads=8,
checkpoint_encoder=False,
shrink_size=None):
shrink_size=None,
use_single_time=False):
super(AttentionModel, self).__init__()

self.embedding_dim = embedding_dim
@@ -76,6 +77,8 @@ def __init__(self,
self.checkpoint_encoder = checkpoint_encoder
self.shrink_size = shrink_size

self.use_single_time = use_single_time

# Problem specific context parameters (placeholder and step context dimension)
if self.is_vrp or self.is_orienteering or self.is_pctsp:
# Embedding of last node + remaining_capacity / remaining length / remaining prize to collect
@@ -130,8 +133,9 @@ def forward(self, input, return_pi=False):
:return:
"""
original_input = input
if len(input.size()) == 4:
    input = input[:, 0, :, :]
input = self.prepare_input(input)
# if len(input.size()) == 4:
#     input = input[:, 0, :, :]


if self.checkpoint_encoder and self.training: # Only checkpoint if we need gradients
@@ -150,6 +154,7 @@

return cost, ll


def beam_search(self, *args, **kwargs):
return self.problem.beam_search(*args, **kwargs, model=self)

@@ -203,6 +208,22 @@ def _calc_log_likelihood(self, _log_p, a, mask):
# Calculate log_likelihood
return log_p.sum(1)

def prepare_input(self, input):
    # Dynamic instances carry an extra time dimension; when present, keep only
    # the first time step's node locations.
    if self.is_vrp or self.is_orienteering or self.is_pctsp:
        if len(input['loc'].size()) == 4:
            data = {
                'loc': input['loc'][:, 0, :, :],
                'demand': input['demand'],
                'depot': input['depot']
            }
        else:
            data = input
    else:  # TSP
        if len(input.size()) == 4:
            data = input[:, 0, :, :]
        else:
            data = input

    return data

def _init_embed(self, input):

if self.is_vrp or self.is_orienteering or self.is_pctsp:
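
To illustrate the new prepare_input step: a 4-D dynamic batch is reduced to its first time step before embedding, while static 3-D inputs pass through unchanged (sizes below are illustrative):

import torch

# Dynamic TSP batch: (batch, time, nodes, 2), as produced by generate_dynamic_tsp_data.
batch = torch.rand(8, 20, 20, 2)

# What prepare_input does for the plain TSP case.
snapshot = batch[:, 0, :, :] if batch.dim() == 4 else batch
print(snapshot.shape)  # torch.Size([8, 20, 2])
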
34 changes: 27 additions & 7 deletions nets/graph_encoder.py
@@ -20,15 +20,20 @@ def __init__(
input_dim,
embed_dim,
val_dim=None,
key_dim=None
key_dim=None,
is_vrp=False
):
super(StMultiHeadAttention, self).__init__()

self.n_heads = n_heads
self.is_vrp = is_vrp

self.spatial_attention = MultiHeadAttention(n_heads, input_dim, embed_dim, val_dim, key_dim)
self.temporal_attention = MultiHeadAttention(n_heads, input_dim, embed_dim, val_dim, key_dim)

self.fixed_emb = torch.nn.Linear(input_dim, input_dim)
self.fushed = torch.nn.Linear(2*input_dim, input_dim)

def forward(self, q, h=None, mask=None):

if h is None:
@@ -40,12 +45,24 @@ def forward(self, q, h=None, mask=None):
shape_temporal = (batch_size, graph_size, time, input_dim)

# Spatial attention mixes nodes within each time step; temporal attention mixes
# time steps for each node.
spatial = q.contiguous().view(batch_size*time, graph_size, input_dim)
temporal = q.transpose(1,2).contiguous().view(batch_size*graph_size, time, input_dim)

if self.is_vrp:
    # Node index 0 is held fixed over time: embed it separately and exclude it
    # from the temporal attention.
    temporal = q[:,:,1:,:].transpose(1, 2).contiguous().view(batch_size * (graph_size-1), time, input_dim)
    shape_temporal = (batch_size, graph_size-1, time, input_dim)
    fixed_out = self.fixed_emb(q[:,:,0,:])
else:
    temporal = q.transpose(1,2).contiguous().view(batch_size*graph_size, time, input_dim)

spatial_out = self.spatial_attention(spatial).view(shape_spatial)
temporal_out = self.temporal_attention(temporal).view(shape_temporal).transpose(1,2)
temporal_out = self.temporal_attention(temporal).view(shape_temporal).transpose(1, 2)

if self.is_vrp:
    # Re-attach the fixed node's embedding at index 0.
    temporal_out = torch.cat((fixed_out[:,:,None,:], temporal_out), 2)

fusion = spatial_out + temporal_out  # TODO: Implement fusion
emb_cat = self.fushed(torch.cat((spatial_out, temporal_out), dim=-1))
emb_cat = torch.add(spatial_out, temporal_out)
fusion = torch.sigmoid(emb_cat)  # TODO: Implement fusion

return fusion

@@ -312,13 +329,15 @@ def __init__(
feed_forward_hidden=512,
normalization='batch',
st_attention=False,
is_vrp=False
):
super(MultiHeadAttentionLayer, self).__init__(
    SkipConnection(
        StMultiHeadAttention(
            n_heads,
            input_dim=embed_dim,
            embed_dim=embed_dim
            embed_dim=embed_dim,
            is_vrp=is_vrp
        ) if st_attention else
        MultiHeadAttention(
            n_heads,
@@ -374,15 +393,16 @@ def __init__(
node_dim=None,
normalization='batch',
feed_forward_hidden=512,
st_attention=False
st_attention=False,
is_vrp=False,
):
super(GraphAttentionEncoder, self).__init__()

# To map input to embedding space
self.init_embed = nn.Linear(node_dim, embed_dim) if node_dim is not None else None

self.layers = nn.Sequential(*(
    MultiHeadAttentionLayer(n_heads, embed_dim, feed_forward_hidden, normalization, st_attention)
    MultiHeadAttentionLayer(n_heads, embed_dim, feed_forward_hidden, normalization, st_attention, is_vrp)
    for _ in range(n_layers)
))

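The reshaping inside StMultiHeadAttention can be checked in isolation; a sketch with illustrative sizes (the commit fuses the two outputs with a sigmoid of their sum):

import torch

batch_size, time, graph_size, input_dim = 2, 5, 20, 128
q = torch.rand(batch_size, time, graph_size, input_dim)

# Spatial view: one attention pass over the nodes within each time step.
spatial = q.contiguous().view(batch_size * time, graph_size, input_dim)
# Temporal view: one attention pass over the time steps of each node.
temporal = q.transpose(1, 2).contiguous().view(batch_size * graph_size, time, input_dim)

print(spatial.shape)   # torch.Size([10, 20, 128])
print(temporal.shape)  # torch.Size([40, 5, 128])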