Start working toward Python3 compatibility. (ray-project#117)
robertnishihara authored and pcmoritz committed Dec 11, 2016
1 parent 3d083c8 commit ddba1df
Showing 48 changed files with 206 additions and 103 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -34,7 +34,7 @@ for _ in range(10):
 
 # Fetch the results of the tasks and print their average.
 estimate = np.mean(ray.get(result_ids))
-print "Pi is approximately {}.".format(estimate)
+print("Pi is approximately {}.".format(estimate))
 ```
 
 Within the for loop, each call to `estimate_pi.remote(100)` sends a message to
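The change above is the commit's most common pattern: Python 2 print statements become calls to the print() function, which parses on both Python 2 and 3. A minimal sketch of why the paired `from __future__ import print_function` additions elsewhere in this commit matter (illustrative values, not Ray code):

```python
# With this import, Python 2.6+ treats print as a built-in function,
# so the same line means the same thing on Python 2 and Python 3.
from __future__ import print_function

estimate = 3.14159  # illustrative value
print("Pi is approximately {}.".format(estimate))

# Without the import, Python 2 parses print("a", "b") as a print
# statement applied to a tuple and outputs ('a', 'b'); with it, the
# call prints the two strings separated by a space, as on Python 3.
print("a", "b")
```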
2 changes: 1 addition & 1 deletion doc/using-ray-with-tensorflow.md
@@ -190,5 +190,5 @@ for iteration in range(NUM_ITERS):
 # Print the current weights. They should converge to roughly to the values 0.1
 # and 0.3 used in generate_fake_x_y_data.
 if iteration % 20 == 0:
-print "Iteration {}: weights are {}".format(iteration, weights)
+print("Iteration {}: weights are {}".format(iteration, weights))
 ```
6 changes: 5 additions & 1 deletion examples/alexnet/alexnet.py
@@ -1,6 +1,10 @@
 # The code for AlexNet is copied and adapted from the TensorFlow repository
 # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/alexnet/alexnet_benchmark.py.
 
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import ray
 import numpy as np
 import tarfile, io
@@ -390,7 +394,7 @@ def shuffle(batches):
 # Randomly permute the order of the batches.
 permuted_batches = np.random.permutation(batches)
 new_batches = []
-for i in range(len(batches) / 2):
+for i in range(len(batches) // 2):
 # Swap data between consecutive batches.
 shuffled_batch1, shuffled_batch2 = shuffle_pair(permuted_batches[2 * i], permuted_batches[2 * i + 1])
 new_batches += [shuffled_batch1, shuffled_batch2]
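The `/` to `//` rewrites pair with the new `from __future__ import division` lines: once true division is enabled, `/` on two ints yields a float even on Python 2, and `range()` rejects floats. A small sketch of the distinction (standalone, not from the Ray sources):

```python
from __future__ import division  # make / mean true division, as on Python 3

batches = list(range(7))  # illustrative stand-in for a list of batches

# len(batches) / 2 is now 3.5 on both Python 2 and 3, which would make
# range() raise a TypeError. Floor division keeps the old integer
# semantics explicitly.
half = len(batches) // 2
assert half == 3
for i in range(half):
    pass  # process a pair of batches here
```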
12 changes: 8 additions & 4 deletions examples/alexnet/driver.py
@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import numpy as np
 import ray
 import os
@@ -28,7 +32,7 @@
 imagenet_bucket = s3_resource.Bucket(args.s3_bucket)
 objects = imagenet_bucket.objects.filter(Prefix=args.key_prefix)
 image_tar_files = [str(obj.key) for obj in objects.all()]
-print "Images will be downloaded from {} files.".format(len(image_tar_files))
+print("Images will be downloaded from {} files.".format(len(image_tar_files)))
 
 # Downloading the label file, and create a dictionary mapping the filenames of
 # the images to their labels.
@@ -38,7 +42,7 @@
 filename_label_pairs = [line.split(" ") for line in filename_label_str]
 filename_label_dict = dict([(os.path.basename(name), label) for name, label in filename_label_pairs])
 filename_label_dict_id = ray.put(filename_label_dict)
-print "Labels extracted."
+print("Labels extracted.")
 
 # Download the imagenet dataset.
 imagenet_data = alexnet.load_tarfiles_from_s3(args.s3_bucket, image_tar_files, [256, 256])
@@ -60,7 +64,7 @@
 # Initialize the network and optimizer weights. This is only run once on the
 # driver. We initialize the weights manually on the workers.
 sess.run(init_all_variables)
-print "Initialized network weights."
+print("Initialized network weights.")
 
 iteration = 0
 while True:
@@ -82,7 +86,7 @@
 gradient_ids.append(alexnet.compute_grad.remote(x_id, y_id, mean_id, weights_id))
 
 # Print the accuracy on a random training batch.
-print "Iteration {}: accuracy = {:.3}%".format(iteration, 100 * ray.get(accuracy))
+print("Iteration {}: accuracy = {:.3}%".format(iteration, 100 * ray.get(accuracy)))
 
 # Fetch the gradients. This blocks until the gradients have been computed.
 gradient_sets = ray.get(gradient_ids)
2 changes: 1 addition & 1 deletion examples/hyperopt/README.md
@@ -148,7 +148,7 @@ while len(remaining_ids) > 0:
 ready_ids, remaining_ids = ray.wait(remaining_ids, num_returns=1)
 # Get the accuracy corresponding to the ready object ID.
 accuracy = ray.get(ready_ids[0])
-print "Accuracy {}".format(accuracy)
+print("Accuracy {}".format(accuracy))
 ```
 
 Note that the above example does not associate the accuracy with the parameters
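The note above observes that this loop reports accuracies without the hyperparameter settings that produced them; the driver script changed below handles that with a `params_mapping` dict keyed by object ID. A hedged sketch of that bookkeeping, where `train_cnn` and `generate_random_params` are stand-ins for the example's remote function and parameter sampler:

```python
# Sketch: remember which parameters produced which result, mirroring
# the params_mapping dict in examples/hyperopt/driver.py. Assumes ray
# is imported and initialized, and that train_cnn is a remote function.
params_mapping = {}
remaining_ids = []
for _ in range(10):
    params = generate_random_params()
    accuracy_id = train_cnn.remote(params)
    params_mapping[accuracy_id] = params  # object ID -> hyperparameters
    remaining_ids.append(accuracy_id)

while len(remaining_ids) > 0:
    ready_ids, remaining_ids = ray.wait(remaining_ids, num_returns=1)
    accuracy = ray.get(ready_ids[0])
    params = params_mapping[ready_ids[0]]  # look the parameters back up
    print("Accuracy {} with params {}".format(accuracy, params))
```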
15 changes: 10 additions & 5 deletions examples/hyperopt/driver.py
@@ -1,5 +1,10 @@
 # Most of the tensorflow code is adapted from Tensorflow's tutorial on using CNNs to train MNIST
 # https://www.tensorflow.org/versions/r0.9/tutorials/mnist/pros/index.html#build-a-multilayer-convolutional-network
 
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import numpy as np
 import ray
 import argparse
@@ -24,7 +29,7 @@
 steps = args.steps
 
 # Load the mnist data and turn the data into remote objects.
-print "Downloading the MNIST dataset. This may take a minute."
+print("Downloading the MNIST dataset. This may take a minute.")
 mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
 train_images = ray.put(mnist.train.images)
 train_labels = ray.put(mnist.train.labels)
@@ -65,20 +70,20 @@ def generate_random_params():
 result_id = ready_ids[0]
 params = params_mapping[result_id]
 accuracy = ray.get(result_id)
-print """We achieve accuracy {:.3}% with
+print("""We achieve accuracy {:.3}% with
 learning_rate: {:.2}
 batch_size: {}
 dropout: {:.2}
 stddev: {:.2}
-""".format(100 * accuracy, params["learning_rate"], params["batch_size"], params["dropout"], params["stddev"])
+""".format(100 * accuracy, params["learning_rate"], params["batch_size"], params["dropout"], params["stddev"]))
 if accuracy > best_accuracy:
 best_params = params
 best_accuracy = accuracy
 
 # Record the best performing set of hyperparameters.
-print """Best accuracy over {} trials was {:.3} with
+print("""Best accuracy over {} trials was {:.3} with
 learning_rate: {:.2}
 batch_size: {}
 dropout: {:.2}
 stddev: {:.2}
-""".format(trials, 100 * best_accuracy, best_params["learning_rate"], best_params["batch_size"], best_params["dropout"], best_params["stddev"])
+""".format(trials, 100 * best_accuracy, best_params["learning_rate"], best_params["batch_size"], best_params["dropout"], best_params["stddev"]))
6 changes: 5 additions & 1 deletion examples/hyperopt/hyperopt.py
@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import ray
 import numpy as np
 import tensorflow as tf
@@ -6,7 +10,7 @@ def get_batch(data, batch_index, batch_size):
 # This method currently drops data when num_data is not divisible by
 # batch_size.
 num_data = data.shape[0]
-num_batches = num_data / batch_size
+num_batches = num_data // batch_size
 batch_index %= num_batches
 return data[(batch_index * batch_size):((batch_index + 1) * batch_size)]
2 changes: 1 addition & 1 deletion examples/lbfgs/README.md
@@ -32,7 +32,7 @@ built in methods for loading the data.
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
 batch_size = 100
-num_batches = mnist.train.num_examples / batch_size
+num_batches = mnist.train.num_examples // batch_size
 batches = [mnist.train.next_batch(batch_size) for _ in range(num_batches)]
 ```
12 changes: 8 additions & 4 deletions examples/lbfgs/driver.py
@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import ray
 
 import numpy as np
@@ -115,16 +119,16 @@ def full_grad(theta):
 # algorithm.
 
 # Load the mnist data and turn the data into remote objects.
-print "Downloading the MNIST dataset. This may take a minute."
+print("Downloading the MNIST dataset. This may take a minute.")
 mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
 batch_size = 100
-num_batches = mnist.train.num_examples / batch_size
+num_batches = mnist.train.num_examples // batch_size
 batches = [mnist.train.next_batch(batch_size) for _ in range(num_batches)]
-print "Putting MNIST in the object store."
+print("Putting MNIST in the object store.")
 batch_ids = [(ray.put(xs), ray.put(ys)) for (xs, ys) in batches]
 
 # Initialize the weights for the network to the vector of all zeros.
 theta_init = 1e-2 * np.random.normal(size=dim)
 # Use L-BFGS to minimize the loss function.
-print "Running L-BFGS."
+print("Running L-BFGS.")
 result = scipy.optimize.fmin_l_bfgs_b(full_loss, theta_init, maxiter=10, fprime=full_grad, disp=True)
6 changes: 5 additions & 1 deletion examples/rl_pong/driver.py
@@ -1,6 +1,10 @@
 # This code is copied and adapted from Andrej Karpathy's code for learning to
 # play Pong https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5.
 
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import numpy as np
 import cPickle as pickle
 import ray
@@ -135,7 +139,7 @@ def compute_gradient(model):
 reward_sum = ray.get(reward_sums[i])
 for k in model: grad_buffer[k] += grad[k] # accumulate grad over batch
 running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
-print "Batch {}. episode reward total was {}. running mean: {}".format(batch_num, reward_sum, running_reward)
+print("Batch {}. episode reward total was {}. running mean: {}".format(batch_num, reward_sum, running_reward))
 for k, v in model.iteritems():
 g = grad_buffer[k] # gradient
 rmsprop_cache[k] = decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g ** 2
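Consistent with "start working toward" in the commit title, this hunk still contains two Python 2-only constructs: `import cPickle as pickle` and `model.iteritems()`. A hedged sketch of the version-agnostic forms such code would eventually need (a suggestion, not part of this commit):

```python
from __future__ import print_function

# cPickle was removed in Python 3, where the C implementation backs
# the plain pickle module automatically, so fall back gracefully.
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3

# dict.iteritems() does not exist on Python 3; items() works on both
# (a list on 2, a view on 3 -- equivalent for simple iteration).
model = {"W1": 1.0, "W2": 2.0}  # illustrative stand-in
for k, v in model.items():
    print(k, v)
```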
13 changes: 8 additions & 5 deletions lib/python/ray/__init__.py
@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 # Ray version string
 __version__ = "0.01"
 
@@ -9,8 +13,7 @@
 # This is done by associating all child processes with a "job" object that imposes this behavior.
 (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32)
 
-import config
-import serialization
-from worker import register_class, error_info, init, connect, disconnect, get, put, wait, remote
-from worker import Reusable, reusables
-from worker import SCRIPT_MODE, WORKER_MODE, PYTHON_MODE, SILENT_MODE
+import ray.serialization
+from ray.worker import register_class, error_info, init, connect, disconnect, get, put, wait, remote
+from ray.worker import Reusable, reusables
+from ray.worker import SCRIPT_MODE, WORKER_MODE, PYTHON_MODE, SILENT_MODE
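`from __future__ import absolute_import` turns off Python 2's implicit relative imports, which is what forces the rewrites above: a bare `import serialization` inside the `ray` package would no longer find `ray/serialization.py`. The fix is either a fully qualified name, as here, or the explicit relative form used in the array modules below (`from .core import *`). A sketch of the distinction, using a hypothetical package `pkg` containing `core.py` and `util.py`:

```python
# Hypothetical contents of pkg/util.py.
from __future__ import absolute_import

# Python 2 without the future import: "import core" silently imports
# the sibling pkg/core.py. With it (and always on Python 3), that line
# searches only sys.path and raises ImportError unless a top-level
# module named core happens to exist.

import pkg.core          # absolute: fully qualified package path
from . import core       # explicit relative: the sibling module
from .core import *      # explicit relative, star-import form
```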
10 changes: 7 additions & 3 deletions lib/python/ray/array/distributed/core.py
@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import numpy as np
 import ray.array.remote as ra
 import ray
@@ -141,10 +145,10 @@ def blockwise_dot(*matrices):
 n = len(matrices)
 if n % 2 != 0:
 raise Exception("blockwise_dot expects an even number of arguments, but len(matrices) is {}.".format(n))
-shape = (matrices[0].shape[0], matrices[n / 2].shape[1])
+shape = (matrices[0].shape[0], matrices[n // 2].shape[1])
 result = np.zeros(shape)
-for i in range(n / 2):
-result += np.dot(matrices[i], matrices[n / 2 + i])
+for i in range(n // 2):
+result += np.dot(matrices[i], matrices[n // 2 + i])
 return result
 
 @ray.remote
8 changes: 6 additions & 2 deletions lib/python/ray/array/distributed/linalg.py
@@ -1,8 +1,12 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import numpy as np
 import ray.array.remote as ra
 import ray
 
-from core import *
+from .core import *
 
 __all__ = ["tsqr", "modified_lu", "tsqr_hr", "qr"]
 
@@ -68,7 +72,7 @@ def tsqr(a):
 else:
 lower = [a.shape[1], 0]
 upper = [2 * a.shape[1], BLOCK_SIZE]
-ith_index /= 2
+ith_index //= 2
 q_block_current = ra.dot.remote(q_block_current, ra.subarray.remote(q_tree[ith_index, j], lower, upper))
 q_result.objectids[i] = q_block_current
 r = current_rs[0]
6 changes: 5 additions & 1 deletion lib/python/ray/array/distributed/random.py
@@ -1,8 +1,12 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import numpy as np
 import ray.array.remote as ra
 import ray
 
-from core import *
+from .core import *
 
 @ray.remote
 def normal(shape):
4 changes: 4 additions & 0 deletions lib/python/ray/array/remote/core.py
@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import numpy as np
 import ray
4 changes: 4 additions & 0 deletions lib/python/ray/array/remote/linalg.py
@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import numpy as np
 import ray
4 changes: 4 additions & 0 deletions lib/python/ray/array/remote/random.py
@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import numpy as np
 import ray
9 changes: 0 additions & 9 deletions lib/python/ray/config.py

This file was deleted.

4 changes: 4 additions & 0 deletions lib/python/ray/pickling.py
@@ -1,5 +1,9 @@
 # Note that a little bit of code here is taken and slightly modified from the pickler because it was not possible to change its behavior otherwise.
 
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import sys
 from ctypes import c_void_p
 from cloudpickle import pickle, cloudpickle, CloudPickler, load, loads
5 changes: 4 additions & 1 deletion lib/python/ray/serialization.py
@@ -1,9 +1,12 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import numpy as np
-import pickling
 import numbuf
 
+import ray.pickling as pickling
+
 def check_serializable(cls):
 """Throws an exception if Ray cannot serialize this class efficiently.
3 changes: 2 additions & 1 deletion lib/python/ray/services.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
 
 import psutil
@@ -10,7 +12,6 @@
 import time
 
 # Ray modules
-import config
 import photon
 import plasma
 import global_scheduler
Empty file added lib/python/ray/test/__init__.py
(file name not shown)
@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import ray
 
 import numpy as np
9 changes: 5 additions & 4 deletions lib/python/ray/worker.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
 
 import hashlib
@@ -16,10 +18,9 @@
 import string
 
 # Ray modules
-import config
-import pickling
-import serialization
-import services
+import ray.pickling as pickling
+import ray.serialization as serialization
+import ray.services as services
 import numbuf
 import photon
 import plasma
2 changes: 2 additions & 0 deletions lib/python/ray/workers/default_worker.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
 
 import sys
(Diffs for the remaining changed files are not shown.)