@HarryUp 2019-06-26

Tutorial on TensorFlow usage


Outline


1. Brief introduction to TensorFlow

1.1. Architectures of CPU and GPU

[Figure: CPU and GPU architectures]


1.2. TensorFlow architecture

1.2.1. The programming stack

[Figure: the TensorFlow programming stack]


1.2.2. Units

  import tensorflow as tf

  # Create a variable and initialize it to the scalar 0
  state = tf.Variable(0, name="counter")
  # Create an op that increases state by one
  one = tf.placeholder(tf.int32, shape=None, name='one')
  new_value = tf.add(state, one)
  update = tf.assign(state, new_value)
  # After the graph is launched, variables must be initialized.
  # First, add an `initializer` op to the graph.
  init_op = tf.global_variables_initializer()
  # Launch the graph and run the ops
  with tf.Session() as sess:
      # Run the 'init' op
      sess.run(init_op)
      # Print the initial value of 'state'
      print(sess.run(state))
      # Run the op to update 'state' and print 'state'
      for _ in range(3):
          sess.run(update, feed_dict={one: 1})
          print(sess.run(state))
  # Output:
  # 0
  # 1
  # 2
  # 3

1.2.3. GPU usage
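
TensorFlow places ops on devices automatically, but placement can also be controlled by hand. Below is a minimal sketch (assuming TF 1.x; the device string and config values are illustrative, not from the original notes) of the two most common controls: pinning ops with tf.device, and relaxing GPU memory allocation through ConfigProto.

  import tensorflow as tf

  # Pin these ops to the first GPU (soft placement falls back to CPU if absent)
  with tf.device('/gpu:0'):
      a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
      b = tf.matmul(a, a)

  # allow_growth allocates GPU memory on demand instead of grabbing
  # the whole card up front; log_device_placement prints where each
  # op actually runs
  config = tf.ConfigProto(allow_soft_placement=True,
                          log_device_placement=True)
  config.gpu_options.allow_growth = True
  with tf.Session(config=config) as sess:
      print(sess.run(b))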


2. Start from a simple CNN

2.1. Convolutional neural network model

[Figure: CNN model architecture]


2.1.1. Convolution
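
A small sketch (illustrative, not from the original notes) of how tf.nn.conv2d transforms tensor shapes: with 'SAME' padding and stride 1 the spatial size is preserved, while the channel count follows the kernel's output dimension.

  import tensorflow as tf

  # One 28x28 grayscale image: [batch, height, width, channels]
  x = tf.ones([1, 28, 28, 1])
  # 5x5 kernel, 1 input channel, 32 output channels
  w = tf.ones([5, 5, 1, 32])
  y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
  print(y.shape)  # (1, 28, 28, 32): 'SAME' padding keeps 28x28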


2.1.2. Pooling

[Figure: max pooling example]
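
Complementing the figure, a short illustrative sketch: 2x2 max pooling with stride 2 halves the spatial dimensions, which is why two rounds of pooling turn 28x28 MNIST images into the 7x7 maps used later in section 2.3.3.

  import tensorflow as tf

  x = tf.ones([1, 28, 28, 32])
  # 2x2 max pooling with stride 2 halves height and width
  y = tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                     strides=[1, 2, 2, 1], padding='SAME')
  print(y.shape)  # (1, 14, 14, 32)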


2.2. MNIST database

  tensorflow.examples.tutorials.mnist

[Figure: MNIST sample digits]
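
To get a feel for the data, a short sketch of loading the dataset and inspecting its shapes (the loader keeps 55,000 examples for training; one_hot=True yields 10-dimensional label vectors):

  from tensorflow.examples.tutorials.mnist import input_data

  mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
  print(mnist.train.images.shape)  # (55000, 784)
  print(mnist.train.labels.shape)  # (55000, 10)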


2.3. Code implementation

2.3.1. Preliminaries

  from __future__ import division, print_function, absolute_import
  import tensorflow as tf

  # Import MNIST data
  from tensorflow.examples.tutorials.mnist import input_data
  mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

  # Training parameters
  learning_rate = 0.001
  num_steps = 500
  batch_size = 128
  display_step = 10

  # Network parameters
  num_input = 784    # MNIST data input (img shape: 28*28)
  num_classes = 10   # MNIST total classes (0-9 digits)
  dropout = 0.75     # Dropout, probability to keep units

  # tf Graph input
  X = tf.placeholder(tf.float32, [None, num_input])
  Y = tf.placeholder(tf.float32, [None, num_classes])
  keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

2.3.2. Create model architecture

  # Create some wrappers for simplicity
  def conv2d(x, W, b, strides=1):
      # Conv2D wrapper, with bias and relu activation
      x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
      x = tf.nn.bias_add(x, b)
      return tf.nn.relu(x)

  def maxpool2d(x, k=2):
      # MaxPool2D wrapper
      return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                            padding='SAME')

  # Create model
  def conv_net(x, weights, biases, dropout):
      # MNIST data input is a 1-D vector of 784 features (28*28 pixels)
      # Reshape to match picture format [Height x Width x Channel]
      # Tensor input becomes 4-D: [Batch Size, Height, Width, Channel]
      x = tf.reshape(x, shape=[-1, 28, 28, 1])
      # Convolution layer
      conv1 = conv2d(x, weights['wc1'], biases['bc1'])
      # Max pooling (down-sampling)
      conv1 = maxpool2d(conv1, k=2)
      # Convolution layer
      conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
      # Max pooling (down-sampling)
      conv2 = maxpool2d(conv2, k=2)
      # Fully connected layer
      # Reshape conv2 output to fit fully connected layer input
      fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
      fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
      fc1 = tf.nn.relu(fc1)
      # Apply dropout
      fc1 = tf.nn.dropout(fc1, dropout)
      # Output, class prediction
      out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
      return out

2.3.3. Construct model graph

  # Store layers weight & bias
  weights = {
      # 5x5 conv, 1 input, 32 outputs
      'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
      # 5x5 conv, 32 inputs, 64 outputs
      'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
      # fully connected, 7*7*64 inputs, 1024 outputs
      'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
      # 1024 inputs, 10 outputs (class prediction)
      'out': tf.Variable(tf.random_normal([1024, num_classes]))
  }
  biases = {
      'bc1': tf.Variable(tf.random_normal([32])),
      'bc2': tf.Variable(tf.random_normal([64])),
      'bd1': tf.Variable(tf.random_normal([1024])),
      'out': tf.Variable(tf.random_normal([num_classes]))
  }

  # Construct model
  logits = conv_net(X, weights, biases, keep_prob)
  prediction = tf.nn.softmax(logits)
  # Define loss and optimizer
  loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
      logits=logits, labels=Y))
  optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
  train_op = optimizer.minimize(loss_op)
  # Evaluate model
  correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
  # Initialize the variables (i.e. assign their default value)
  init = tf.global_variables_initializer()

2.3.4. Session run

  # Start training
  with tf.Session() as sess:
      # Run the initializer
      sess.run(init)
      for step in range(1, num_steps+1):
          batch_x, batch_y = mnist.train.next_batch(batch_size)
          # Run optimization op (backprop)
          sess.run(train_op, feed_dict={X: batch_x, Y: batch_y,
                                        keep_prob: dropout})
          if step % display_step == 0 or step == 1:
              # Calculate batch loss and accuracy
              loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                   Y: batch_y,
                                                                   keep_prob: 1.0})
              print("Step " + str(step) + ", Minibatch Loss= " +
                    "{:.4f}".format(loss) + ", Training Accuracy= " +
                    "{:.3f}".format(acc))
      print("Optimization Finished!")
      # Calculate accuracy for 256 MNIST test images
      print("Testing Accuracy:",
            sess.run(accuracy, feed_dict={X: mnist.test.images[:256],
                                          Y: mnist.test.labels[:256],
                                          keep_prob: 1.0}))
  # Testing Accuracy: 0.976562

3. Utilities

3.1. Save and restore model

  # 'Saver' op to save and restore all the variables
  saver = tf.train.Saver()
  # Save model weights to disk
  save_path = saver.save(sess, model_path)
  print("Model saved in file: %s" % save_path)
  # Restore model weights from a previously saved model
  saver.restore(sess, model_path)
  print("Model restored from file: %s" % model_path)

3.2. Visualization - TensorBoard basics

  # Construct the model and encapsulate all ops into scopes, making
  # TensorBoard's graph visualization more convenient
  with tf.name_scope('Model'):
      # Model
      pred = tf.nn.softmax(tf.matmul(x, W) + b)  # Softmax
  with tf.name_scope('Loss'):
      # Minimize error using cross entropy
      cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
  with tf.name_scope('SGD'):
      # Gradient descent
      optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
  with tf.name_scope('Accuracy'):
      # Accuracy
      acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
      acc = tf.reduce_mean(tf.cast(acc, tf.float32))
  # Initialize the variables
  init = tf.global_variables_initializer()
  # Create a summary to monitor the cost tensor
  tf.summary.scalar("loss", cost)
  # Create a summary to monitor the accuracy tensor
  tf.summary.scalar("accuracy", acc)
  # Merge all summaries into a single op
  merged_summary_op = tf.summary.merge_all()

  # Start training
  with tf.Session() as sess:
      sess.run(init)
      # op to write logs to TensorBoard
      summary_writer = tf.summary.FileWriter(logs_path,
                                             graph=tf.get_default_graph())
      # ...
      # Run optimization op (backprop), cost op (to get loss value)
      # and summary nodes
      _, c, summary = sess.run([optimizer, cost, merged_summary_op],
                               feed_dict={x: batch_xs, y: batch_ys})
      # Write logs at every iteration
      summary_writer.add_summary(summary, epoch * total_batch + i)
  # Run the command line:
  # --> tensorboard --logdir=/tmp/tensorflow_logs
  # Then open http://0.0.0.0:6006/ in your web browser

Loss and Accuracy Visualization
[Figure: loss and accuracy curves in TensorBoard]

Graph Visualization
[Figure: graph visualization in TensorBoard]


3.3. Visualization - TensorBoard advanced

  with tf.name_scope('SGD'):
      # Gradient descent
      optimizer = tf.train.GradientDescentOptimizer(learning_rate)
      # Op to calculate every variable gradient
      grads = tf.gradients(loss, tf.trainable_variables())
      grads = list(zip(grads, tf.trainable_variables()))
      # Op to update all variables according to their gradient
      apply_grads = optimizer.apply_gradients(grads_and_vars=grads)
  # Create summaries to visualize weights
  for var in tf.trainable_variables():
      tf.summary.histogram(var.name, var)
  # Summarize all gradients
  for grad, var in grads:
      tf.summary.histogram(var.name + '/gradient', grad)

Computation Graph Visualization
[Figure: computation graph in TensorBoard]

Weights and Gradients Visualization
[Figure: weight and gradient histograms]

Activations Visualization
[Figure: activation histograms]


4. Advanced APIs

4.1. High-level packages

4.1.1. Keras

  import tensorflow as tf
  from tensorflow import keras

  # Sequential model
  model = keras.Sequential()
  # Add a densely-connected layer with 64 units to the model:
  model.add(keras.layers.Dense(64, activation='relu'))
  # Add another:
  model.add(keras.layers.Dense(64, activation='relu'))
  # Add a softmax layer with 10 output units:
  model.add(keras.layers.Dense(10, activation='softmax'))

  # Set up training
  model.compile(optimizer=tf.train.AdamOptimizer(0.001),
                loss='categorical_crossentropy',
                metrics=['accuracy'])
  # Input NumPy data
  model.fit(data, labels, epochs=10, batch_size=32)
  # Evaluate and predict
  model.evaluate(x, y, batch_size=32)
  model.predict(x, batch_size=32)

  # Pre-trained architectures are also available
  from keras.applications import inception_v3
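
The last import above pulls in a pre-trained architecture; a one-line sketch of typical usage (weights='imagenet' downloads the pre-trained weights on first use):

  # Load Inception-V3 with ImageNet weights
  model = inception_v3.InceptionV3(weights='imagenet')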

4.1.2. Slim (TensorLayer)

  import tensorflow.contrib.slim as slim

  # arg_scope sets default arguments for the listed ops
  with slim.arg_scope([slim.conv2d], padding='SAME',
                      weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                      weights_regularizer=slim.l2_regularizer(0.0005)):
      net = slim.conv2d(inputs, 64, [11, 11], scope='conv1')
      net = slim.conv2d(net, 128, [11, 11], padding='VALID', scope='conv2')
      net = slim.conv2d(net, 256, [11, 11], scope='conv3')

  net = ...
  net = slim.conv2d(net, 256, [3, 3], scope='conv3_1')
  net = slim.conv2d(net, 256, [3, 3], scope='conv3_2')
  net = slim.conv2d(net, 256, [3, 3], scope='conv3_3')
  net = slim.max_pool2d(net, [2, 2], scope='pool2')
  # Equivalent, using slim.repeat
  net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
  net = slim.max_pool2d(net, [2, 2], scope='pool2')

  x = slim.fully_connected(x, 32, scope='fc/fc_1')
  x = slim.fully_connected(x, 64, scope='fc/fc_2')
  x = slim.fully_connected(x, 128, scope='fc/fc_3')
  # Equivalent, using slim.stack
  x = slim.stack(x, slim.fully_connected, [32, 64, 128], scope='fc')

4.2. Estimator

  # Define the input function for training
  input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'images': mnist.train.images}, y=mnist.train.labels,
      batch_size=batch_size, num_epochs=None, shuffle=True)

  # Define the neural network
  def neural_net(x_dict):
      # TF Estimator input is a dict, in case of multiple inputs
      x = x_dict['images']
      # Hidden fully connected layer with 256 neurons
      layer_1 = tf.layers.dense(x, n_hidden_1)
      # Hidden fully connected layer with 256 neurons
      layer_2 = tf.layers.dense(layer_1, n_hidden_2)
      # Output fully connected layer with a neuron for each class
      out_layer = tf.layers.dense(layer_2, num_classes)
      return out_layer

  # Define the model function (following the TF Estimator template)
  def model_fn(features, labels, mode):
      # Build the neural network
      logits = neural_net(features)
      # Predictions
      pred_classes = tf.argmax(logits, axis=1)
      pred_probas = tf.nn.softmax(logits)
      # If in prediction mode, return early
      if mode == tf.estimator.ModeKeys.PREDICT:
          return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)
      # Define loss and optimizer
      loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=tf.cast(labels, dtype=tf.int32)))
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
      train_op = optimizer.minimize(loss_op,
                                    global_step=tf.train.get_global_step())
      # Evaluate the accuracy of the model
      acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
      # TF Estimators require an EstimatorSpec that specifies
      # the different ops for training, evaluating, ...
      estim_specs = tf.estimator.EstimatorSpec(
          mode=mode,
          predictions=pred_classes,
          loss=loss_op,
          train_op=train_op,
          eval_metric_ops={'accuracy': acc_op})
      return estim_specs

  # Build the Estimator
  model = tf.estimator.Estimator(model_fn)
  # Train the model
  model.train(input_fn, steps=num_steps)
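
Evaluation follows the same pattern; a sketch, assuming an input function built over the test split of the mnist object from section 2.3.1:

  # Evaluate the model on the test set
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'images': mnist.test.images}, y=mnist.test.labels,
      batch_size=batch_size, shuffle=False)
  model.evaluate(test_input_fn)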

Checkpoint

[Figure: checkpoints written during the first call to train()]

  my_checkpointing_config = tf.estimator.RunConfig(
      save_checkpoints_secs=20*60,  # Save checkpoints every 20 minutes.
      keep_checkpoint_max=10,       # Retain the 10 most recent checkpoints.
  )
  classifier = tf.estimator.DNNClassifier(
      feature_columns=my_feature_columns,
      hidden_units=[10, 10],
      n_classes=3,
      model_dir='models/iris',
      config=my_checkpointing_config)

4.3. Dataset

  # It is better to use placeholders, to avoid loading all data into the
  # graph and hitting the 2 GB limit on the size of a single tensor.
  _data = tf.placeholder(tf.float32, [None, n_input])
  _labels = tf.placeholder(tf.float32, [None, n_classes])
  # Create a dataset tensor from the images and the labels
  dataset = tf.data.Dataset.from_tensor_slices((_data, _labels))
  # Create batches of data
  dataset = dataset.batch(batch_size)
  # Create an iterator to go over the dataset
  iterator = dataset.make_initializable_iterator()
  # Initialize the iterator by feeding the actual data
  sess.run(iterator.initializer, feed_dict={_data: mnist.train.images,
                                            _labels: mnist.train.labels})
  # Neural net input
  X, Y = iterator.get_next()
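
A sketch of how the iterator output is then consumed (train_op is assumed to be a training op built on X and Y): once the dataset is exhausted, get_next raises tf.errors.OutOfRangeError, which can be caught to re-initialize for the next epoch.

  for step in range(num_steps):
      try:
          # The model reads its inputs from iterator.get_next()
          sess.run(train_op)
      except tf.errors.OutOfRangeError:
          # Epoch finished: refill the pipeline and continue
          sess.run(iterator.initializer,
                   feed_dict={_data: mnist.train.images,
                              _labels: mnist.train.labels})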

5. Dynamic mode

5.1. Dynamic graph
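
In the define-and-run style used so far, the graph is built once and then executed inside a session; in a define-by-run (dynamic) style, ops execute immediately as the Python code runs, so the graph can differ between iterations and ordinary Python control flow and debuggers apply. TensorFlow offers this style through eager execution, covered next.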


5.2. Eager mode

Eager API basics

  import numpy as np
  import tensorflow as tf

  # Set Eager API (must be called once, at program startup)
  tf.enable_eager_execution()
  tfe = tf.contrib.eager

  # Run operations without the need for tf.Session
  a = tf.constant(2)
  b = tf.constant(3)
  c = a + b
  d = a * b

  # Full compatibility with NumPy
  a = tf.constant([[2., 1.],
                   [1., 0.]], dtype=tf.float32)
  b = np.array([[3., 0.],
                [5., 1.]], dtype=np.float32)
  c = a + b
  d = tf.matmul(a, b)

  # Automatic differentiation
  def square(x):
      return tf.multiply(x, x)

  grad = tfe.gradients_function(square)
  square(3.)  # => 9.0
  grad(3.)    # => [6.0]

A toy example

  class Model(tf.keras.Model):
      def __init__(self):
          super(Model, self).__init__()
          self.W = tfe.Variable(5., name='weight')
          self.B = tfe.Variable(10., name='bias')
      def call(self, inputs):
          return inputs * self.W + self.B

  # A toy dataset of points around 3 * x + 2
  NUM_EXAMPLES = 2000
  training_inputs = tf.random_normal([NUM_EXAMPLES])
  noise = tf.random_normal([NUM_EXAMPLES])
  training_outputs = training_inputs * 3 + 2 + noise

  # The loss function to be optimized
  def loss(model, inputs, targets):
      error = model(inputs) - targets
      return tf.reduce_mean(tf.square(error))

  def grad(model, inputs, targets):
      with tf.GradientTape() as tape:
          loss_value = loss(model, inputs, targets)
      return tape.gradient(loss_value, [model.W, model.B])

  # Define:
  # 1. A model.
  # 2. Derivatives of a loss function with respect to model parameters.
  # 3. A strategy for updating the variables based on the derivatives.
  model = Model()
  optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
  print("Initial loss: {:.3f}".format(loss(model, training_inputs, training_outputs)))

  # Training loop
  for i in range(300):
      grads = grad(model, training_inputs, training_outputs)
      optimizer.apply_gradients(zip(grads, [model.W, model.B]),
                                global_step=tf.train.get_or_create_global_step())
      if i % 20 == 0:
          print("Loss at step {:03d}: {:.3f}".format(
              i, loss(model, training_inputs, training_outputs)))

  print("Final loss: {:.3f}".format(loss(model, training_inputs, training_outputs)))
  print("W = {}, B = {}".format(model.W.numpy(), model.B.numpy()))

Save and restore in eager mode

  import os

  model = MyModel()
  optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
  checkpoint_dir = '/path/to/model_dir'
  checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
  root = tfe.Checkpoint(optimizer=optimizer,
                        model=model,
                        optimizer_step=tf.train.get_or_create_global_step())
  root.save(file_prefix=checkpoint_prefix)
  # or
  root.restore(tf.train.latest_checkpoint(checkpoint_dir))

Tensorboard in eager mode

  writer = tf.contrib.summary.create_file_writer(logdir)
  global_step = tf.train.get_or_create_global_step()  # returns the global step var
  writer.set_as_default()
  for _ in range(iterations):
      global_step.assign_add(1)
      # Must include a record_summaries method
      with tf.contrib.summary.record_summaries_every_n_global_steps(100):
          # your model code goes here
          tf.contrib.summary.scalar('loss', loss)
          ...

Use eager execution in a graph environment

  def my_py_func(x):
      x = tf.matmul(x, x)  # You can use tf ops
      print(x)             # but it's eager!
      return x

  with tf.Session() as sess:
      x = tf.placeholder(dtype=tf.float32)
      # Call the eager function in the graph!
      pf = tfe.py_func(my_py_func, [x], tf.float32)
      sess.run(pf, feed_dict={x: [[2.0]]})  # [[4.0]]

5.3. AutoGraph (beta)

  pip install -U tf-nightly

  from tensorflow.contrib import autograph as ag

Using annotations

  @ag.convert()
  def f(x):
      if x < 0:
          x = -x
      return x

  with tf.Graph().as_default():
      x = tf.constant(-1)
      y = f(x)
      with tf.Session() as sess:
          print(sess.run(y))
  # Output: 1

Using the functional API

  converted_f = ag.to_graph(f)
  print(converted_f(tf.constant(-1)))
  # Output: Tensor
  print(f(-1))
  # Output: 1

  print(ag.to_code(f))
  # Output: <Python and TensorFlow code>

6. Appendices

6.1. Tricks

Learning rate decay

  global_step = tf.Variable(0, trainable=False)
  starter_learning_rate = 0.1
  learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                             100000, 0.96)
  # Passing global_step to minimize() will increment it at each step.
  learning_step = (
      tf.train.GradientDescentOptimizer(learning_rate)
      .minimize(...my loss..., global_step=global_step)
  )

The decayed rate follows:

  decayed_learning_rate = learning_rate *
                          decay_rate ^ (global_step / decay_steps)
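
For example, with the settings above (decay_steps=100000, decay_rate=0.96, no staircase), the rate after 100,000 steps is 0.1 * 0.96 = 0.096, and after 200,000 steps 0.1 * 0.96^2 = 0.09216.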

Find where 'nan' is raised

  check = tf.add_check_numerics_ops()
  ...
  sess.run([check, ...])

Filter which layers to freeze by variable name

  tvars = tf.trainable_variables()
  tvars = [v for v in tvars if 'frozen' not in v.name]
  grads = tf.gradients(loss, tvars)

Leverage Timeline to profile execution time

  from tensorflow.python.client import timeline

  run_metadata = tf.RunMetadata()
  run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  config = tf.ConfigProto(graph_options=tf.GraphOptions(
      optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)))
  with tf.Session(config=config) as sess:
      c_np = sess.run(c, options=run_options, run_metadata=run_metadata)
      tl = timeline.Timeline(run_metadata.step_stats)
      ctf = tl.generate_chrome_trace_format()
      with open('timeline.json', 'w') as wd:
          wd.write(ctf)
  # Open Chrome, go to chrome://tracing,
  # and import timeline.json

......


6.2. Extended libraries

CuPy, Dask

  # Run on a single CPU
  import numpy as np
  x = np.random.random((2, 3))
  y = x.T.dot(np.log(x) + 1)
  z = y - y.mean(axis=0)
  print(z[:5])

  # Run on a GPU
  import cupy as cp
  x = cp.random.random((2, 3))
  y = x.T.dot(cp.log(x) + 1)
  z = y - y.mean()
  print(z[:5].get())

  # Run on many CPUs
  import dask.array as da
  x = da.random.random((2, 3))
  y = x.T.dot(da.log(x) + 1)
  z = y - y.mean(axis=0)
  print(z[:5].compute())

Ray (under development)

  import ray
  ray.init()

  # A regular Python function
  def add1(a, b):
      return a + b

  # A Ray remote function
  @ray.remote
  def add2(a, b):
      return a + b

  x_id = add2.remote(1, 2)
  ray.get(x_id)  # 3

  import time

  def f1():
      time.sleep(1)

  @ray.remote
  def f2():
      time.sleep(1)

  # The following takes ten seconds.
  [f1() for _ in range(10)]
  # The following takes one second (assuming the system has at least ten CPUs).
  ray.get([f2.remote() for _ in range(10)])

  # Object IDs can be passed straight into other remote functions
  @ray.remote
  def f(x):
      return x + 1

  x = f.remote(0)
  y = f.remote(x)
  z = f.remote(y)
  ray.get(z)  # 3

TensorLy

  ......

Suggestions

TensorFlow + Slim (TensorLayer)
Estimator + Checkpoint + TensorBoard
Eager execution if you want




