Columns:
seed: string, length 25 to 2.89k
seed_api: string, length 14 to 102
index: int64, 0 to 14.8k
import tensorflow as tf
from src.nn_utils.general import exp_mask_for_high_rank, mask_for_high_rank
from src.nn_utils.integration_func import directional_attention_with_dense
from src.nn_utils.nn import bn_dense_layer, linear

def bi_directional_simple_block_attention(
        rep_tensor, rep_mask, block_len=5, scope=None,
        keep_prob=1., is_train=None, wd=0., activation='elu', hn=None):
    with tf.variable_scope(scope or 'bi_directional_simple_block_attn'):
        fw_attn_res = simple_block_attention(
            rep_tensor, rep_mask, block_len, "forward_attn", "forward",
            keep_prob, is_train, wd, activation, hn)
        bw_attn_res = simple_block_attention(
            rep_tensor, rep_mask, block_len, "backward_attn", "backward",
            keep_prob, is_train, wd, activation, hn)
tensorflow.variable_scope
14,700
import tensorflow as tf

                                               num_or_size_splits=num_of_joints, axis=-1)
    losses = []
    # Compute the loss for each keypoint, then accumulate and average
    for i in range(num_of_joints):
        heatmap_pred = tf.squeeze(heatmap_pred_list[i])
        heatmap_true = tf.squeeze(heatmap_true_list[i])
        loss = 0.5 * tf.losses.mean_squared_error(y_pred=heatmap_pred * true_weight[:, i],
                                                  y_true=heatmap_true * true_weight[:, i])
tensorflow.squeeze
14,701
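Since the fragment above is truncated before the tf.split call that produces heatmap_pred_list, here is a minimal, self-contained sketch of what tensorflow.squeeze does. It is an editor-added illustration, not part of the dataset row, and assumes TensorFlow 2.x eager execution:

import tensorflow as tf

# A tensor with size-1 dimensions, e.g. one heatmap of shape (1, 64, 64, 1)
x = tf.zeros([1, 64, 64, 1])
y = tf.squeeze(x)            # all size-1 dims removed -> shape (64, 64)
z = tf.squeeze(x, axis=-1)   # only the last dim removed -> shape (1, 64, 64)
print(y.shape, z.shape)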
import tensorflow as tf

            [self.vocab.word_size() - 2, self.vocab.word_embed_dim],
            dtype=tf.float32,
            initializer=tf.constant_initializer(
                self.vocab.word_embeddings[2:], dtype=tf.float32),
            trainable=False)
        self.word_pad_unk_mat = tf.get_variable(
            "word_unk_pad",
            [2, self.pretrained_word_mat.get_shape()[1]],
            dtype=tf.float32,
            initializer=tf.constant_initializer(
                self.vocab.word_embeddings[:2], dtype=tf.float32),
            trainable=True)
        self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)
        self.pretrained_char_mat = tf.get_variable(
            "char_emb_mat",
            [self.vocab.char_size() - 2, self.vocab.char_embed_dim],
            dtype=tf.float32,
            initializer=tf.constant_initializer(
                self.vocab.char_embeddings[2:], dtype=tf.float32),
            trainable=False)
        self.char_pad_unk_mat = tf.get_variable(
            "char_unk_pad",
            [2, self.pretrained_char_mat.get_shape()[1]],
            dtype=tf.float32,
            initializer=tf.constant_initializer(
                self.vocab.char_embeddings[:2],
tensorflow.concat
14,702
import tensorflow as tf

log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
tensorflow.reshape
14,703
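For context, a minimal editor-added sketch of tensorflow.reshape flattening a label tensor, as in the fragment above (hypothetical values, assumes TensorFlow 2.x eager execution):

import tensorflow as tf

labels = tf.constant([[1, 2, 3], [4, 5, 6]])
flat = tf.reshape(labels, [-1])      # shape (6,)
back = tf.reshape(flat, [2, 3])      # the target shape must preserve the element count
print(flat.numpy(), back.shape)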
import tensorflow as tf

        step_callback: (optional) A function that will be called before each
            optimization step, step_callback(iteration, feed_dict)
        '''
        if self.sess is not None:
            self.sess.close()
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            self._create_model(train_triples)
            self.sess.run(tf.initialize_all_variables())
            batch_provider = self._create_batch_provider(train_triples)
tensorflow.Session
14,704
import tensorflow as tf

            w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
            b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
tensorflow.get_variable
14,705
import tensorflow as tf

    else:
        return tf.reshape(tf.stack(values=h, axis=1), [-1])

def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s

def _ln(x, g, b, e=1e-5, axes=[1]):
    u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
tensorflow.matmul
14,706
import tensorflow as tf

      left_in.append(tf.random_normal((1, size * 2)))
      right_in.append(tf.random_normal((1, size * 2)))
      tracking.append(tf.random_normal((1, tracker_size * 2)))

    out = reducer(left_in, right_in, tracking=tracking)
    self.assertEqual(batch_size, len(out))
    self.assertEqual(tf.float32, out[0].dtype)
    self.assertEqual((1, size * 2), out[0].shape)

  def testReduceTreeLSTM(self):
    with tf.device(self._test_device):
      size = 10
      tracker_size = 8
      reducer = spinn.Reducer(size, tracker_size=tracker_size)

      lstm_in = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                          [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]],
                         dtype=np.float32)
      c1 = np.array([[0, 1], [2, 3]], dtype=np.float32)
      c2 = np.array([[0, -1], [-2, -3]], dtype=np.float32)
tensorflow.device
14,707
import tensorflow as tf

    gan_train_ops = tf.contrib.gan.gan_train_ops(gan_model, gan_loss,
                                                 gen_optimizer, dis_optimizer)

    while_loop = tf.contrib.tpu.while_loop if params['use_tpu'] else tf.while_loop

    # train the discriminator 100 steps
    inputs = [tf.constant(0), tf.constant(0.0)]
    cond = lambda i, x: tf.less(i, 100)

    def body(i, x):
        return tf.add(i, 1), gan_train_ops.discriminator_train_op

    dis_train_op = while_loop(cond, body, inputs)
tensorflow.constant
14,708
import tensorflow as tf

        # Create connected layers: fc1, fc2
        with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected],
                                            normalizer_fn=tf.contrib.layers.batch_norm,
                                            normalizer_params={"is_training": self.train}):
            self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"])
            self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"],
                                                         activation_fn=None)

        # Compute loss
        with tf.name_scope("loss"):
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))

        # Optimizer
        with tf.name_scope("training_op"):
            self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

        # Perf metrics
        with tf.name_scope("accuracy"):
tensorflow.name_scope
14,709
import tensorflow as tf

        kernel = _variable_with_weight_decay('weights',
                                             shape=kernel_shape,
                                             use_xavier=use_xavier,
                                             stddev=stddev,
                                             wd=weight_decay)
        stride_h, stride_w = stride
        outputs = tf.nn.conv2d(inputs, kernel,
                               [1, stride_h, stride_w, 1],
                               padding=padding)
        biases = _variable_on_cpu('biases', [num_output_channels],
                                  tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)

        if bn:
            outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)

        if activation_fn is not None:
            outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
        return outputs
tensorflow.constant_initializer
14,710
import tensorflow as tf

    if alpha > 0:
        return tf.maximum(alpha * x, x, name=name)
    else:
        return tf.nn.relu(x, name=name)
tensorflow.nn.relu
14,711
import tensorflow as tf

    val_losses = np.array(val_losses)
    return (training_losses, val_losses, int(parameter_num))


"""
Test RNN graph 0 step
"""
def test_rnn(test_data_x, test_data_y, g, checkpoint, input_prob, output_prob, state_prob, num_test):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        test_data_yp = np.insert(test_data_y, 0, 0, axis=0)[:-1]

        "read the trained graph"
        g['saver'].restore(sess, checkpoint)

        "run the test points"
        # run the whole sequence, one class one total run
        for index, (X, Y, YP) in enumerate(gen_batch(test_data_x, test_data_y, test_data_yp,
                                                     1, num_test, None)):
tensorflow.Session
14,712
import tensorflow as tf

        self.is_training = tf.placeholder(tf.bool)

        initializer = tf.contrib.layers.variance_scaling_initializer()

        # Embedding Lookup 16
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            if use_he_uniform:
                self.embedding_W = tf.get_variable(name='lookup_W',
                                                   shape=[num_quantized_chars, embedding_size],
                                                   initializer=tf.contrib.layers.variance_scaling_initializer())
            else:
                self.embedding_W = tf.Variable(
                    tf.random_uniform([num_quantized_chars, embedding_size], -1.0, 1.0),
                    name="embedding_W")
            self.embedded_characters = tf.nn.embedding_lookup(self.embedding_W, self.input_x)
            embedded_text_expand = tf.expand_dims(self.embedded_characters, -1)

        with tf.device('/cpu:0'), tf.name_scope("embedding_tags"):
            W_tags = tf.get_variable("embed_W_tags", [tags_vocab_size, embedding_size], initializer=initializer)
            embedded_tags = tf.nn.embedding_lookup(W_tags, self.input_tags)
            embedded_tags_expanded = tf.expand_dims(embedded_tags, -1)

        with tf.device('/cpu:0'), tf.name_scope("embedding_deps"):
            W_deps = tf.get_variable("embed_W_deps", [deps_vocab_size, embedding_size], initializer=initializer)
            embedded_deps = tf.nn.embedding_lookup(W_deps, self.input_deps)
            embedded_deps_expanded = tf.expand_dims(embedded_deps, -1)

        with tf.device('/cpu:0'), tf.name_scope("embedding_head"):
            W_head = tf.get_variable("embed_W_head", [num_quantized_chars, embedding_size], initializer=initializer)
            embedded_head = tf.nn.embedding_lookup(W_head, self.input_head)
            embedded_head_expanded = tf.expand_dims(embedded_head, -1)

        cnn_inputs = tf.concat(
tensorflow.get_variable
14,713
import tensorflow as tf

numpy.random.seed(42)
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph())
tensorflow.set_random_seed
14,714
import tensorflow as tf

    pred, K, reprojected, crit_fake = model(x2d)
    crit_real = model.crit(x3d)
    crit_dis = tf.reduce_mean(tf.square(crit_real - tf.ones_like(crit_real))) + \
               tf.reduce_mean(tf.square(crit_fake - tf.zeros_like(crit_fake)))
    crit_gen = tf.reduce_mean(tf.square(crit_fake - tf.ones_like(crit_fake)))
tensorflow.zeros_like
14,715
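The crit_dis/crit_gen lines above follow a least-squares-GAN-style critic loss. As an editor-added illustration (not part of the dataset row, assumes TensorFlow 2.x eager execution), tensorflow.zeros_like and ones_like build constant targets matching the critic output's shape and dtype:

import tensorflow as tf

crit_fake = tf.constant([0.2, 0.7])
real_target = tf.ones_like(crit_fake)    # [1., 1.]
fake_target = tf.zeros_like(crit_fake)   # [0., 0.]
gen_loss = tf.reduce_mean(tf.square(crit_fake - real_target))
print(gen_loss.numpy())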
import tensorflow as tf

    f2 = tf.reduce_sum(half(masked, 1), 2) / tf.reduce_sum(half(mask, 1))
    return tf.concat([x, f1, f2], 1)


def batch_norm(x, train, name, decay=0.99, epsilon=1e-5):
    shape = x.get_shape().as_list()
    with tf.variable_scope(name):
        beta = tf.get_variable('beta', [shape[-1]], initializer=tf.constant_initializer(0.))
        gamma = tf.get_variable('gamma', [shape[-1]], initializer=tf.random_normal_initializer(1., 0.02))
        pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False)
        pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False)
        if pop_mean not in tf.moving_average_variables():
tensorflow.constant_initializer
14,716
import tensorflow as tf

    raise NotImplementedError()

  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()

  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file."""
    with tf.gfile.Open(input_file, "r") as f:
      reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
      lines = []
      for line in reader:
        lines.append(line)
      return lines


class XnliProcessor(DataProcessor):
tensorflow.gfile.Open
14,717
import tensorflow as tf

      log_r = tf.cond(
          tf.less(t + 1, self.max_seq_len),
          lambda: self.tilt(rnn_out, latent_encoded, self.targets_ta.read(t+1)),
          lambda: 0.)  # On the last step, log_r = 0.
      log_r *= tf.to_float(t < self.seq_lengths - 1)
      weights += log_r - prev_log_r

      new_state = TrainableVRNNState(rnn_state=next_rnn_state,
                                     rnn_out=rnn_out,
                                     latent_encoded=latent_encoded)
tensorflow.to_float
14,718
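tf.to_float, used above, is a TF1-era shorthand for casting to float32; the mask it produces turns a boolean comparison into 0.0/1.0 weights. An editor-added sketch of the equivalent cast in current TensorFlow (illustrative, assumes TF 2.x eager execution):

import tensorflow as tf

# In TF1, tf.to_float(x) was shorthand for tf.cast(x, tf.float32)
in_range = tf.constant([True, False, True])
weights_mask = tf.cast(in_range, tf.float32)   # [1.0, 0.0, 1.0]
print(weights_mask.numpy())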
import tensorflow as tf

  Returns:
    A boolean tensor of shape [M, N], True for entries which are sampled.
  """
  def _minibatch_subsample_fn(inputs):
    indicators, targets = inputs
    return sample_balanced_positive_negative(tf.cast(indicators, tf.bool),
                                             sample_size,
                                             tf.cast(targets, tf.bool),
                                             positive_fraction=positive_fraction)

  return tf.cast(tf.map_fn(_minibatch_subsample_fn,
                           [indicators, labels],
                           dtype=tf.bool,
                           parallel_iterations=16,
                           back_prop=True),
                 dtype=dtype)
tensorflow.cast
14,719
import tensorflow as tf

    mask = tf.equal(mask, tf.ones_like(mask))
    hidden_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    input_size = query.get_shape().as_list()[-1]

    # Trainable parameters
    w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
    b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    with tf.name_scope('v'):
        # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
        # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
        tmp1 = tf.tensordot(facts, w1, axes=1)
        tmp2 = tf.tensordot(query, w2, axes=1)
        tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
        tmp = tf.tanh((tmp1 + tmp2) + b)

    # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
    v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp')  # (B,T) shape
    key_masks = mask  # [B, 1, T]
    # key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
    paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
    v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings)  # [B, 1, T]
    alphas = tf.nn.softmax(v_dot_tmp, name='alphas')  # (B,T) shape
tensorflow.tensordot
14,720
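The shape comment in the fragment above relies on how tensorflow.tensordot with axes=1 contracts the last axis of its first argument against the first axis of its second. An editor-added sketch with hypothetical shapes (assumes TensorFlow 2.x eager execution):

import tensorflow as tf

# (B, T, D) . (D, A) -> (B, T, A), matching the attention projection above
B, T, D, A = 2, 5, 8, 4
facts = tf.random.normal([B, T, D])
w1 = tf.random.normal([D, A])
tmp1 = tf.tensordot(facts, w1, axes=1)
print(tmp1.shape)   # (2, 5, 4)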
import tensorflow as tf

# model related configuration
tf.app.flags.DEFINE_integer(
    'train_image_size', 352,
    'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
    'resnet_size', 50,
    'The size of the ResNet model to use.')
tf.app.flags.DEFINE_integer(
tensorflow.app.flags.DEFINE_integer
14,721
import tensorflow as tf """Define a single cell with variational dropout""" def get_a_cell(state_size,input_prob,state_prob,num_input): if cell_type == 'LSTM': if activation == 'linear': lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.identity, state_is_tuple=True) cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) elif activation == 'relu': lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.nn.relu, state_is_tuple=True) cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) else: #tanh by default lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, state_is_tuple=True) cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) elif cell_type == 'GRU': if activation == 'linear': gru=tf.nn.rnn_cell.GRUCell(state_size, activation = tf.identity) cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) elif activation == 'relu':
tensorflow.nn.rnn_cell.LSTMCell
14,722
import tensorflow as tf

        g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0]
        return tf.gradients(g, self.dummy_x1_tf)[0]
tensorflow.gradients
14,723
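The fragment above nests two tf.gradients calls (with a dummy tensor) to obtain a second derivative. As an editor-added illustration of the basic pattern, without the dummy-tensor trick, here is a minimal sketch assuming TF1-style graph mode via the compat.v1 API:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Nesting two tf.gradients calls gives a second derivative
x = tf.placeholder(tf.float32, shape=[])
y = x ** 3
dy_dx = tf.gradients(y, x)[0]          # 3 * x^2
d2y_dx2 = tf.gradients(dy_dx, x)[0]    # 6 * x

with tf.Session() as sess:
    print(sess.run([dy_dx, d2y_dx2], feed_dict={x: 2.0}))   # [12.0, 12.0]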
import tensorflow as tf

        * Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018.
          Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18
    """

    def __init__(self, data_set, exp_settings, forward_only=False):
        """Create the model.

        Args:
            data_set: (Raw_data) The dataset used to build the input layer.
            exp_settings: (dictionary) The dictionary containing the model settings.
            forward_only: Set true to conduct prediction only, false to conduct training.
        """
        print('Build DLA atten')

        self.hparams = tf.contrib.training.HParams(
            learning_rate=0.05,                 # Learning rate.
            max_gradient_norm=5.0,              # Clip gradients to this norm.
            loss_func='click_weighted_softmax_cross_entropy',  # Select Loss function
            logits_to_prob='softmax',           # the function used to convert logits to probability distributions
            ranker_learning_rate=-1.0,          # The learning rate for ranker (-1 means same with learning_rate).
            ranker_loss_weight=1.0,             # Set the weight of unbiased ranking loss
            l2_loss=0.0,                        # Set strength for L2 regularization.
            l1_loss=0.0,
            max_propensity_weight=-1,           # Set maximum value for propensity weights
            constant_propensity_initialization=False,  # Set true to initialize propensity with constants.
            grad_strategy='ada',                # Select gradient strategy
        )
        print(exp_settings['learning_algorithm_hparams'])
tensorflow.contrib.training.HParams
14,724
import tensorflow as tf

                callback.on_rollout_start()

                if step % self.update_buffer_interval == 0 and step > self.learning_starts:
                    mean_agent = sum(all_r) / sum(all_r_step)
                    mean_exp = sum(all_exp_r) / sum(all_exp_r_step)
                    add_r = mean_agent > mean_exp - 0.5
                    all_r = []
                    all_exp_r = []
                    all_r_step = []
                    all_exp_r_step = []
                    if add_r:
                        self.ratio = min(self.ratio + 2 / self.batch_size, self.max_ratio)
                    else:
                        self.ratio = max(self.ratio - 1 / self.batch_size, self.init_ratio)
                    print('|new-ratio:', self.ratio, '|mean-agent:', mean_agent, '|mean-exp:', mean_exp - 0.5, '|')
                    smry = tf.Summary(value=[tf.Summary.Value(tag="ratio", simple_value=self.ratio)])
                    writer.add_summary(smry, step)

                episode_rewards[-1] += reward_
                if done:
                    if self.action_noise is not None:
                        self.action_noise.reset()
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    # if episode_rewards[-1] >= mean_expert_reward:
                    #     self.ratio = np.clip((self.ratio+1/self.batch_size),0,60/self.batch_s
                    episode_rewards.append(0.0)
                    maybe_is_success = info.get('is_success')
tensorflow.Summary.Value
14,725
import tensorflow as tf

                rnn_params, base_variable_scope="Model/RNN")
            tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)

        self._cost = tf.get_collection_ref(util.with_prefix(self._name, "cost"))[0]
tensorflow.add_to_collection
14,726
import tensorflow as tf

        return tf.reduce_mean(loss)

    loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample),
                     dtype=tf.float32, parallel_iterations=32)
    final_loss = tf.reduce_mean(loss)
    return final_loss


def contra_traj_lossV1(pred, tgt, temp=10.0):
    # Trajectory-wise contrastive loss
    traj_pred = tf.reduce_mean(pred, axis=1)
    traj_tgt = tf.reduce_mean(tgt, axis=1)
    p1, p2 = tf.split(traj_pred, 2, axis=0)
    t1, t2 = tf.split(traj_tgt, 2, axis=0)

    soft_sign = tf.tanh((t1 - t2) * temp)
    loss = tf.maximum(0.0, soft_sign * ((t1 - t2) - (p1 - p2)))
    loss = tf.reduce_mean(loss)
    return loss


def horizon_sumV1(input, horizon=12):
    bs, epi_len = input.shape[:2]
    new_w = epi_len - horizon + 1
tensorflow.reduce_mean
14,727
import tensorflow as tf

    pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
    pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

    # Make a prediction for each tweet.
    def GetWordPred(o_):
      logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
      return tf.nn.softmax(logits)

    preds = GetWordPred(wvsum)
    z = tf.tile(tf.reshape(tf.reduce_sum(preds, 1), [-1, 1]), [1, out_vocab_size])
    self.preds, self.z = preds, z
    self.probs = tf.div(preds, z)  # normalize
tensorflow.nn.softmax
14,728
import tensorflow as tf

def train_rnn_multi(raw_data_x, raw_data_y, val_data_x, val_data_y, timeindex_train, timeindex_val,
                    g, num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob,
                    epoch_before_val=50, max_checks_without_progress=50, epoch_overlap=None,
                    verbose=True, save=False):
    with tf.Session() as sess:
        "initialize the variables"
tensorflow.Session
14,729
import tensorflow as tf

    examples_per_sec = num_epochs * num_batches * batch_size / wall_time
    self.report_benchmark(
        name="eager_train_%s" % ("gpu" if tfe.num_gpus() > 0 else "cpu"),
        iters=num_epochs * num_batches,
        extras={"examples_per_sec": examples_per_sec},
        wall_time=wall_time)


if __name__ == "__main__":
  tf.enable_eager_execution()
  tf.test.main()
tensorflow.test.main
14,730
import tensorflow as tf

    Args:
      var_name: name of variable as a string.
    """
    if var_name not in self._initializers:
      if var_name == self.GAMMA:
        self._initializers[self.GAMMA] = tf.ones_initializer()
      elif var_name == self.BETA:
        self._initializers[self.BETA] = tf.zeros_initializer()

  def _build_statistics_variance(self, input_batch,
                                 reduction_indices, use_batch_stats):
    """Builds the statistics part of the graph when using moving variance.

    Args:
      input_batch: Input batch Tensor.
tensorflow.zeros_initializer
14,731
import tensorflow as tf

    n_neg_to_select = tf.cast(params['negative_ratio'] * n_positives, tf.int32)
    n_neg_to_select = tf.minimum(n_neg_to_select, tf.cast(n_negtives, tf.int32))
tensorflow.cast
14,732
import tensorflow as tf """ max_time = 8 batch_size = 16 inputs = tf.random_uniform([batch_size, max_time], maxval=30521, dtype=tf.int32)
tensorflow.random_uniform
14,733
from tensorflow.python.ops import random_ops

    else:
      gradient_shape = gradient.get_shape()
      noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
      noisy_gradients.append(gradient + noise)
tensorflow.python.ops.random_ops.truncated_normal
14,734
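The fragment above perturbs each gradient with truncated-normal noise via the internal random_ops module. Through the public API the same idea looks roughly like this editor-added sketch (illustrative values, assumes TensorFlow 2.x eager execution):

import tensorflow as tf

gradient = tf.constant([0.5, -1.2, 0.3])
gradient_noise_scale = 0.01
# truncated_normal resamples values beyond two standard deviations
noise = tf.random.truncated_normal(tf.shape(gradient)) * gradient_noise_scale
noisy_gradient = gradient + noise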
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2

    path_values: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  result = _op_def_lib.apply_op("UnpackPath", path=path,
                                path_values=path_values, name=name)
  return result


def _InitOpDefLibrary():
  op_list = _op_def_pb2.OpList()
  _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
  _op_def_registry.register_op_list(op_list)
  op_def_lib = _op_def_library.OpDefLibrary()
  op_def_lib.add_op_list(op_list)
  return op_def_lib


_InitOpDefLibrary.op_list_ascii = """op {
  name: "HardRoutingFunction"
  input_arg {
    name: "input_data"
tensorflow.core.framework.op_def_pb2.OpList
14,735
import tensorflow as tf

    by mistake.
    """
    def fun_(*args, **kwargs):
        try:
            return fun(*args, **kwargs)
        except ValueError as e:
            if 'reuse' in str(e):
                with tf.variable_scope(tf.get_variable_scope(), reuse=True):
                    return fun(*args, **kwargs)
            else:
                raise e
    return fun_
tensorflow.get_variable_scope
14,736
import tensorflow as tf

        min_score_thresh=0.65,
        min_iou_thresh=0.5,
        is_class_agnostic=False)
    nms_masks_expected2 = tf.stack([mask2, mask0, mask5, mask4])
    nms_scores_expected2 = tf.constant([0.95, 1.0, 0.8, 0.7], dtype=tf.float32)
    nms_classes_expected2 = tf.constant([0, 1, 2, 2], dtype=tf.int32)
    self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy())
    self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy())
    self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy())
    self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy())
    self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy())
    self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy())

  def test_points_mask_iou(self):
    masks1 = tf.constant([[0, 0, 0, 0, 0],
                          [1, 1, 1, 1, 1],
                          [1, 0, 1, 0, 1],
                          [0, 1, 0, 1, 0]], dtype=tf.int32)
    masks2 = tf.constant([[0, 0, 0, 0, 0],
                          [1, 1, 1, 1, 1],
                          [1, 0, 1, 0, 1]], dtype=tf.int32)
    iou = isu.points_mask_iou(masks1=masks1, masks2=masks2)
    expected_iou = tf.constant([[0, 0, 0],
                                [0, 1, 0.6],
                                [0, 0.6, 1.0],
                                [0, 0.4, 0]], dtype=tf.float32)
    self.assertAllClose(iou.numpy(), expected_iou.numpy())
tensorflow.constant
14,737
import tensorflow as tf

    else:
        return -tf.reduce_sum(log_sum_exp(log_probs), [1, 2])


def mse_loss(pred, labels):
    try:
        batch_size = tf.cast(pred.shape[0], tf.float32)
    except Exception as e:
        print('Pred is a tf tensor %s' % str(e))
        batch_size = tf.cast(tf.shape(pred)[0], tf.float32)
    loss_val = tf.sqrt(2 * tf.nn.l2_loss(pred - labels)) / batch_size
    return loss_val


def pullaway_loss(embeddings, name='pullaway_loss'):
    """Pull Away loss calculation.

    Args:
        embeddings: The embeddings to be orthogonalized for varied faces.
            Shape [batch_size, embeddings_dim]
tensorflow.nn.l2_loss
14,738
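For the mse_loss above it helps to know that tf.nn.l2_loss(t) computes sum(t ** 2) / 2, so sqrt(2 * l2_loss(pred - labels)) is the Euclidean norm of the error before dividing by the batch size. An editor-added sketch (assumes TensorFlow 2.x eager execution):

import tensorflow as tf

err = tf.constant([3.0, 4.0])
half_sq = tf.nn.l2_loss(err)          # (9 + 16) / 2 = 12.5
norm = tf.sqrt(2.0 * half_sq)         # 5.0, the L2 norm of err
print(half_sq.numpy(), norm.numpy())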
import tensorflow as tf

                x, y_size[:-1], kernel, align_corners=False)
            resized = tf.nn.conv3d_transpose(
                value=resized,
                filter=kernel,
                output_shape=y_size,
                strides=[1, 1, 1, 1, 1],
                padding='SAME',
                name='resize_x_to_y')
            resized = tf.nn.bias_add(
                resized,
                bias)
            resized = self.ff_nl(resized)
            return resized
        else:
            raise NotImplementedError(mode)

    def conv_3d_op(
            self,
            data,
tensorflow.nn.bias_add
14,739
import tensorflow as tf

            td_map[self.train_model.states_ph] = states
            td_map[self.train_model.dones_ph] = masks
            td_map[self.polyak_model.states_ph] = states
            td_map[self.polyak_model.dones_ph] = masks

        if writer is not None:
            # run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)
            if self.full_tensorboard_log and (1 + (steps / self.n_batch)) % 10 == 0:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                step_return = self.sess.run([self.summary] + self.run_ops, td_map,
                                            options=run_options, run_metadata=run_metadata)
                writer.add_run_metadata(run_metadata, 'step%d' % steps)
            else:
                step_return = self.sess.run([self.summary] + self.run_ops, td_map)
            writer.add_summary(step_return[0], steps)
tensorflow.RunOptions
14,740
import tensorflow as tf "target_action": tf.zeros( obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32), "target_reward": tf.zeros( obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32), "target_policy": tf.zeros( obs_shape[:1] + [num_target_frames] + [action_space.n]), "target_value": tf.zeros( obs_shape[:1] + target_value_shape_suffix) } model.distributional_value_size = max(distributional_size, 1) model.use_epochs = hparams.use_epochs with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
tensorflow.zeros
14,741
import tensorflow as tf

      arc_seq = arc_seq.write(start_id + 2 * i, index)
      curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
tensorflow.nn.sparse_softmax_cross_entropy_with_logits
14,742
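The call above is cut off mid-argument list. A complete, minimal invocation of tensorflow.nn.sparse_softmax_cross_entropy_with_logits looks like this editor-added sketch (hypothetical logits and labels, assumes TensorFlow 2.x eager execution):

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0],
                      [0.1, 0.2, 3.0]])
labels = tf.constant([0, 2])            # integer class ids, not one-hot
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
print(loss.shape)                       # one loss value per example: (2,)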
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten

        # Block 1
        conv1a = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[8, 8], strides=4,
                        data_format='channels_last', kernel_initializer=w_init,
                        activation=tf.nn.relu)(self.inputs)
        conv1b = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1,
                        data_format='channels_last', kernel_initializer=w_init,
                        activation=tf.nn.relu)(conv1a)
        conv1c = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1,
                        data_format='channels_last', kernel_initializer=w_init,
                        activation=tf.nn.relu)(conv1b)
tensorflow.keras.layers.Conv2D
14,743
import tensorflow as tf

def dense(x, num_units, scope="dense", training=True, ema=None, init=False,
          bias_initializer=tf.constant_initializer(0.)):
    with tf.variable_scope(scope):
tensorflow.constant_initializer
14,744
import tensorflow as tf

    if isinstance(metric, ShapeAccuracyMetric):
      labels = sample['shapes']
      weights = tf.math.sign(labels + 1)  # -1 is mapped to zero, else 1
      metric.update(labels, detections['shapes_logits'], weights)
    elif isinstance(metric, BoxIoUMetric):
      scene_id = str(sample['scene_filename'].numpy(), 'utf-8')

      # Get ground truth boxes
      labeled_boxes = labeled_boxes_init
      if metric.threed:
        rotations_y = tf.concat([tf_utils.euler_from_rotation_matrix(
            tf.reshape(detections['rotations_3d'][i], [3, 3]), 1)
            for i in range(num_boxes)], axis=0)
        rotations_y = tf.reshape(rotations_y, [-1, 1])
        labeled_boxes = tf.concat([sample['translations_3d'], sample['sizes_3d'],
                                   rotations_y], axis=1)

      # Get predicted boxes
      predicted_boxes = detections['detection_boxes']
      if metric.threed:
        rotations_y = tf.concat([tf_utils.euler_from_rotation_matrix(
            tf.reshape(detections['rotations_3d'][i], [3, 3]), 1)
            for i in range(num_boxes)], axis=0)
        rotations_y = tf.reshape(rotations_y, [-1, 1])
        predicted_boxes = tf.concat([detections['translations_3d'],
tensorflow.reshape
14,745
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from Bunch import Bunch

tf.app.flags.DEFINE_string('input_path', '../data/tmp/grid03.14.c.tar.gz', 'input folder')
tf.app.flags.DEFINE_string('input_name', '', 'input folder')
tf.app.flags.DEFINE_string('test_path', '', 'test set folder')
tf.app.flags.DEFINE_string('net', 'f100-f3', 'model configuration')
tf.app.flags.DEFINE_string('model', 'noise', 'Type of the model to use: Autoencoder (ae)'
                                             'WhatWhereAe (ww) U-netAe (u)')
tf.app.flags.DEFINE_string('postfix', '', 'Postfix for the training folder')
tf.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight')
tf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight')
tensorflow.app.flags.DEFINE_string
14,746
import tensorflow as tf

            # Gain bias
            bias_shape = [1, 1, 1, 1, self.hgru_k[idx]]
            if self.gate_bias_init == 'chronos':
                bias_init = -tf.log(
                    tf.random_uniform(
                        bias_shape,
                        minval=1,
                        maxval=self.timesteps - 1,
                        dtype=self.dtype))
            else:
                bias_init = tf.ones(bias_shape, dtype=self.dtype)
            setattr(
                self,
                'gain_bias_%s' % layer,
                tf.get_variable(
                    name='%s_gain_bias' % self.layer_name,
                    dtype=self.dtype,
                    trainable=True,
                    initializer=bias_init))
            if self.gate_bias_init == 'chronos':
                bias_init = -bias_init
            else:
                bias_init = tf.ones(bias_shape, dtype=self.dtype)
            setattr(
                self,
                'mix_bias_%s' % layer,
                tf.get_variable(
                    name='%s_mix_bias' % self.layer_name,
tensorflow.get_variable
14,747
import tensorflow as tf

        print('\rTrained in %.3fs. Global step %i' % (time() - start, step + 1))
        return summary


class PPO_HC(PPO):
    def build_anet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
            # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params
tensorflow.layers.dense
14,748
import tensorflow as tf

REPLACE_ITER_C = 1500
MEMORY_CAPACITY = 200000
BATCH_SIZE = 32
DISPLAY_THRESHOLD = 100  # display until the running reward > 100
DATA_PATH = './data'
LOAD_MODEL = False
SAVE_MODEL_ITER = 100000
RENDER = False
OUTPUT_GRAPH = False
ENV_NAME = 'BipedalWalker-v2'

GLOBAL_STEP = tf.Variable(0, trainable=False)
INCREASE_GS = GLOBAL_STEP.assign(tf.add(GLOBAL_STEP, 1))
LR_A = tf.train.exponential_decay(LR_A, GLOBAL_STEP, 10000, .97, staircase=True)
LR_C = tf.train.exponential_decay(LR_C, GLOBAL_STEP, 10000, .97, staircase=True)
END_POINT = (200 - 10) * (14/30)  # from game

env = gym.make(ENV_NAME)
env.seed(1)

STATE_DIM = env.observation_space.shape[0]  # 24
ACTION_DIM = env.action_space.shape[0]  # 4
ACTION_BOUND = env.action_space.high  # [1, 1, 1, 1]

# all placeholder for tf
with tf.name_scope('S'):
    S = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s')
with tf.name_scope('R'):
tensorflow.train.exponential_decay
14,749
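The schedule above multiplies the base learning rate by 0.97 once every 10000 global steps (staircase=True rounds the exponent down). An editor-added minimal sketch of tensorflow.train.exponential_decay, assuming TF1-style graph mode via the compat.v1 API:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

global_step = tf.Variable(0, trainable=False)
lr = tf.train.exponential_decay(0.001, global_step, 10000, 0.97, staircase=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(lr))                     # 0.001 at step 0
    sess.run(global_step.assign(20000))
    print(sess.run(lr))                     # 0.001 * 0.97 ** 2 after two decay periods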
import tensorflow as tf

    def build_value(self, _input):
        with tf.variable_scope('VF'):
            hidden = tf.layers.dense(inputs=_input,
                                     units=self.vf_hidden_size,
                                     activation=tf.nn.elu)
            w = tf.get_variable("weights", (self.vf_hidden_size, 1))
            return tf.matmul(hidden, w)

    def build_loss(self):
        cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1])
        dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1)
        gcut = tf.stop_gradient(self.g)
        mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001
        dcos = dot / mag
        manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos)

        cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1])
        log_p = tf.reduce_sum(self.log_pi * self.ac, [1])
        worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p
tensorflow.stop_gradient
14,750
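In the loss above, tensorflow.stop_gradient wraps the value baselines and goals so they contribute their values but receive no gradient from the policy loss. An editor-added minimal sketch of the op's behavior (assumes TensorFlow 2.x eager execution):

import tensorflow as tf

# stop_gradient passes the value through unchanged but blocks backpropagation
x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x * tf.stop_gradient(x)    # treated as x * constant(3.0)
grad = tape.gradient(y, x)
print(grad.numpy())                # 3.0, not 6.0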
import tensorflow as tf

def avg_norm(t):
    return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1)))


def gradient_add(g1, g2, param):
    print([g1, g2, param.name])
    assert (not (g1 is None and g2 is None)), param.name
    if g1 is None:
        return g2
    elif g2 is None:
        return g1
    else:
        return g1 + g2


def q_explained_variance(qpred, q):
    _, vary = tf.nn.moments(q, axes=[0, 1])
    _, varpred = tf.nn.moments(q - qpred, axes=[0, 1])
    check_shape([vary, varpred], [[]] * 2)
    return 1.0 - (varpred / vary)
tensorflow.nn.moments
14,751
import tensorflow as tf

  def assign_lr(self, session, lr_value):
    session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

  def export_ops(self, name):
    """Exports ops to collections."""
    self._name = name
    ops = {util.with_prefix(self._name, "cost"): self._cost}
    if self._is_training:
      ops.update(lr=self._lr, new_lr=self._new_lr, lr_update=self._lr_update)
      if self._rnn_params:
        ops.update(rnn_params=self._rnn_params)
    for name, op in ops.items():
      tf.add_to_collection(name, op)
    self._initial_state_name = util.with_prefix(self._name, "initial")
    self._final_state_name = util.with_prefix(self._name, "final")
    util.export_state_tuples(self._initial_state, self._initial_state_name)
    util.export_state_tuples(self._final_state, self._final_state_name)

  def import_ops(self):
    """Imports ops from collections."""
    if self._is_training:
      self._train_op = tf.get_collection_ref("train_op")[0]
      self._lr = tf.get_collection_ref("lr")[0]
      self._new_lr = tf.get_collection_ref("new_lr")[0]
      self._lr_update = tf.get_collection_ref("lr_update")[0]
tensorflow.add_to_collection
14,752
import tensorflow as tf

@registry.register_model
class FeedForwardCategoricalPolicy(PolicyBase):
  """Feed-forward categorical."""

  def body(self, features):
    observations = features["inputs_raw"]
    observations = tf.cast(observations, tf.float32)
    flat_observations = tf.layers.flatten(observations)
    with tf.variable_scope("policy"):
      x = flat_observations
      for size in self.hparams.policy_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      logits = tf.layers.dense(x, self.hparams.problem.num_actions)
      logits = tf.expand_dims(logits, axis=1)
    with tf.variable_scope("value"):
tensorflow.layers.flatten
14,753
import tensorflow as tf

            layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)
            vf = tf.layers.dense(layer_c2, 1, kernel_regularizer=reg)
tensorflow.layers.dense
14,754
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops


class BatchedSpMM:
    def __init__(self):
        self.b_module = tf.load_op_library('./batched.so')

    def call(self, sp_matrices, dense_matrices, adjoint_a=False, adjoint_b=False):
        sp_indices = [sp_m.indices for sp_m in sp_matrices]
        sp_values = [sp_m.values for sp_m in sp_matrices]
        sp_shape = [sp_m.dense_shape for sp_m in sp_matrices]
        return self.b_module.bspmm(sp_ids=sp_indices, sp_values=sp_values, sp_shape=sp_shape,
                                   rhs=dense_matrices, adjoint_a=adjoint_a, adjoint_b=adjoint_b)


class BatchedSpMDT:
    def __init__(self):
tensorflow.load_op_library
14,755
import tensorflow as tf

    # Add weight decay to the loss. We exclude the batch norm variables because
    # doing so leads to a small improvement in accuracy.
    loss = cross_entropy + loc_loss + params['weight_decay'] * tf.add_n(
        [tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name])
    total_loss = tf.identity(loss, name='total_loss')

    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']]
        learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                                    [int(_) for _ in params['decay_boundaries']],
                                                    lr_values)
        truncated_learning_rate = tf.maximum(learning_rate,
                                             tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype))
        # Create a tensor named learning_rate for logging purposes.
        tf.identity(truncated_learning_rate, name='learning_rate')
        tf.summary.scalar('learning_rate', truncated_learning_rate)

        optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,
                                               momentum=params['momentum'])

        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss, global_step)
    else:
        train_op = None
tensorflow.constant
14,756
import tensorflow as tf

        elif decoder.update_first:
            output, state = update(state, input_, None, input_symbol)
            context, new_weights = look(time, output, input_, pos=pos, prev_weights=prev_weights,
                                        context=context)

        if decoder.conditional_rnn:
            with tf.variable_scope('conditional_2'):
                output, state = update(state, context)
        elif not decoder.generate_first:
            output, state = update(state, input_, context, input_symbol)

        output_ = generate(output, input_, context)

        argmax = lambda: tf.argmax(output_, 1)
        target = lambda: inputs.read(time + 1)
        softmax = lambda: tf.squeeze(tf.multinomial(tf.log(tf.nn.softmax(output_)), num_samples=1),
                                     axis=1)

        use_target = tf.logical_and(time < time_steps - 1, tf.random_uniform([]) >= feed_previous)
        predicted_symbol = tf.case([
            (use_target, target),
            (tf.logical_not(feed_argmax), softmax)],
            default=argmax)   # default case is useful for beam-search

        predicted_symbol.set_shape([None])
        predicted_symbol = tf.stop_gradient(predicted_symbol)
tensorflow.argmax
14,757
import tensorflow as tf

def evaluate_legendre_polynomial(degree_l: TensorLike,
                                 order_m: TensorLike,
                                 x: TensorLike) -> TensorLike:
  degree_l = tf.convert_to_tensor(value=degree_l)
  order_m = tf.convert_to_tensor(value=order_m)
  x = tf.convert_to_tensor(value=x)
  pmm = _evaluate_legendre_polynomial_pmm_eval(order_m, x)
  return tf.where(
      tf.equal(degree_l, order_m), pmm,
      _evaluate_legendre_polynomial_branch(degree_l, order_m, x, pmm))


def _spherical_harmonics_normalization(l, m, var_type=tf.float64):
  l = tf.cast(l, dtype=var_type)
  m = tf.cast(m, dtype=var_type)
  numerator = (2.0 * l + 1.0) * factorial(l - tf.abs(m))
  denominator = 4.0 * np.pi * factorial(l + tf.abs(m))
  return tf.sqrt(numerator / denominator)
tensorflow.equal
14,758
import tensorflow as tf

    Arguments:
    Y_labels -- ground truth vector
    N_classes -- the number of classes in the ground truth vector
    N_ch -- number of channels, if any (for the feature vector only)

    Returns:
    one_hot -- one hot matrix encoding
    """
    # Create a TensorFlow constant equal to the number of classes
    C = tf.constant(N_classes, name="C")
    one_hot_matrix = tf.one_hot(vect - 1, C, axis=0)  # axis=0 means it is mapping to column vectors
    if N_ch != 0:
        one_hot_matrix = tf.expand_dims(one_hot_matrix, 1)

    # Create a TensorFlow session
    sess = tf.Session()
    vect_hot = sess.run(one_hot_matrix)
    sess.close()

    return vect_hot


# Place holders for the input/output data
tensorflow.expand_dims
14,759
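As an editor-added illustration of the expand_dims step in the helper above (not part of the dataset row; assumes TensorFlow 2.x eager execution, so no Session is needed):

import tensorflow as tf

# expand_dims inserts a size-1 axis; combined with tf.one_hot it turns a label
# vector into a (classes, 1, batch)-shaped tensor like the helper above
vect = tf.constant([1, 2, 3])
one_hot_matrix = tf.one_hot(vect - 1, 3, axis=0)        # shape (3, 3)
with_channel = tf.expand_dims(one_hot_matrix, 1)        # shape (3, 1, 3)
print(one_hot_matrix.shape, with_channel.shape)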
import tensorflow as tf

        with tf.variable_scope('target_q'):
            self.target_q = R + self.gamma * self.q_
        with tf.variable_scope('abs_TD'):
            self.abs_td = tf.abs(self.target_q - self.q)
        self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
        with tf.variable_scope('TD_error'):
            self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))
        with tf.variable_scope('C_train'):
            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)
tensorflow.placeholder
14,760
import tensorflow as tf

def correlation_loss(source_samples, target_samples, weight, name='corr_loss'):
    """Adds a similarity loss term, the correlation between two representations.

    Args:
        source_samples: a tensor of shape [num_samples, num_features]
        target_samples: a tensor of shape [num_samples, num_features]
        weight: a scalar weight for the loss.
        scope: optional name scope for summary tags.

    Returns:
        a scalar tensor representing the correlation loss value.
    """
    with tf.name_scope(name):
        source_samples -= tf.reduce_mean(source_samples, 0)
        target_samples -= tf.reduce_mean(target_samples, 0)

        source_samples = tf.nn.l2_normalize(source_samples, 1)
        target_samples = tf.nn.l2_normalize(target_samples, 1)

        source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
        target_cov = tf.matmul(tf.transpose(target_samples), target_samples)

        corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight

    assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])
    with tf.control_dependencies([assert_op]):
        tag = 'Correlation Loss'
        barrier = tf.no_op(tag)
tensorflow.reduce_mean
14,761
import tensorflow as tf

        q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
        perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
        kl = tf.reduce_sum(
            tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))),
            axis=-1)
        mean_kl = tf.reduce_mean(kl)
tensorflow.nn.softmax
14,762
from tensorflow.python.platform import gfile

      self.assertEqual([], save.last_checkpoints)

      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)
      self.assertTrue(gfile.Exists(s1))

      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
tensorflow.python.platform.gfile.Exists
14,763
import tensorflow as tf

            trainable=True
        )
        self.ch_len = tf.reshape(tf.reduce_sum(
            tf.cast(tf.cast(self.ch, tf.bool), tf.int32), axis=2), [-1])
        self.qh_len = tf.reshape(tf.reduce_sum(
            tf.cast(tf.cast(self.qh, tf.bool), tf.int32), axis=2), [-1])
tensorflow.cast
14,764
import tensorflow as tf

        W_p = self._make_var('W_p', (1, 1, ch_mul * ch, ch))
        X = tf.nn.relu(X)
tensorflow.nn.relu
14,765