Remove unused lambda captures
This fixes the "lambda capture <…> is not used" compiler warning (Clang's -Wunused-lambda-capture).

PiperOrigin-RevId: 222318600
jaingaurav authored and tensorflower-gardener committed Nov 20, 2018
1 parent 095b54b commit e16716d
Showing 7 changed files with 16 additions and 19 deletions.
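Every hunk below follows the same pattern: a lambda's explicit capture list names a variable its body never reads, and the fix is to drop that name. As a minimal sketch of the diagnostic and the fix (illustrative code only; SumChannels and its arguments are invented, not taken from this commit):

// repro.cc -- compile with: clang++ -std=c++11 -Wunused-lambda-capture -c repro.cc
#include <cstdint>

float SumChannels(const float* input, int64_t channel_count) {
  // 'channel_count' is captured but never referenced inside the body, so
  // Clang reports: warning: lambda capture 'channel_count' is not used
  auto shard = [channel_count, &input](int64_t start, int64_t end) {
    float total = 0.0f;
    for (int64_t i = start; i < end; ++i) total += input[i];
    return total;
  };
  // The fix, as in every hunk below: capture only what the body uses.
  // auto shard = [&input](int64_t start, int64_t end) { ... };
  return shard(0, channel_count);
}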
2 changes: 1 addition & 1 deletion tensorflow/contrib/image/kernels/adjust_hsv_in_yiq_op.cc
@@ -115,7 +115,7 @@ class AdjustHsvInYiqOp<CPUDevice> : public AdjustHsvInYiqOpBase {
*context->device()->tensorflow_cpu_worker_threads();
Shard(worker_threads.num_threads, worker_threads.workers, channel_count,
kCostPerChannel,
- [channel_count, &input_data, &output_data, &tranformation_matrix](
+ [&input_data, &output_data, &tranformation_matrix](
int64 start_channel, int64 end_channel) {
// Applying projection matrix to input RGB vectors.
const float* p = input_data.data() + start_channel * kChannelSize;
4 changes: 2 additions & 2 deletions tensorflow/core/kernels/adjust_hue_op.cc
@@ -216,8 +216,8 @@ class AdjustHueOp<CPUDevice> : public AdjustHueOpBase {
*context->device()->tensorflow_cpu_worker_threads();
Shard(worker_threads.num_threads, worker_threads.workers, channel_count,
kCostPerChannel,
- [channel_count, &input_data, &output_data, delta_h](
- int64 start_channel, int64 end_channel) {
+ [&input_data, &output_data, delta_h](int64 start_channel,
+ int64 end_channel) {
const float* p = input_data.data() + start_channel * kChannelSize;
float* q = output_data.data() + start_channel * kChannelSize;
for (int i = start_channel; i < end_channel; i++) {
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/barrier_ops.cc
@@ -180,7 +180,7 @@ class Barrier : public ResourceBase {
// SQSS is closed, nothing is left in the incomplete set,
// the queue is not already marked as closed, and (most
// importantly), the queue has entries in it.
- [this, ctx, callback, component_index]() {
+ [this, ctx, callback]() {
if (!ctx->status().ok()) {
callback();
return;
19 changes: 9 additions & 10 deletions tensorflow/core/kernels/deep_conv2d.cc
@@ -434,10 +434,9 @@ struct TransformFilters {
tile_spatial_size, base_filter_spatial_size, transform_matrix);

auto shard = [&ctx, &args, &transform, &base_filter_rows, &base_filter_cols,
- &num_filters_transform, &in_depth, &out_depth,
- &filter_shards_row, &filter_shards_col, &tile_spatial_size,
- &filter_in, &transform_matrix,
- &filter_out](int64 start, int64 limit) {
+ &num_filters_transform, &in_depth, &filter_shards_row,
+ &filter_shards_col, &tile_spatial_size, &filter_in,
+ &transform_matrix, &filter_out](int64 start, int64 limit) {
// Allocate buffer for pre-processed filter:
// [base_filter_rows, base_filter_cols, num_filters_transform, in_depth]
//
@@ -533,9 +532,9 @@ struct PackFilters {
const int64 out_depth = args.out_depth;
const int64 num_filters = filter_shards_row * filter_shards_col * out_depth;

- auto shard = [&ctx, &packed_filters, &filter_transform_data,
- &tile_spatial_size, &in_depth, &out_depth, &filter_shards_row,
- &filter_shards_col, &num_filters](int64 start, int64 limit) {
+ auto shard = [&ctx, &packed_filters, &filter_transform_data, &in_depth,
+ &out_depth, &filter_shards_row, &filter_shards_col,
+ &num_filters](int64 start, int64 limit) {
const int64 filter_coord_stride = num_filters * in_depth;
for (int64 i = start; i < limit; ++i) {
// Allocate filter buffer [out_depth, shard_rows, shard_cols, in_depth].
@@ -1004,9 +1003,9 @@ struct DeepConv2D<CPUDevice, T> {
out_tile_spatial_size, tile_spatial_size, output_transform_matrix);

auto shard = [&ctx, &args, &transform, &packed_filters, &in_depth,
- out_depth, tile_rows, tile_cols, out_tile_rows, out_tile_cols,
- filter_shards_row, filter_shards_col, tile_spatial_size,
- &input, &tile_transform_matrix, &output_transform_matrix,
+ out_depth, out_tile_rows, out_tile_cols, filter_shards_row,
+ filter_shards_col, tile_spatial_size, &input,
+ &tile_transform_matrix, &output_transform_matrix,
&output](int64 batch_start, int64 batch_limit) {
const int64 row_tiles =
(args.out_rows + out_tile_rows - 1) / out_tile_rows +
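Three of the hunks above prune closures handed to TensorFlow's Shard work-sharding helper (from util/work_sharder.h). A rough sketch of that call shape, assuming a hypothetical ShardWork stand-in rather than TensorFlow's real API:

// shard_sketch.cc -- illustrative only.
#include <algorithm>
#include <cstdint>
#include <functional>
#include <thread>
#include <vector>

// Split [0, total) into contiguous ranges and run 'work' on each range
// from its own thread.
void ShardWork(int64_t total, int num_threads,
               const std::function<void(int64_t, int64_t)>& work) {
  const int num = std::max(num_threads, 1);
  const int64_t block = (total + num - 1) / num;
  std::vector<std::thread> threads;
  for (int64_t start = 0; start < total; start += block) {
    const int64_t limit = std::min(start + block, total);
    // The capture list names exactly what the body uses; any extra,
    // unused name here is what -Wunused-lambda-capture flags.
    threads.emplace_back([&work, start, limit] { work(start, limit); });
  }
  for (auto& t : threads) t.join();
}

For scalar captures like the ones removed here, the runtime cost is just dead state in the closure object, so these edits are chiefly about keeping the build warning-clean.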
6 changes: 3 additions & 3 deletions tensorflow/core/kernels/tensor_array_ops.cc
@@ -352,9 +352,9 @@ class TensorArrayGradOp : public TensorArrayCreationOp {
}

const auto key = strings::StrCat(output_handle(0), output_handle(1));
- auto creator = [this, key, tensor_array, array_size, marked_size,
- element_shape, shape_to_prepend, tensor_array_output_handle,
- output_handle](TensorArray** ret) -> Status {
+ auto creator = [key, tensor_array, array_size, marked_size, element_shape,
+ shape_to_prepend,
+ tensor_array_output_handle](TensorArray** ret) -> Status {
*ret = new TensorArray(
key, tensor_array->ElemType(), *tensor_array_output_handle,
array_size, element_shape, tensor_array->HasIdenticalElementShapes(),
1 change: 0 additions & 1 deletion tensorflow/lite/kernels/internal/optimized/optimized_ops.h
@@ -4293,7 +4293,6 @@ inline void LogSoftmax(const SoftmaxParams& params,
using FixedPointScaledDiff =
gemmlowp::FixedPoint<int32, kScaledDiffIntegerBits>;
using FixedPointAccum = gemmlowp::FixedPoint<int32, kAccumulationIntegerBits>;
- using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;

const int trailing_dim = input_shape.DimensionsCount() - 1;
const int outer_size =
1 change: 0 additions & 1 deletion tensorflow/lite/kernels/internal/reference/reference_ops.h
@@ -2736,7 +2736,6 @@ inline void LogSoftmax(const SoftmaxParams& params,
using FixedPointScaledDiff =
gemmlowp::FixedPoint<int32, kScaledDiffIntegerBits>;
using FixedPointAccum = gemmlowp::FixedPoint<int32, kAccumulationIntegerBits>;
- using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;

const int trailing_dim = input_shape.DimensionsCount() - 1;
const int outer_size =
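A caveat worth noting (an observation about the warning, not part of this commit): Clang only diagnoses captures that are spelled out explicitly. Variables picked up through a capture-default are, by construction, only captured when used, so [=] and [&] never trigger it, which is why each of these lists had to be pruned by hand.

// caveat.cc -- illustrative only.
int Demo() {
  int used = 1, unused = 2;
  auto a = [used, unused] { return used; };  // warning: lambda capture 'unused' is not used
  auto b = [=] { return used; };             // no warning: capture-defaults are exempt
  return a() + b();
}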
