Commit

Fixed the build warnings and added gitignore.
zuyu committed Jul 24, 2017
1 parent b7db8be commit d5f0214
Showing 7 changed files with 58 additions and 21 deletions.
38 changes: 38 additions & 0 deletions .gitignore
@@ -0,0 +1,38 @@
# CMake build directory
*build*/

# Backup files.
*~

# Prerequisites
*.d

# Compiled Object files
*.slo
*.lo
*.o
*.obj

# Precompiled Headers
*.gch
*.pch

# Compiled Dynamic libraries
*.so
*.dylib
*.dll

# Fortran module files
*.mod
*.smod

# Compiled Static libraries
*.lai
*.la
*.a
*.lib

# Executables
*.exe
*.out
*.app
1 change: 1 addition & 0 deletions src/cpu.cpp
@@ -428,6 +428,7 @@ int set_cpu_powersave(int powersave)
return -1;
#else
// TODO
(void) powersave; // Avoid unused parameter warning.
return -1;
#endif
}
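
Background on this hunk: casting a parameter to void is a conventional way to mark it as intentionally unused, which silences -Wunused-parameter without changing behavior. A minimal standalone sketch of the pattern (the function name below is hypothetical, not part of ncnn):

// unused_param_sketch.cpp -- illustrates the (void) cast used above.
int set_option_stub(int powersave)
{
    (void) powersave; // parameter is deliberately ignored until the TODO is implemented
    return -1;
}

int main() { return set_option_stub(1) == -1 ? 0 : 1; }
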
1 change: 0 additions & 1 deletion src/layer/x86/convolution_3x3.h
@@ -15,7 +15,6 @@
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;

int outw = top_blob.w;
1 change: 0 additions & 1 deletion src/layer/x86/convolution_5x5.h
@@ -15,7 +15,6 @@
static void conv5x5s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;

int outw = top_blob.w;
1 change: 0 additions & 1 deletion src/layer/x86/convolution_x86.cpp
@@ -81,7 +81,6 @@ int Convolution_x86::forward(const Mat& bottom_blob, Mat& top_blob) const

int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;

Mat bottom_blob_bordered = bottom_blob;
if (pad > 0)
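
Background on the three one-line deletions above (convolution_3x3.h, convolution_5x5.h, convolution_x86.cpp): each removed a local that was assigned but never read, which GCC/Clang flag as -Wunused-variable. A small self-contained sketch of the same kind of fix, with illustrative names only:

#include <cstdio>

static void conv_stub(int bottom_w, int bottom_h, int bottom_c)
{
    int w = bottom_w;
    // int h = bottom_h;   // assigned but never read -> -Wunused-variable; simply delete it
    int inch = bottom_c;
    (void) bottom_h;       // the parameter itself stays in the signature, so mark it used
    std::printf("w=%d inch=%d\n", w, inch);
}

int main() { conv_stub(224, 224, 3); return 0; }
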
32 changes: 16 additions & 16 deletions tools/caffe2ncnn.cpp
@@ -114,12 +114,12 @@ static int quantize_weight(float *data, size_t data_length, std::vector<unsigned
}

static bool quantize_weight(float *data, size_t data_length, int quantize_level, std::vector<float> &quantize_table, std::vector<unsigned char> &quantize_index) {

assert(quantize_level != 0);
assert(data != NULL);
assert(data_length > 0);

if (data_length < quantize_level) {
if (data_length < static_cast<size_t>(quantize_level)) {
fprintf(stderr, "No need quantize,because: data_length < quantize_level");
return false;
}
@@ -131,7 +131,7 @@ static bool quantize_weight(float *data, size_t data_length, int quantize_level,
float max_value = std::numeric_limits<float>::min();
float min_value = std::numeric_limits<float>::max();

for (int i = 0; i < data_length; ++i)
for (size_t i = 0; i < data_length; ++i)
{
if (max_value < data[i]) max_value = data[i];
if (min_value > data[i]) min_value = data[i];
Expand All @@ -145,7 +145,7 @@ static bool quantize_weight(float *data, size_t data_length, int quantize_level,
}

// 3. Align data to the quantized value
for (int i = 0; i < data_length; ++i)
for (size_t i = 0; i < data_length; ++i)
{
size_t table_index = int((data[i] - min_value) / strides);
table_index = std::min<float>(table_index, quantize_level - 1);
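
Background on this hunk: data_length is a size_t, so comparing it against an int bound or indexing with an int mixes signed and unsigned types and triggers -Wsign-compare. The fix casts the signed bound once and switches the loop indices to size_t. A simplified standalone sketch (not the converter's actual API):

#include <cstddef>
#include <vector>

static float sum_if_enough(const std::vector<float>& data, int quantize_level)
{
    // size() returns size_t, so both the comparison and the index use size_t.
    if (data.size() < static_cast<std::size_t>(quantize_level))
        return 0.f;
    float sum = 0.f;
    for (std::size_t i = 0; i < data.size(); ++i)
        sum += data[i];
    return sum;
}

int main() { std::vector<float> v(300, 1.f); return sum_if_enough(v, 256) > 0.f ? 0 : 1; }
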
@@ -218,7 +218,7 @@ int main(int argc, char** argv)
int quantize_level = atoi(quantize_param);

if (quantize_level != 0 && quantize_level != 256 && quantize_level != 65536) {
fprintf(stderr, "only support quantize level = 0 or level = 256", argv[0]);
fprintf(stderr, "%s: only support quantize level = 0, 256, or 65536", argv[0]);
return -1;
}
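
Background on this hunk: the old fprintf passed argv[0] without a matching conversion specifier, which GCC/Clang report as -Wformat-extra-args, and the message omitted the 65536 level. Every argument after the format string needs a corresponding specifier. A trivial illustration:

#include <cstdio>

int main(int argc, char** argv)
{
    (void) argc;
    // std::fprintf(stderr, "unsupported quantize level", argv[0]);   // extra argument: -Wformat-extra-args
    std::fprintf(stderr, "%s: unsupported quantize level\n", argv[0]); // argument now matches %s
    return 0;
}
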

@@ -308,7 +308,7 @@ int main(int argc, char** argv)
++it;
}
}
fprintf(pp, "%d %d\n", layer_count + bottom_reference.size(), blob_names.size() + splitncnn_blob_count);
fprintf(pp, "%lu %lu\n", layer_count + bottom_reference.size(), blob_names.size() + splitncnn_blob_count);

// populate
blob_name_decorated.clear();
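
Background on this hunk: adding a size_t (the .size() results) to an int promotes the whole expression to size_t, so printing it with %d raises -Wformat and is wrong on 64-bit targets. %lu matches platforms where size_t is unsigned long (typical LP64 Linux); the fully portable specifier since C99/C++11 is %zu. A small illustration:

#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> blobs(42);
    int split_count = 3;
    // %zu is the portable conversion for size_t; %lu also works on LP64 platforms.
    std::printf("%zu\n", blobs.size() + split_count);
    return 0;
}
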
@@ -449,12 +449,12 @@ int main(int argc, char** argv)
quantize_tag = quantize_weight((float *)blob.data().data(), blob.data_size(), float16_weights);
}
}

// write quantize tag first
if (j == 0)
if (j == 0)
fwrite(&quantize_tag, sizeof(int), 1, bp);

if (quantize_tag)
if (quantize_tag)
{
int p0 = ftell(bp);
if (quantize_level == 256)
@@ -472,14 +472,14 @@ int main(int argc, char** argv)
int nalign = alignSize(nwrite, 4);
unsigned char padding[4] = {0x00, 0x00, 0x00, 0x00};
fwrite(padding, sizeof(unsigned char), nalign - nwrite, bp);
}
else
}
else
{
// write original data
fwrite(blob.data().data(), sizeof(float), blob.data_size(), bp);
}
}

}
else if (layer.type() == "Crop")
{
@@ -570,7 +570,7 @@ int main(int argc, char** argv)
if (j == 0)
fwrite(&quantize_tag, sizeof(int), 1, bp);

if (quantize_tag)
if (quantize_tag)
{
int p0 = ftell(bp);
if (quantize_level == 256)
@@ -589,7 +589,7 @@ int main(int argc, char** argv)
unsigned char padding[4] = {0x00, 0x00, 0x00, 0x00};
fwrite(padding, sizeof(unsigned char), nalign - nwrite, bp);
}
else
else
{
// write original data
fwrite(blob.data().data(), sizeof(float), blob.data_size(), bp);
@@ -602,7 +602,7 @@ int main(int argc, char** argv)
const caffe::BlobShape& bs = input_param.shape(0);
for (int j=1; j<std::min((int)bs.dim_size(), 4); j++)
{
fprintf(pp, " %d", bs.dim(j));
fprintf(pp, " %lld", bs.dim(j));
}
for (int j=bs.dim_size(); j<4; j++)
{
@@ -662,7 +662,7 @@ int main(int argc, char** argv)
const caffe::BlobShape& bs = reshape_param.shape();
for (int j=1; j<std::min((int)bs.dim_size(), 4); j++)
{
fprintf(pp, " %d", bs.dim(j));
fprintf(pp, " %lld", bs.dim(j));
}
for (int j=bs.dim_size(); j<4; j++)
{
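
Background on the two %lld hunks above: bs.dim(j) comes from Caffe's BlobShape message, whose dim field appears to be a protobuf int64, so it yields a 64-bit integer; handing that to %d is undefined and triggers -Wformat. A portable way to print a fixed-width 64-bit value is PRId64 from <cinttypes>, or an explicit cast to long long, as sketched below (illustration only, not the converter code):

#include <cinttypes>
#include <cstdio>

int main()
{
    int64_t dim = 1000000007LL;
    std::printf("%" PRId64 "\n", dim);                   // fixed-width format macro
    std::printf("%lld\n", static_cast<long long>(dim));  // explicit cast also avoids the warning
    return 0;
}
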
5 changes: 3 additions & 2 deletions tools/ncnn2mem.cpp
@@ -15,6 +15,7 @@
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <cstddef>
#include <string>
#include <vector>
#include "layer.h"
@@ -24,7 +25,7 @@ static std::vector<std::string> blob_names;

static int find_blob_index_by_name(const char* name)
{
for (int i=0; i<blob_names.size(); i++)
for (std::size_t i=0; i<blob_names.size(); i++)
{
if (blob_names[i] == name)
{
@@ -38,7 +39,7 @@ static int find_blob_index_by_name(const char* name)

static void sanitize_name(char* name)
{
for (int i=0; i<strlen(name); i++)
for (std::size_t i=0; i<strlen(name); i++)
{
if (!isalnum(name[i]))
{
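
Background on these hunks: std::vector::size() and strlen() both return std::size_t, so an int loop index triggers -Wsign-compare; hence the std::size_t indices and the <cstddef> include. When the index itself is not needed, a C++11 range-based for avoids the question entirely. A sketch in that style (the loop body here just replaces non-alphanumeric characters with '_' as an example; the original function's body is cut off above):

#include <cctype>
#include <string>

static void sanitize_name_sketch(std::string& name)
{
    for (char& c : name)
    {
        // Cast to unsigned char before isalnum to avoid UB on negative char values.
        if (!std::isalnum(static_cast<unsigned char>(c)))
            c = '_';
    }
}

int main() { std::string s("conv1/relu"); sanitize_name_sketch(s); return s == "conv1_relu" ? 0 : 1; }
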
