Pitfalls of saving [B, C, H, W] four-dimensional data with HDF5

Code for saving the 4-D data

1. The saved data

2. The save code

#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include "hdf5.h"

using namespace std;
using namespace cv;

void vector2hdf5(vector<Mat> &data, Mat &label, const char *filepath, string dataset1, string dataset2)
{
	int data_rows = data.size();
	int data_channel = 15;
	int image_width = 96;
	int image_height = 96;

	int label_cols = label.cols;
	int label_rows = label.rows;
	hid_t file_id;
	herr_t status;
	file_id = H5Fcreate(filepath, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

	int rank_data = 4, rank_label = 2;

	hsize_t dims_data[4];
	hsize_t dims_label[2];

	dims_data[0] = data_rows;
	dims_data[1] = data_channel;
	dims_data[2] = image_height;
	dims_data[3] = image_width;

	dims_label[0] = label_rows;
	dims_label[1] = label_cols;

	hid_t data_id = H5Screate_simple(rank_data, dims_data, NULL);
	hid_t label_id = H5Screate_simple(rank_label, dims_label, NULL);

	hid_t dataset_id = H5Dcreate2(file_id, dataset1.c_str(), H5T_NATIVE_FLOAT, data_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
	hid_t labelset_id = H5Dcreate2(file_id, dataset2.c_str(), H5T_NATIVE_INT, label_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

	// sanity check: channels / rows / cols of the first sample
	cout << data[0].channels() << "  " << data[0].rows << "  " << data[0].cols << endl;

	float *data_mem = new float[data_rows * data_channel * image_height * image_width];
	float **array_data = new float*[data_rows];
	for (int j = 0; j < data_rows; j++) {
		array_data[j] = data_mem + j * data_channel * image_height * image_width;
		// A first (wrong) version copied the Mat buffer row by row:
		//   array_data[j][i * data_channel * image_width + k] = ptr[k];
		// which keeps the channels interleaved and scrambles the HDF5 layout.
		//
		// This is the pit from the title: HDF5 stores planar NCHW data,
		// H5: [num][15][96][96], while cv::Mat is interleaved HWC,
		// Mat: [96][96][15] -- so the channels must be de-interleaved on save.
		for (int n = 0; n < data_channel; n++)
		{
			for (int i = 0; i < image_height; i++)
			{
				float *ptr = data[j].ptr<float>(i);
				for (int k = 0; k < image_width; k++)
				{
					array_data[j][n * image_width * image_height + i * image_width + k] = ptr[k * data_channel + n];
				}
			}
		}
	}

	int *label_mem = new int[label_rows * label_cols];
	int **array_label = new int*[label_rows];
	for (int j = 0; j < label_rows; j++) {
		array_label[j] = label_mem + j * label_cols;
		for (int i = 0; i < label_cols; i++)
		{
			array_label[j][i] = label.at<int>(j, i);
		}
	}

	status = H5Dwrite(dataset_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_mem);
	status = H5Dwrite(labelset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, label_mem);

	// close all HDF5 handles
	status = H5Sclose(data_id);
	status = H5Sclose(label_id);

	status = H5Dclose(dataset_id);
	status = H5Dclose(labelset_id);

	status = H5Fclose(file_id);

	// free the row-pointer tables and the underlying buffers
	delete[] array_data;
	delete[] array_label;
	delete[] data_mem;
	delete[] label_mem;
}
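
For reference, a minimal sketch of how vector2hdf5 might be called. The CV_32FC(15) sample type and CV_32S label type are assumptions inferred from the hard-coded 15-channel dimensions and the label.at<int> access above; the dataset names must match the top blobs declared in the prototxt below.

// Hypothetical usage: one 96x96 sample with 15 float channels, one int label.
vector<Mat> samples;
samples.push_back(Mat::zeros(96, 96, CV_32FC(15)));  // stand-in for real data

Mat labels = Mat::zeros(1, 1, CV_32S);
labels.at<int>(0, 0) = 1;

// "data" and "label" are the dataset names Caffe's HDF5Data layer will read.
vector2hdf5(samples, labels, "data/train.h5", "data", "label");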

The pit: HDF5 expects planar NCHW data, H5: [num][15][96][96], while cv::Mat stores pixels interleaved as HWC, Mat: [96][96][15], so you must reorder the data when saving.
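
If you would rather avoid the hand-written index arithmetic, cv::split performs the same de-interleaving. Below is a minimal sketch; the helper name hwc_to_chw is mine, and it assumes float Mats like those handled in vector2hdf5 above.

#include <cstring>  // memcpy

// Copy one HWC-interleaved Mat into a planar CHW float buffer.
void hwc_to_chw(const Mat &src, float *dst)
{
	vector<Mat> planes;
	split(src, planes);  // planes[n] is a single-channel rows x cols Mat
	int plane_size = src.rows * src.cols;
	for (int n = 0; n < src.channels(); n++)
	{
		// Mats produced by split own fresh continuous buffers,
		// so a flat copy of each plane is safe.
		memcpy(dst + n * plane_size, planes[n].ptr<float>(0),
			plane_size * sizeof(float));
	}
}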

The prototxt file

name: "CNN_FOR_ANTI_PRINT"
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  hdf5_data_param {
    source: "data/train.txt"
    batch_size: 32
  }
}
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  include {
    phase: TEST
  }
  hdf5_data_param {
    source: "data/test.txt"
    batch_size: 32
  }
}
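
Note that source does not point at an .h5 file directly: Caffe's HDF5Data layer reads a plain text file listing one HDF5 file path per line. Assuming the file written by vector2hdf5 above, data/train.txt would hold a single line:

data/train.h5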
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 32
    kernel_size: 5
    pad: 2
    stride: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}

layer {
  name: "conv2"
  type: "Convolution"
  bottom: "conv1"
  top: "conv2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 64
    kernel_size: 3
    stride: 2
    pad: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}

layer {
  name: "conv3"
  type: "Convolution"
  bottom: "conv2"
  top: "conv3"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 128
    kernel_size: 3
    stride: 2
    pad: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}

layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 128
    kernel_size: 3
    pad: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}

layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv4"
  top: "pool1"
  pooling_param {
    pool: AVE
    kernel_size: 8
    stride: 8
  }
}
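
With 96x96 inputs, the spatial sizes follow output = floor((input + 2*pad - kernel) / stride) + 1: conv1 gives (96 + 4 - 5)/3 + 1 = 32, conv2 takes 32 down to 16, conv3 takes 16 down to 8, and conv4 (stride 1, pad 1) stays at 8. The 8x8 average pooling therefore collapses each of the 128 feature maps to a single value, so ip2 receives a 128-dimensional vector.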

layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "pool1"
  top: "ip2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}

layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TRAIN
  }
}

layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}

Note that the network has two output nodes here. You could instead use a single output node with the sigmoid cross-entropy loss (SigmoidCrossEntropyLoss), but that loss cannot report accuracy on its own, so you would need to add your own accuracy-recording layer; the built-in Accuracy layer is designed to pair with SoftmaxWithLoss.
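
For comparison, a minimal sketch of that single-node variant (illustrative only: ip2 would need num_output: 1, and SigmoidCrossEntropyLoss consumes the raw ip2 output rather than sigmoid probabilities):

layer {
  name: "loss"
  type: "SigmoidCrossEntropyLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}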


Reposted from blog.csdn.net/u011808673/article/details/81152304