diff --git a/modules/perception/camera/app/lane_camera_perception.cc b/modules/perception/camera/app/lane_camera_perception.cc index 0e2e978d8ba..3ef552011dc 100644 --- a/modules/perception/camera/app/lane_camera_perception.cc +++ b/modules/perception/camera/app/lane_camera_perception.cc @@ -69,37 +69,38 @@ void LaneCameraPerception::InitLane( const std::string &work_root, base::BaseCameraModelPtr &model, const app::PerceptionParam &perception_param) { // Init lane - CHECK(perception_param.has_lane_param()) << "Failed to include lane_param"; - { + CHECK_GT(perception_param.lane_param_size(), 0) + << "Failed to include lane_param"; + for (int i = 0; i < perception_param.lane_param_size(); ++i) { // Initialize lane detector - auto lane_param = perception_param.lane_param(); + const auto &lane_param = perception_param.lane_param(i); CHECK(lane_param.has_lane_detector_param()) - << "Failed to include lane_detector_param"; + << "Failed to include lane_detector_param."; LaneDetectorInitOptions lane_detector_init_options; - auto lane_detector_param = lane_param.lane_detector_param(); - auto lane_detector_plugin_param = lane_detector_param.plugin_param(); + const auto &lane_detector_param = lane_param.lane_detector_param(); + const auto &lane_detector_plugin_param = lane_detector_param.plugin_param(); lane_detector_init_options.conf_file = lane_detector_plugin_param.config_file(); lane_detector_init_options.root_dir = GetAbsolutePath(work_root, lane_detector_plugin_param.root_dir()); - lane_detector_init_options.gpu_id = perception_param_.gpu_id(); model = common::SensorManager::Instance()->GetUndistortCameraModel( lane_detector_param.camera_name()); auto pinhole = static_cast(model.get()); name_intrinsic_map_.insert(std::pair( lane_detector_param.camera_name(), pinhole->get_intrinsic_params())); + lane_detector_init_options.gpu_id = perception_param.gpu_id(); lane_detector_init_options.base_camera_model = model; - AINFO << "lane detector name: " << lane_detector_plugin_param.name(); + AINFO << "lane_detector_name: " << lane_detector_plugin_param.name(); lane_detector_.reset(BaseLaneDetectorRegisterer::GetInstanceByName( lane_detector_plugin_param.name())); CHECK(lane_detector_ != nullptr); CHECK(lane_detector_->Init(lane_detector_init_options)) - << "Failed to init " << lane_detector_plugin_param.name(); + << "Failed to init: " << lane_detector_plugin_param.name(); AINFO << "Detector: " << lane_detector_->Name(); - // initialize lane postprocessor - auto lane_postprocessor_param = - perception_param_.lane_param().lane_postprocessor_param(); + // Initialize lane postprocessor + const auto &lane_postprocessor_param = + lane_param.lane_postprocessor_param(); LanePostprocessorInitOptions postprocessor_init_options; postprocessor_init_options.detect_config_root = GetAbsolutePath(work_root, lane_detector_plugin_param.root_dir()); @@ -114,21 +115,21 @@ void LaneCameraPerception::InitLane( lane_postprocessor_param.name())); CHECK(lane_postprocessor_ != nullptr); CHECK(lane_postprocessor_->Init(postprocessor_init_options)) - << "Failed to init " << lane_postprocessor_param.name(); - AINFO << "Lane postprocessor: " << lane_postprocessor_->Name(); + << "Failed to init: " << lane_postprocessor_param.name(); + AINFO << "lane_postprocessor: " << lane_postprocessor_->Name(); // Init output file folder - if (perception_param_.has_debug_param() && - perception_param_.debug_param().has_lane_out_dir()) { + if (perception_param.has_debug_param() && + perception_param.debug_param().has_lane_out_dir()) { 
write_out_lane_file_ = true; - out_lane_dir_ = perception_param_.debug_param().lane_out_dir(); + out_lane_dir_ = perception_param.debug_param().lane_out_dir(); EnsureDirectory(out_lane_dir_); } - if (perception_param_.has_debug_param() && - perception_param_.debug_param().has_calibration_out_dir()) { + if (perception_param.has_debug_param() && + perception_param.debug_param().has_calibration_out_dir()) { write_out_calib_file_ = true; - out_calib_dir_ = perception_param_.debug_param().calibration_out_dir(); + out_calib_dir_ = perception_param.debug_param().calibration_out_dir(); EnsureDirectory(out_calib_dir_); } } diff --git a/modules/perception/camera/app/obstacle_camera_perception.cc b/modules/perception/camera/app/obstacle_camera_perception.cc index 97a44d4e513..bf918b0dce6 100644 --- a/modules/perception/camera/app/obstacle_camera_perception.cc +++ b/modules/perception/camera/app/obstacle_camera_perception.cc @@ -44,7 +44,7 @@ bool ObstacleCameraPerception::Init( GetAbsolutePath(options.root_dir, options.conf_file); config_file = GetAbsolutePath(work_root, config_file); CHECK(cyber::common::GetProtoFromFile(config_file, &perception_param_)) - << "Read config failed: "; + << "Read config failed: " << config_file; CHECK(inference::CudaUtil::set_device_id(perception_param_.gpu_id())); // Init detector @@ -183,21 +183,21 @@ void ObstacleCameraPerception::InitLane( const std::string &work_root, const app::PerceptionParam &perception_param) { // Init lane - CHECK(perception_param.has_lane_param()) << "Failed to include lane_param."; - { + CHECK_GT(perception_param.lane_param_size(), 0) + << "Failed to include lane_param"; + for (int i = 0; i < perception_param.lane_param_size(); ++i) { // Initialize lane detector - auto lane_param = perception_param.lane_param(); + const auto &lane_param = perception_param.lane_param(i); CHECK(lane_param.has_lane_detector_param()) << "Failed to include lane_detector_param."; LaneDetectorInitOptions lane_detector_init_options; - auto lane_detector_param = - perception_param_.lane_param().lane_detector_param(); - auto lane_detector_plugin_param = lane_detector_param.plugin_param(); + const auto &lane_detector_param = lane_param.lane_detector_param(); + const auto &lane_detector_plugin_param = lane_detector_param.plugin_param(); lane_detector_init_options.conf_file = lane_detector_plugin_param.config_file(); lane_detector_init_options.root_dir = GetAbsolutePath(work_root, lane_detector_plugin_param.root_dir()); - lane_detector_init_options.gpu_id = perception_param_.gpu_id(); + lane_detector_init_options.gpu_id = perception_param.gpu_id(); base::BaseCameraModelPtr model = common::SensorManager::Instance()->GetUndistortCameraModel( lane_detector_param.camera_name()); @@ -211,8 +211,8 @@ void ObstacleCameraPerception::InitLane( AINFO << "Detector: " << lane_detector_->Name(); // Initialize lane postprocessor - auto lane_postprocessor_param = - perception_param_.lane_param().lane_postprocessor_param(); + const auto &lane_postprocessor_param = + lane_param.lane_postprocessor_param(); LanePostprocessorInitOptions postprocessor_init_options; postprocessor_init_options.detect_config_root = GetAbsolutePath(work_root, lane_detector_plugin_param.root_dir()); @@ -231,17 +231,17 @@ void ObstacleCameraPerception::InitLane( AINFO << "lane_postprocessor: " << lane_postprocessor_->Name(); // Init output file folder - if (perception_param_.has_debug_param() && - perception_param_.debug_param().has_lane_out_dir()) { + if (perception_param.has_debug_param() && + 
perception_param.debug_param().has_lane_out_dir()) { write_out_lane_file_ = true; - out_lane_dir_ = perception_param_.debug_param().lane_out_dir(); + out_lane_dir_ = perception_param.debug_param().lane_out_dir(); EnsureDirectory(out_lane_dir_); } - if (perception_param_.has_debug_param() && - perception_param_.debug_param().has_calibration_out_dir()) { + if (perception_param.has_debug_param() && + perception_param.debug_param().has_calibration_out_dir()) { write_out_calib_file_ = true; - out_calib_dir_ = perception_param_.debug_param().calibration_out_dir(); + out_calib_dir_ = perception_param.debug_param().calibration_out_dir(); EnsureDirectory(out_calib_dir_); } } @@ -313,47 +313,38 @@ bool ObstacleCameraPerception::Perception( name_intrinsic_map_.at(frame->data_provider->sensor_name()); CHECK(frame->calibration_service != nullptr); - // Lane detector and postprocessor: work on front_6mm only - if (lane_calibration_working_sensor_name_ == - frame->data_provider->sensor_name()) { - LaneDetectorOptions lane_detetor_options; - LanePostprocessorOptions lane_postprocessor_options; - if (!lane_detector_->Detect(lane_detetor_options, frame)) { - AERROR << "Failed to detect lane."; - return false; - } - PERCEPTION_PERF_BLOCK_END_WITH_INDICATOR( - frame->data_provider->sensor_name(), "LaneDetector"); + LaneDetectorOptions lane_detetor_options; + LanePostprocessorOptions lane_postprocessor_options; + if (!lane_detector_->Detect(lane_detetor_options, frame)) { + AERROR << "Failed to detect lane."; + return false; + } + PERCEPTION_PERF_BLOCK_END_WITH_INDICATOR( + frame->data_provider->sensor_name(), "LaneDetector"); - if (!lane_postprocessor_->Process2D(lane_postprocessor_options, frame)) { - AERROR << "Failed to postprocess lane 2D."; - return false; - } - PERCEPTION_PERF_BLOCK_END_WITH_INDICATOR( - frame->data_provider->sensor_name(), "LanePostprocessor2D"); + if (!lane_postprocessor_->Process2D(lane_postprocessor_options, frame)) { + AERROR << "Failed to postprocess lane 2D."; + return false; + } + PERCEPTION_PERF_BLOCK_END_WITH_INDICATOR( + frame->data_provider->sensor_name(), "LanePostprocessor2D"); - // Calibration service - frame->calibration_service->Update(frame); - PERCEPTION_PERF_BLOCK_END_WITH_INDICATOR( - frame->data_provider->sensor_name(), "CalibrationService"); + // Calibration service + frame->calibration_service->Update(frame); + PERCEPTION_PERF_BLOCK_END_WITH_INDICATOR( + frame->data_provider->sensor_name(), "CalibrationService"); - if (!lane_postprocessor_->Process3D(lane_postprocessor_options, frame)) { - AERROR << "Failed to postprocess lane 3D."; - return false; - } - PERCEPTION_PERF_BLOCK_END_WITH_INDICATOR( - frame->data_provider->sensor_name(), "LanePostprocessor3D"); + if (!lane_postprocessor_->Process3D(lane_postprocessor_options, frame)) { + AERROR << "Failed to postprocess lane 3D."; + return false; + } + PERCEPTION_PERF_BLOCK_END_WITH_INDICATOR( + frame->data_provider->sensor_name(), "LanePostprocessor3D"); - if (write_out_lane_file_) { - std::string lane_file_path = - out_lane_dir_ + "/" + std::to_string(frame->frame_id) + ".txt"; - WriteLanelines(write_out_lane_file_, lane_file_path, frame->lane_objects); - } - } else { - // Fill the frame using previous estimates - frame->calibration_service->Update(frame); - PERCEPTION_PERF_BLOCK_END_WITH_INDICATOR( - frame->data_provider->sensor_name(), "CalibrationService"); + if (write_out_lane_file_) { + std::string lane_file_path = + out_lane_dir_ + "/" + std::to_string(frame->frame_id) + ".txt"; + 
WriteLanelines(write_out_lane_file_, lane_file_path, frame->lane_objects); } if (write_out_calib_file_) { diff --git a/modules/perception/camera/app/perception.proto b/modules/perception/camera/app/perception.proto index 971ad2a3698..c1119664dc9 100644 --- a/modules/perception/camera/app/perception.proto +++ b/modules/perception/camera/app/perception.proto @@ -52,7 +52,7 @@ message PerceptionParam { optional int32 gpu_id = 5; optional string camera_intrinsics = 6; optional FeatureParam feature_param = 7; - optional LanePerceptionParam lane_param = 8; + repeated LanePerceptionParam lane_param = 8; optional CalibrationServiceParam calibration_service_param = 9; optional DebugParam debug_param = 10; optional ObjectTemplateParam object_template_param = 11; diff --git a/modules/perception/camera/test/camera_lib_lane_postprocessor_darkscnn_lane_postprocessor_test.cc b/modules/perception/camera/test/camera_lib_lane_postprocessor_darkscnn_lane_postprocessor_test.cc index 40a894be62b..1788584d172 100644 --- a/modules/perception/camera/test/camera_lib_lane_postprocessor_darkscnn_lane_postprocessor_test.cc +++ b/modules/perception/camera/test/camera_lib_lane_postprocessor_darkscnn_lane_postprocessor_test.cc @@ -186,12 +186,15 @@ TEST(darkSCNNLanePostprocessor, camera_lane_postprocessor_point_test) { intrinsic_map["onsemi_obstacle"] = frame.camera_k_matrix; extrinsic_map["onsemi_obstacle"] = ex_camera2lidar; + std::vector<std::string> camera_names; + camera_names.push_back(visual_camera); EXPECT_TRUE(visualize_.Init_all_info_single_camera( - visual_camera, intrinsic_map, extrinsic_map, ex_lidar2imu, pitch_adj, - yaw_adj, roll_adj, calibration_service_init_options.image_height, + camera_names, visual_camera, intrinsic_map, extrinsic_map, ex_lidar2imu, + pitch_adj, yaw_adj, roll_adj, + calibration_service_init_options.image_height, calibration_service_init_options.image_width)); - homography_im2car_ = visualize_.homography_im2car(); + homography_im2car_ = visualize_.homography_im2car(visual_camera); lane_postprocessor->SetIm2CarHomography(homography_im2car_); AINFO << "Initilize visualizer finished!"; diff --git a/modules/perception/camera/tools/offline/visualizer.cc b/modules/perception/camera/tools/offline/visualizer.cc index d05570e846d..bde502e7e05 100644 --- a/modules/perception/camera/tools/offline/visualizer.cc +++ b/modules/perception/camera/tools/offline/visualizer.cc @@ -120,7 +120,8 @@ bool Visualizer::Init(const std::vector<std::string> &camera_names, } bool Visualizer::Init_all_info_single_camera( - const std::string &camera_name, + const std::vector<std::string> &camera_names, + const std::string &visual_camera, const std::map<std::string, Eigen::Matrix3f> &intrinsic_map, const std::map<std::string, Eigen::Matrix4d> &extrinsic_map, const Eigen::Matrix4d &ex_lidar2imu, @@ -134,20 +135,13 @@ bool Visualizer::Init_all_info_single_camera( intrinsic_map_ = intrinsic_map; extrinsic_map_ = extrinsic_map; ex_lidar2imu_ = ex_lidar2imu; + camera_names_ = camera_names; last_timestamp_ = 0; small_h_ = static_cast<int>(image_height_ * scale_ratio_); small_w_ = static_cast<int>(image_width_ * scale_ratio_); world_h_ = 2 * small_h_; - AINFO << "world_h_: " << world_h_; - AINFO << "wide_pixel_: " << wide_pixel_; - AINFO << "small_h_: " << small_h_; - AINFO << "small_w_: " << small_w_; - camera_image_[camera_name + "_2D"] = - cv::Mat(small_h_, small_w_, CV_8UC3, apollo::perception::black_color); - camera_image_[camera_name + "_3D"] = - cv::Mat(small_h_, small_w_, CV_8UC3, apollo::perception::black_color); world_image_ = cv::Mat(world_h_, wide_pixel_, CV_8UC3, apollo::perception::black_color); color_cipv_ =
apollo::perception::white_color; @@ -155,81 +149,100 @@ bool Visualizer::Init_all_info_single_camera( draw_range_circle(); - // 1. transform camera_>lidar - ex_camera2lidar_ = extrinsic_map_.at(camera_name); - AINFO << "ex_camera2lidar_ = " << extrinsic_map_.at(camera_name); + AINFO << "world_h_: " << world_h_; + AINFO << "wide_pixel_: " << wide_pixel_; + AINFO << "small_h_: " << small_h_; + AINFO << "small_w_: " << small_w_; - AINFO << "ex_lidar2imu_ =" << ex_lidar2imu_; + visual_camera_ = visual_camera; + // Set camera specifc parameters + for (auto camera_name : camera_names) { + camera_image_[camera_name] = + cv::Mat(small_h_, small_w_, CV_8UC3, apollo::perception::black_color); + camera_image_[camera_name] = + cv::Mat(small_h_, small_w_, CV_8UC3, apollo::perception::black_color); - // 2. transform camera->lidar->imu - ex_camera2imu_ = ex_lidar2imu_ * ex_camera2lidar_; - AINFO << "ex_camera2imu_ =" << ex_camera2imu_; + // 1. transform camera->lidar + ex_camera2lidar_[camera_name] = extrinsic_map_.at(camera_name); + AINFO << "ex_camera2lidar_ = " << extrinsic_map_.at(camera_name); - // intrinsic camera parameter - K_ = intrinsic_map_.at(camera_name).cast(); - AINFO << "intrinsic K_ =" << K_; - // homography_ground2image_.setIdentity(); - // homography_image2ground_.setIdentity(); + AINFO << "ex_lidar2imu_ =" << ex_lidar2imu_; - // rotate 90 degree around z axis to make x point forward - // double imu_height = 0; // imu height should be considred later - ex_imu2car_ << 0, 1, 0, 0, // cos(90), sin(90), 0, - -1, 0, 0, 0, // -sin(90), cos(90), 0, - 0, 0, 1, 0, // 0, 0, 1 - 0, 0, 0, 1; + // 2. transform camera->lidar->imu + ex_camera2imu_ = ex_lidar2imu_ * ex_camera2lidar_[camera_name]; + AINFO << "ex_camera2imu_ =" << ex_camera2imu_; - // 3. transform camera->lidar->imu->car - ex_camera2car_ = ex_imu2car_ * ex_camera2imu_; + // intrinsic camera parameter + K_[camera_name] = intrinsic_map_.at(camera_name).cast(); + AINFO << "intrinsic K_ =" << K_[camera_name]; + // homography_ground2image_.setIdentity(); + // homography_image2ground_.setIdentity(); - AINFO << "ex_camera2car_ =" << ex_camera2car_; + // rotate 90 degree around z axis to make x point forward + // double imu_height = 0; // imu height should be considred later + ex_imu2car_ << 0, 1, 0, 0, // cos(90), sin(90), 0, + -1, 0, 0, 0, // -sin(90), cos(90), 0, + 0, 0, 1, 0, // 0, 0, 1 + 0, 0, 0, 1; - // Adjust angle - adjust_angles(camera_name, pitch_adj_degree, yaw_adj_degree, roll_adj_degree); + // 3. 
transform camera->lidar->imu->car + ex_camera2car_ = ex_imu2car_ * ex_camera2imu_; - AINFO << "homography_image2ground_ =" << homography_image2ground_; + AINFO << "ex_camera2car_ =" << ex_camera2car_; - AINFO << "homography_ground2image_ =" << homography_ground2image_; + // Adjust angle + adjust_angles(camera_name, pitch_adj_degree, yaw_adj_degree, + roll_adj_degree); - // compute FOV points - p_fov_1_.x = 0; - p_fov_1_.y = static_cast(image_height_ * fov_cut_ratio_); + AINFO << "homography_image2ground_ =" + << homography_image2ground_[camera_name]; + AINFO << "homography_ground2image_ =" + << homography_ground2image_[camera_name]; - p_fov_2_.x = image_width_ - 1; - p_fov_2_.y = static_cast(image_height_ * fov_cut_ratio_); + // compute FOV points + p_fov_1_.x = 0; + p_fov_1_.y = static_cast(image_height_ * fov_cut_ratio_); - p_fov_3_.x = 0; - p_fov_3_.y = image_height_ - 1; + p_fov_2_.x = image_width_ - 1; + p_fov_2_.y = static_cast(image_height_ * fov_cut_ratio_); - p_fov_4_.x = image_width_ - 1; - p_fov_4_.y = image_height_ - 1; + p_fov_3_.x = 0; + p_fov_3_.y = image_height_ - 1; - AINFO << "p_fov_1_ =" << p_fov_1_; - AINFO << "p_fov_2_ =" << p_fov_2_; - AINFO << "p_fov_3_ =" << p_fov_3_; - AINFO << "p_fov_4_ =" << p_fov_4_; + p_fov_4_.x = image_width_ - 1; + p_fov_4_.y = image_height_ - 1; - vp1_(0) = 1024.0; - if (K_(0, 0) >= 1.0) { - vp1_(1) = (image_width_ >> 1) * vp1_(0) / K_(0, 0); - } else { - AWARN - << "Focal length (" << K_(0, 0) - << " in pixel) is incorrect. Please check camera intrinsic parameters."; - vp1_(1) = vp1_(0) * 0.25; - } + AINFO << "p_fov_1_ =" << p_fov_1_; + AINFO << "p_fov_2_ =" << p_fov_2_; + AINFO << "p_fov_3_ =" << p_fov_3_; + AINFO << "p_fov_4_ =" << p_fov_4_; + + vp1_[camera_name](0) = 1024.0; + if (K_[camera_name](0, 0) >= 1.0) { + vp1_[camera_name](1) = + (image_width_ >> 1) * vp1_[camera_name](0) / K_[camera_name](0, 0); + } else { + AWARN << "Focal length (" << K_[camera_name](0, 0) + << " in pixel) is incorrect. " + << " Please check camera intrinsic parameters."; + vp1_[camera_name](1) = vp1_[camera_name](0) * 0.25; + } - vp2_(0) = vp1_(0); - vp2_(1) = -vp1_(1); + vp2_[camera_name](0) = vp1_[camera_name](0); + vp2_[camera_name](1) = -vp1_[camera_name](1); - AINFO << "vanishing point 1:" << vp1_; - AINFO << "vanishing point 2:" << vp2_; + AINFO << "vanishing point 1:" << vp1_[camera_name]; + AINFO << "vanishing point 2:" << vp2_[camera_name]; - pitch_adj_degree_ = pitch_adj_degree; - yaw_adj_degree_ = yaw_adj_degree; - roll_adj_degree_ = roll_adj_degree; + pitch_adj_degree_[camera_name] = pitch_adj_degree; + yaw_adj_degree_[camera_name] = yaw_adj_degree; + roll_adj_degree_[camera_name] = roll_adj_degree; +} reset_key(); + all_camera_recieved_ = 0x0; + return true; } @@ -279,11 +292,11 @@ bool Visualizer::adjust_angles(const std::string &camera_name, Eigen::Matrix3d H; Eigen::Matrix3d H_inv; - H.block(0, 0, 3, 2) = (K_ * R.transpose()).block(0, 0, 3, 2); - H.block(0, 2, 3, 1) = -K_ * R.transpose() * T; + H.block(0, 0, 3, 2) = (K_[camera_name] * R.transpose()).block(0, 0, 3, 2); + H.block(0, 2, 3, 1) = -K_[camera_name] * R.transpose() * T; H_inv = H.inverse(); - homography_ground2image_ = H; - homography_image2ground_ = H_inv; + homography_ground2image_[camera_name] = H; + homography_image2ground_[camera_name] = H_inv; // Version 2. 
Conceptual // ex_car2camera_ = adjusted_camera2car_.inverse(); @@ -526,7 +539,7 @@ bool Visualizer::save_extrinsic_in_yaml(const std::string &camera_name, y_file << " secs: 0\n"; y_file << " nsecs: 0\n"; y_file << " frame_id: velodyne128\n"; - y_file << "child_frame_id: front_6mm\n"; + y_file << "child_frame_id: " << camera_name << "\n"; y_file << "transform:\n"; y_file << " translation:\n"; y_file << " x: " << extrinsic(0, 3) << "\n"; @@ -588,7 +601,7 @@ bool Visualizer::save_manual_calibration_parameter( // Get current angle from extrinsics // ex_camera2lidar_ = extrinsic_map_.at(camera_name); - Eigen::Matrix3d R = ex_camera2lidar_.block(0, 0, 3, 3); + Eigen::Matrix3d R = ex_camera2lidar_[camera_name].block(0, 0, 3, 3); double old_pitch_radian = regularize_angle(atan2(R(2, 1), R(2, 2))); double old_roll_radian = regularize_angle( @@ -620,8 +633,9 @@ bool Visualizer::save_manual_calibration_parameter( << ", Z: " << quaternion(2) << ", W: " << quaternion(3); // Save the file // Yaw and Roll are swapped. - save_extrinsic_in_yaml(camera_name, ex_camera2lidar_, quaternion, - new_pitch_radian, new_yaw_radian, new_roll_radian); + save_extrinsic_in_yaml(camera_name, ex_camera2lidar_[camera_name], + quaternion, new_pitch_radian, new_yaw_radian, + new_roll_radian); return true; } @@ -691,52 +705,61 @@ bool Visualizer::key_handler(const std::string &camera_name, const int key) { break; case KEY_UP_NUM_LOCK_ON: case KEY_UP: if (manual_calibration_mode_ && - pitch_adj_degree_ + 0.05 <= max_pitch_degree_) { - pitch_adj_degree_ -= 0.05; + pitch_adj_degree_[camera_name] + 0.05 <= max_pitch_degree_) { + pitch_adj_degree_[camera_name] -= 0.05; + } else { + visual_camera_ = camera_names_[0]; } - AINFO << "Current pitch: " << pitch_adj_degree_; + AINFO << "Current pitch: " << pitch_adj_degree_[camera_name]; break; case KEY_DOWN_NUM_LOCK_ON: case KEY_DOWN: if (manual_calibration_mode_ && - pitch_adj_degree_ - 0.05 >= min_pitch_degree_) { - pitch_adj_degree_ += 0.05; + pitch_adj_degree_[camera_name] - 0.05 >= min_pitch_degree_) { + pitch_adj_degree_[camera_name] += 0.05; + } else { + visual_camera_ = camera_names_[1]; } - AINFO << "Current pitch: " << pitch_adj_degree_; + AINFO << "Current pitch: " << pitch_adj_degree_[camera_name]; break; case KEY_RIGHT_NUM_LOCK_ON: case KEY_RIGHT: if (manual_calibration_mode_ && - yaw_adj_degree_ + 0.05 <= max_yaw_degree_) { - yaw_adj_degree_ -= 0.05; + yaw_adj_degree_[camera_name] + 0.05 <= max_yaw_degree_) { + yaw_adj_degree_[camera_name] -= 0.05; } - AINFO << "Current yaw: " << yaw_adj_degree_; + AINFO << "Current yaw: " << yaw_adj_degree_[camera_name]; break; case KEY_LEFT_NUM_LOCK_ON: case KEY_LEFT: if (manual_calibration_mode_ && - yaw_adj_degree_ - 0.05 >= min_yaw_degree_) { - yaw_adj_degree_ += 0.05; + yaw_adj_degree_[camera_name] - 0.05 >= min_yaw_degree_) { + yaw_adj_degree_[camera_name] += 0.05; } - AINFO << "Current yaw: " << yaw_adj_degree_; + AINFO << "Current yaw: " << yaw_adj_degree_[camera_name]; break; case KEY_SHIFT_LEFT_NUM_LOCK_ON: case KEY_SHIFT_RIGHT: if (manual_calibration_mode_ && - roll_adj_degree_ + 0.05 <= max_roll_degree_) { - roll_adj_degree_ -= 0.05; + roll_adj_degree_[camera_name] + 0.05 <= max_roll_degree_) { + roll_adj_degree_[camera_name] -= 0.05; } - AINFO << "Current roll: " << roll_adj_degree_; + AINFO << "Current roll: " << roll_adj_degree_[camera_name]; break; case KEY_SHIFT_RIGHT_NUM_LOCK_ON: case KEY_SHIFT_LEFT: if (manual_calibration_mode_ && - roll_adj_degree_ - 0.05 >= min_roll_degree_) { - roll_adj_degree_ += 0.05; +
roll_adj_degree_[camera_name] - 0.05 >= min_roll_degree_) { + roll_adj_degree_[camera_name] += 0.05; } - AINFO << "Current roll: " << roll_adj_degree_; + AINFO << "Current roll: " << roll_adj_degree_[camera_name]; break; case KEY_CTRL_S_NUM_LOCK_ON: case KEY_CTRL_S: if (manual_calibration_mode_) { - save_manual_calibration_parameter(camera_name, pitch_adj_degree_, - yaw_adj_degree_, roll_adj_degree_); - AINFO << "Saved calibration parameters(pyr): (" << pitch_adj_degree_ - << ", " << yaw_adj_degree_ << ", " << roll_adj_degree_ << ")"; + save_manual_calibration_parameter( + visual_camera_, + pitch_adj_degree_[camera_name], + yaw_adj_degree_[camera_name], + roll_adj_degree_[camera_name]); + AINFO << "Saved calibration parameters(pyr): (" + << pitch_adj_degree_[camera_name] + << ", " << yaw_adj_degree_[camera_name] << ", " + << roll_adj_degree_[camera_name] << ")"; } break; case KEY_ALT_C_NUM_LOCK_ON: case KEY_ALT_C: @@ -796,12 +819,17 @@ bool Visualizer::key_handler(const std::string &camera_name, const int key) { case KEY_SHIFT_LEFT: case KEY_SHIFT_RIGHT: if (manual_calibration_mode_) { - adjust_angles(camera_name, pitch_adj_degree_, yaw_adj_degree_, - roll_adj_degree_); + adjust_angles(camera_name, + pitch_adj_degree_[camera_name], + yaw_adj_degree_[camera_name], + roll_adj_degree_[camera_name]); if (show_help_text_) { - help_str_ += "\nAdjusted Pitch: " + std::to_string(pitch_adj_degree_); - help_str_ += "\nAdjusted Yaw: " + std::to_string(yaw_adj_degree_); - help_str_ += "\nAdjusted Roll: " + std::to_string(roll_adj_degree_); + help_str_ += "\nAdjusted Pitch: " + + std::to_string(pitch_adj_degree_[camera_name]); + help_str_ += "\nAdjusted Yaw: " + + std::to_string(yaw_adj_degree_[camera_name]); + help_str_ += "\nAdjusted Roll: " + + std::to_string(roll_adj_degree_[camera_name]); } } } @@ -926,10 +954,12 @@ void Visualizer::ShowResult(const cv::Mat &img, const CameraFrame &frame) { std::string camera_name = frame.data_provider->sensor_name(); if (frame.timestamp - last_timestamp_ > 0.02) { + draw_selected_image_boundary(small_w_, small_h_, + &(camera_image_[visual_camera_])); cv::Mat bigimg(world_h_, small_w_ + wide_pixel_, CV_8UC3); - camera_image_["front_6mm"].copyTo( + camera_image_[camera_name].copyTo( bigimg(cv::Rect(0, 0, small_w_, small_h_))); - camera_image_["front_12mm"].copyTo( + camera_image_[camera_name].copyTo( bigimg(cv::Rect(0, small_h_, small_w_, small_h_))); world_image_.copyTo(bigimg(cv::Rect(small_w_, 0, wide_pixel_, world_h_))); if (write_out_img_) { @@ -962,6 +992,7 @@ void Visualizer::ShowResult(const cv::Mat &img, const CameraFrame &frame) { } void Visualizer::Draw2Dand3D_all_info_single_camera( + const std::string &camera_name, const cv::Mat &img, const CameraFrame &frame, const Eigen::Matrix3d &intrinsic, @@ -970,7 +1001,6 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( const base::MotionBufferPtr motion_buffer) { cv::Mat image_2D = img.clone(); // All clone should be replaced with global - cv::Mat image_3D = img.clone(); // variable and allocated at Init.. 
// plot FOV @@ -994,7 +1024,8 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( // cv::line(world_image_, world_point_to_bigimg(image2ground(p_fov_1_)), if (show_vp_grid_) { - cv::line(image_2D, ground2image(vp1_), ground2image(vp2_), + cv::line(image_2D, ground2image(camera_name, vp1_[camera_name]), + ground2image(camera_name, vp2_[camera_name]), apollo::perception::white_color, 2); } // plot laneline on image and ground plane @@ -1005,18 +1036,17 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( cv::Point p_prev; p_prev.x = static_cast(object.curve_image_point_set[0].x); p_prev.y = static_cast(object.curve_image_point_set[0].y); - Eigen::Vector2d p_prev_ground = image2ground(p_prev); + Eigen::Vector2d p_prev_ground = image2ground(camera_name, p_prev); for (unsigned i = 1; i < object.curve_image_point_set.size(); i++) { cv::Point p_cur; p_cur.x = static_cast(object.curve_image_point_set[i].x); p_cur.y = static_cast(object.curve_image_point_set[i].y); - Eigen::Vector2d p_cur_ground = image2ground(p_cur); + Eigen::Vector2d p_cur_ground = image2ground(camera_name, p_cur); if (p_cur.x >= 0 && p_cur.y >= 0 && p_prev.x >= 0 && p_prev.y >= 0 && p_cur.x < image_width_ && p_cur.y < image_height_ && p_prev.x < image_width_ && p_prev.y < image_height_) { cv::line(image_2D, p_prev, p_cur, lane_color, line_thickness_); - cv::line(image_3D, p_prev, p_cur, lane_color, line_thickness_); } cv::line(world_image_, world_point_to_bigimg(p_prev_ground), world_point_to_bigimg(p_cur_ground), lane_color, 2); @@ -1033,7 +1063,7 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( p_prev_ground(0) = x; p_prev_ground(1) = curve_coord.a * x * x * x + curve_coord.b * x * x + curve_coord.c * x + curve_coord.d; - cv::Point p_prev = ground2image(p_prev_ground); + cv::Point p_prev = ground2image(camera_name, p_prev_ground); x += step; for (unsigned int i = 0; x < curve_coord.x_end && i < lane_step_num_; @@ -1042,12 +1072,11 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( p_cur_ground(0) = x; p_cur_ground(1) = curve_coord.a * x * x * x + curve_coord.b * x * x + curve_coord.c * x + curve_coord.d; - cv::Point p_cur = ground2image(p_cur_ground); + cv::Point p_cur = ground2image(camera_name, p_cur_ground); if (p_cur.x >= 0 && p_cur.y >= 0 && p_prev.x >= 0 && p_prev.y >= 0 && p_cur.x < image_width_ && p_cur.y < image_height_ && p_prev.x < image_width_ && p_prev.y < image_height_) { cv::line(image_2D, p_prev, p_cur, lane_color, line_thickness_); - cv::line(image_3D, p_prev, p_cur, lane_color, line_thickness_); } cv::line(world_image_, world_point_to_bigimg(p_prev_ground), world_point_to_bigimg(p_cur_ground), lane_color, 2); @@ -1066,11 +1095,6 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( static_cast(rect.width), static_cast(rect.height)); cv::Scalar color = colorlistobj[object->track_id % colorlistobj.size()];; - if (object->b_cipv) { - cv::rectangle(image_2D, r, color_cipv_, cipv_line_thickness_); - } - cv::rectangle(image_2D, r, color, 2); - cv::putText( image_2D, // type_to_string(object->type) + "->" + @@ -1099,7 +1123,7 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( c_2D.x = static_cast(rect.x + rect.width / 2); c_2D.y = static_cast(rect.y + rect.height); ADEBUG << "Image Footprint c_2D: (" << c_2D.x << ", " << c_2D.y << ")"; - c_2D_l = image2ground(c_2D); + c_2D_l = image2ground(camera_name, c_2D); ADEBUG << "Image Footprint position: (" << c_2D_l(0) << ", " << c_2D_l(1) << ")"; } else { @@ -1132,77 +1156,83 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( 
cv::Point(static_cast(rect.x), static_cast(rect.y - 10)), cv::FONT_HERSHEY_DUPLEX, 1, apollo::perception::lime_color, 2); - // plot projected 3D box on image_3D - Eigen::Matrix3d rotate_ry; - rotate_ry << cos(theta), 0, sin(theta), - 0, 1, 0, - -sin(theta), 0, cos(theta); - std::vector p(8); - std::vector proj(8); - std::vector p_proj(8); - p[0] << object->size(0) * 0.5, object->size(2) * 0.5, - object->size(1) * 0.5; - p[1] << -object->size(0) * 0.5, object->size(2) * 0.5, - object->size(1) * 0.5; - p[2] << -object->size(0) * 0.5, object->size(2) * 0.5, - -object->size(1) * 0.5; - p[3] << object->size(0) * 0.5, object->size(2) * 0.5, - -object->size(1) * 0.5; - p[4] << object->size(0) * 0.5, -object->size(2) * 0.5, - object->size(1) * 0.5; - p[5] << -object->size(0) * 0.5, -object->size(2) * 0.5, - object->size(1) * 0.5; - p[6] << -object->size(0) * 0.5, -object->size(2) * 0.5, - -object->size(1) * 0.5; - p[7] << object->size(0) * 0.5, -object->size(2) * 0.5, - -object->size(1) * 0.5; - - for (uint i = 0; i < p.size(); i++) { - proj[i] = intrinsic * (rotate_ry * p[i] + pos); - if (fabs(p[i](2)) > std::numeric_limits::min()) { - p_proj[i].x = static_cast(proj[i](0) / proj[i](2)); - p_proj[i].y = static_cast(proj[i](1) / proj[i](2)); + if (show_camera_box3d_) { + Eigen::Matrix3d rotate_ry; + rotate_ry << cos(theta), 0, sin(theta), + 0, 1, 0, + -sin(theta), 0, cos(theta); + std::vector p(8); + std::vector proj(8); + std::vector p_proj(8); + p[0] << object->size(0) * 0.5, object->size(2) * 0.5, + object->size(1) * 0.5; + p[1] << -object->size(0) * 0.5, object->size(2) * 0.5, + object->size(1) * 0.5; + p[2] << -object->size(0) * 0.5, object->size(2) * 0.5, + -object->size(1) * 0.5; + p[3] << object->size(0) * 0.5, object->size(2) * 0.5, + -object->size(1) * 0.5; + p[4] << object->size(0) * 0.5, -object->size(2) * 0.5, + object->size(1) * 0.5; + p[5] << -object->size(0) * 0.5, -object->size(2) * 0.5, + object->size(1) * 0.5; + p[6] << -object->size(0) * 0.5, -object->size(2) * 0.5, + -object->size(1) * 0.5; + p[7] << object->size(0) * 0.5, -object->size(2) * 0.5, + -object->size(1) * 0.5; + + for (uint i = 0; i < p.size(); i++) { + proj[i] = intrinsic * (rotate_ry * p[i] + pos); + if (fabs(p[i](2)) > std::numeric_limits::min()) { + p_proj[i].x = static_cast(proj[i](0) / proj[i](2)); + p_proj[i].y = static_cast(proj[i](1) / proj[i](2)); + } + } + if (object->b_cipv) { + cv::line(image_2D, p_proj[0], p_proj[1], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[1], p_proj[2], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[2], p_proj[3], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[3], p_proj[0], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[4], p_proj[5], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[5], p_proj[6], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[6], p_proj[7], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[7], p_proj[4], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[0], p_proj[4], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[1], p_proj[5], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[2], p_proj[6], color_cipv_, + cipv_line_thickness_); + cv::line(image_2D, p_proj[3], p_proj[7], color_cipv_, + cipv_line_thickness_); } - } - if (object->b_cipv) { - cv::line(image_3D, p_proj[0], p_proj[1], color_cipv_, - cipv_line_thickness_); - cv::line(image_3D, p_proj[1], p_proj[2], color_cipv_, - 
cipv_line_thickness_); - cv::line(image_3D, p_proj[2], p_proj[3], color_cipv_, - cipv_line_thickness_); - cv::line(image_3D, p_proj[3], p_proj[0], color_cipv_, - cipv_line_thickness_); - cv::line(image_3D, p_proj[4], p_proj[5], color_cipv_, - cipv_line_thickness_); - cv::line(image_3D, p_proj[5], p_proj[6], color_cipv_, - cipv_line_thickness_); - cv::line(image_3D, p_proj[6], p_proj[7], color_cipv_, - cipv_line_thickness_); - cv::line(image_3D, p_proj[7], p_proj[4], color_cipv_, - cipv_line_thickness_); - cv::line(image_3D, p_proj[0], p_proj[4], color_cipv_, - cipv_line_thickness_); - cv::line(image_3D, p_proj[1], p_proj[5], color_cipv_, - cipv_line_thickness_); - cv::line(image_3D, p_proj[2], p_proj[6], color_cipv_, - cipv_line_thickness_); - cv::line(image_3D, p_proj[3], p_proj[7], color_cipv_, - cipv_line_thickness_); - } - cv::line(image_3D, p_proj[0], p_proj[1], color, line_thickness_); - cv::line(image_3D, p_proj[1], p_proj[2], color, line_thickness_); - cv::line(image_3D, p_proj[2], p_proj[3], color, line_thickness_); - cv::line(image_3D, p_proj[3], p_proj[0], color, line_thickness_); - cv::line(image_3D, p_proj[4], p_proj[5], color, line_thickness_); - cv::line(image_3D, p_proj[5], p_proj[6], color, line_thickness_); - cv::line(image_3D, p_proj[6], p_proj[7], color, line_thickness_); - cv::line(image_3D, p_proj[7], p_proj[4], color, line_thickness_); - cv::line(image_3D, p_proj[0], p_proj[4], color, line_thickness_); - cv::line(image_3D, p_proj[1], p_proj[5], color, line_thickness_); - cv::line(image_3D, p_proj[2], p_proj[6], color, line_thickness_); - cv::line(image_3D, p_proj[3], p_proj[7], color, line_thickness_); + cv::line(image_2D, p_proj[0], p_proj[1], color, line_thickness_); + cv::line(image_2D, p_proj[1], p_proj[2], color, line_thickness_); + cv::line(image_2D, p_proj[2], p_proj[3], color, line_thickness_); + cv::line(image_2D, p_proj[3], p_proj[0], color, line_thickness_); + cv::line(image_2D, p_proj[4], p_proj[5], color, line_thickness_); + cv::line(image_2D, p_proj[5], p_proj[6], color, line_thickness_); + cv::line(image_2D, p_proj[6], p_proj[7], color, line_thickness_); + cv::line(image_2D, p_proj[7], p_proj[4], color, line_thickness_); + cv::line(image_2D, p_proj[0], p_proj[4], color, line_thickness_); + cv::line(image_2D, p_proj[1], p_proj[5], color, line_thickness_); + cv::line(image_2D, p_proj[2], p_proj[6], color, line_thickness_); + cv::line(image_2D, p_proj[3], p_proj[7], color, line_thickness_); + } else { + if (object->b_cipv) { + cv::rectangle(image_2D, r, color_cipv_, cipv_line_thickness_); + } + cv::rectangle(image_2D, r, color, 2); + } // plot obstacles on ground plane in lidar coordinates Eigen::Matrix2d rotate_rz; @@ -1282,18 +1312,17 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( Eigen::Vector2d p_prev_ground; p_prev_ground(0) = virtual_egolane_ground.left_line.line_point[0](0); p_prev_ground(1) = virtual_egolane_ground.left_line.line_point[0](1); - cv::Point p_prev = ground2image(p_prev_ground); + cv::Point p_prev = ground2image(camera_name, p_prev_ground); for (unsigned i = 1; i < virtual_egolane_ground.left_line.line_point.size(); i++) { Eigen::Vector2d p_cur_ground; p_cur_ground(0) = virtual_egolane_ground.left_line.line_point[i](0); p_cur_ground(1) = virtual_egolane_ground.left_line.line_point[i](1); - cv::Point p_cur = ground2image(p_cur_ground); + cv::Point p_cur = ground2image(camera_name, p_cur_ground); if (p_cur.x >= 0 && p_cur.y >= 0 && p_prev.x >= 0 && p_prev.y >= 0 && p_cur.x < image_width_ && p_cur.y < image_height_ && p_prev.x < 
image_width_ && p_prev.y < image_height_) { cv::line(image_2D, p_prev, p_cur, virtual_lane_color_, line_thickness_); - cv::line(image_3D, p_prev, p_cur, virtual_lane_color_, line_thickness_); } cv::line(world_image_, world_point_to_bigimg(p_prev_ground), world_point_to_bigimg(p_cur_ground), virtual_lane_color_, 2); @@ -1304,19 +1333,18 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( // Right ego lane p_prev_ground(0) = virtual_egolane_ground.right_line.line_point[0](0); p_prev_ground(1) = virtual_egolane_ground.right_line.line_point[0](1); - p_prev = ground2image(p_prev_ground); + p_prev = ground2image(camera_name, p_prev_ground); for (unsigned i = 1; i < virtual_egolane_ground.right_line.line_point.size(); i++) { Eigen::Vector2d p_cur_ground; p_cur_ground(0) = virtual_egolane_ground.right_line.line_point[i](0); p_cur_ground(1) = virtual_egolane_ground.right_line.line_point[i](1); - cv::Point p_cur = ground2image(p_cur_ground); + cv::Point p_cur = ground2image(camera_name, p_cur_ground); if (p_cur.x >= 0 && p_cur.y >= 0 && p_prev.x >= 0 && p_prev.y >= 0 && p_cur.x < image_width_ && p_cur.y < image_height_ && p_prev.x < image_width_ && p_prev.y < image_height_) { cv::line(image_2D, p_prev, p_cur, virtual_lane_color_, line_thickness_); - cv::line(image_3D, p_prev, p_cur, virtual_lane_color_, line_thickness_); } cv::line(world_image_, world_point_to_bigimg(p_prev_ground), world_point_to_bigimg(p_cur_ground), virtual_lane_color_, 2); @@ -1326,13 +1354,9 @@ void Visualizer::Draw2Dand3D_all_info_single_camera( } last_timestamp_ = frame.timestamp; - camera_image_[frame.data_provider->sensor_name() + "_2D"] = image_2D; + camera_image_[frame.data_provider->sensor_name()] = image_2D; cv::resize(image_2D, - camera_image_[frame.data_provider->sensor_name() + "_2D"], - cv::Size(small_w_, small_h_)); - camera_image_[frame.data_provider->sensor_name() + "_3D"] = image_3D; - cv::resize(image_3D, - camera_image_[frame.data_provider->sensor_name() + "_3D"], + camera_image_[frame.data_provider->sensor_name()], cv::Size(small_w_, small_h_)); } @@ -1342,6 +1366,9 @@ void Visualizer::ShowResult_all_info_single_camera(const cv::Mat &img, const Eigen::Affine3d &world2camera) { if (frame.timestamp - last_timestamp_ < 0.02) return; + world_image_ = cv::Mat(world_h_, wide_pixel_, CV_8UC3, + apollo::perception::black_color); + // draw results on visulization panel int line_pos = 0; cv::Mat image = img.clone(); @@ -1412,42 +1439,51 @@ void Visualizer::ShowResult_all_info_single_camera(const cv::Mat &img, if (intrinsic_map_.find(camera_name) != intrinsic_map_.end() && extrinsic_map_.find(camera_name) != extrinsic_map_.end()) { Draw2Dand3D_all_info_single_camera( - image, frame, intrinsic_map_.at(camera_name).cast(), + camera_name, image, frame, + intrinsic_map_.at(camera_name).cast(), extrinsic_map_.at(camera_name), world2camera, motion_buffer); } else { AERROR << "Failed to find necessuary intrinsic or extrinsic params."; } // copy visual results into visualization panel - cv::Mat bigimg(world_h_, small_w_ + wide_pixel_, CV_8UC3); - camera_image_[camera_name + "_2D"].copyTo( - bigimg(cv::Rect(0, 0, small_w_, small_h_))); - camera_image_[camera_name + "_3D"].copyTo( - bigimg(cv::Rect(0, small_h_, small_w_, small_h_))); - world_image_.copyTo(bigimg(cv::Rect(small_w_, 0, wide_pixel_, world_h_))); - - // output visualization panel - if (write_out_img_) { - char path[1000]; - static int k = 0; - snprintf(path, sizeof(path), "%s/%06d.jpg", path_.c_str(), k++); - AINFO << "snapshot is saved at " << path; - 
cv::imwrite(path, bigimg); - } - if (cv_imshow_img_) { - cv::namedWindow("Apollo Visualizer", CV_WINDOW_NORMAL); - cv::setWindowProperty("Apollo Visualizer", CV_WND_PROP_FULLSCREEN, - CV_WINDOW_FULLSCREEN); - cv::imshow("Apollo Visualizer", bigimg); - int key = cvWaitKey(30); - key_handler(camera_name, key); - } - - // re-initialize empty world_image_ - world_image_ = cv::Mat(world_h_, wide_pixel_, CV_8UC3, - apollo::perception::black_color); - draw_range_circle(); + if (camera_name == camera_names_[0]) { + all_camera_recieved_ |= 0x1; + } else if (camera_name == camera_names_[1]) { + all_camera_recieved_ |= 0x2; + } + if (all_camera_recieved_ == 0x3) { + if (camera_name == visual_camera_) { + draw_range_circle(); + draw_selected_image_boundary(small_w_, small_h_, + &(camera_image_[visual_camera_])); + cv::Mat bigimg(world_h_, small_w_ + wide_pixel_, CV_8UC3); + camera_image_[camera_names_[0]].copyTo( + bigimg(cv::Rect(0, 0, small_w_, small_h_))); + camera_image_[camera_names_[1]].copyTo( + bigimg(cv::Rect(0, small_h_, small_w_, small_h_))); + world_image_.copyTo( + bigimg(cv::Rect(small_w_, 0, wide_pixel_, world_h_))); + cv::namedWindow("Apollo Visualizer", CV_WINDOW_NORMAL); + cv::setWindowProperty("Apollo Visualizer", CV_WND_PROP_FULLSCREEN, + CV_WINDOW_FULLSCREEN); + cv::imshow("Apollo Visualizer", bigimg); + int key = cvWaitKey(30); + key_handler(camera_name, key); + + // output visualization panel + if (write_out_img_) { + char path[1000]; + static int k = 0; + snprintf(path, sizeof(path), "%s/%06d.jpg", path_.c_str(), k++); + AINFO << "snapshot is saved at " << path; + cv::imwrite(path, bigimg); + } + all_camera_recieved_ = 0x0; + } // if (camera_name == visual_camera) + } // if (all_camera_recieved_ == 0x3) + } // if (cv_imshow_img_) } void Visualizer::draw_range_circle() { @@ -1464,6 +1500,13 @@ void Visualizer::draw_range_circle() { } } +void Visualizer::draw_selected_image_boundary( + const int width, int const height, cv::Mat *image) { + cv::Rect image_boundary(0, 0, width, height); + cv::rectangle(*image, + image_boundary, apollo::perception::light_green_color, 4); +} + cv::Point Visualizer::world_point_to_bigimg(const Eigen::Vector2d &p) { cv::Point point; point.x = static_cast(-p(1) * m2pixel_ + wide_pixel_ * 0.5); @@ -1478,12 +1521,13 @@ cv::Point Visualizer::world_point_to_bigimg(const Eigen::Vector4f &p) { return point; } -Eigen::Vector2d Visualizer::image2ground(cv::Point p_img) { +Eigen::Vector2d Visualizer::image2ground(const std::string &camera_name, + cv::Point p_img) { Eigen::Vector3d p_homo; p_homo << p_img.x, p_img.y, 1; Eigen::Vector3d p_ground; - p_ground = homography_image2ground_ * p_homo; + p_ground = homography_image2ground_[camera_name] * p_homo; if (fabs(p_ground(2)) > std::numeric_limits::min()) { p_ground(0) = p_ground(0) / p_ground(2); p_ground(1) = p_ground(1) / p_ground(2); @@ -1492,12 +1536,13 @@ Eigen::Vector2d Visualizer::image2ground(cv::Point p_img) { } return p_ground.block(0, 0, 2, 1); } -cv::Point Visualizer::ground2image(Eigen::Vector2d p_ground) { +cv::Point Visualizer::ground2image(const std::string &camera_name, + Eigen::Vector2d p_ground) { Eigen::Vector3d p_homo; p_homo << p_ground(0), p_ground(1), 1; Eigen::Vector3d p_img; - p_img = homography_ground2image_ * p_homo; + p_img = homography_ground2image_[camera_name] * p_homo; if (fabs(p_img(2)) > std::numeric_limits::min()) { p_img(0) = p_img(0) / p_img(2); p_img(1) = p_img(1) / p_img(2); diff --git a/modules/perception/camera/tools/offline/visualizer.h 
b/modules/perception/camera/tools/offline/visualizer.h index 5fce2e2d08c..2414ad5ce35 100644 --- a/modules/perception/camera/tools/offline/visualizer.h +++ b/modules/perception/camera/tools/offline/visualizer.h @@ -36,7 +36,8 @@ class Visualizer { bool Init(const std::vector<std::string> &camera_names, TransformServer *tf_server); bool Init_all_info_single_camera( - const std::string &camera_name, + const std::vector<std::string> &camera_names, + const std::string &visual_camera, const std::map<std::string, Eigen::Matrix3f> &intrinsic_map, const std::map<std::string, Eigen::Matrix4d> &extrinsic_map, const Eigen::Matrix4d &ex_lidar2imu, @@ -56,6 +57,7 @@ class Visualizer { const base::MotionBufferPtr motion_buffer, const Eigen::Affine3d &world2camera); void Draw2Dand3D_all_info_single_camera( + const std::string &camera_name, const cv::Mat &img, const CameraFrame &frame, const Eigen::Matrix3d &intrinsic, @@ -66,12 +68,16 @@ class Visualizer { const base::MotionBufferPtr motion_buffer); cv::Point world_point_to_bigimg(const Eigen::Vector2d &p); cv::Point world_point_to_bigimg(const Eigen::Vector4f &p); - Eigen::Vector2d image2ground(cv::Point p_img); - cv::Point ground2image(Eigen::Vector2d p_ground); + Eigen::Vector2d image2ground(const std::string &camera_name, + cv::Point p_img); + cv::Point ground2image(const std::string &camera_name, + Eigen::Vector2d p_ground); std::string type_to_string(const apollo::perception::base::ObjectType type); std::string sub_type_to_string( const apollo::perception::base::ObjectSubType type); - Eigen::Matrix3d homography_im2car() { return homography_image2ground_; } + Eigen::Matrix3d homography_im2car(std::string camera_name) { + return homography_image2ground_[camera_name]; + } void Set_ROI(int input_offset_y, int crop_height, int crop_width) { roi_start_ = input_offset_y; roi_height_ = crop_height; @@ -94,12 +100,15 @@ class Visualizer { bool copy_backup_file(const std::string &filename); bool key_handler(const std::string &camera_name, const int key); bool reset_key(); + void draw_range_circle(); + void draw_selected_image_boundary(const int width, int const height, + cv::Mat *image); bool write_out_img_ = false; bool cv_imshow_img_ = true; // homograph between image and ground plane - Eigen::Matrix3d homography_image2ground_ = Eigen::Matrix3d::Identity(3, 3); - Eigen::Matrix3d homography_ground2image_ = Eigen::Matrix3d::Identity(3, 3); + std::map<std::string, Eigen::Matrix3d> homography_image2ground_; + std::map<std::string, Eigen::Matrix3d> homography_ground2image_; private: std::map<std::string, cv::Mat> camera_image_; @@ -119,9 +128,9 @@ class Visualizer { double degree_to_radian_factor_ = M_PI / 180.0; double radian_to_degree_factor_ = 180.0 / M_PI; - double pitch_adj_degree_ = 0; - double yaw_adj_degree_ = 0; - double roll_adj_degree_ = 0; + std::map<std::string, double> pitch_adj_degree_; + std::map<std::string, double> yaw_adj_degree_; + std::map<std::string, double> roll_adj_degree_; double max_pitch_degree_ = 5.0; double min_pitch_degree_ = -5.0; @@ -138,16 +147,16 @@ class Visualizer { int roi_start_ = 312; int roi_width_ = 1920; - Eigen::Vector2d vp1_; - Eigen::Vector2d vp2_; - - void draw_range_circle(); + std::map<std::string, Eigen::Vector2d> vp1_; + std::map<std::string, Eigen::Vector2d> vp2_; + std::vector<std::string> camera_names_; + std::string visual_camera_ = "front_6mm"; // map for store params std::map<std::string, Eigen::Matrix3f> intrinsic_map_; std::map<std::string, Eigen::Matrix4d> extrinsic_map_; Eigen::Matrix4d ex_lidar2imu_; - Eigen::Matrix4d ex_camera2lidar_; + std::map<std::string, Eigen::Matrix4d> ex_camera2lidar_; Eigen::Matrix4d ex_camera2imu_; Eigen::Matrix4d ex_imu2camera_; Eigen::Matrix4d ex_car2camera_; @@ -156,7 +165,7 @@ class Visualizer { Eigen::Matrix4d adjusted_camera2car_ = Eigen::Matrix4d::Identity(); Eigen::Matrix4d projection_matrix_; - Eigen::Matrix3d K_; + std::map<std::string, Eigen::Matrix3d> K_; // Visualization
related variables bool use_class_color_ = true; @@ -192,6 +201,7 @@ class Visualizer { double speed_limit_ = 1.0; // in m/s unsigned int lane_step_num_ = 20; Cipv cipv_; + unsigned int all_camera_recieved_ = 0; }; } // namespace camera diff --git a/modules/perception/onboard/component/fusion_camera_detection_component.cc b/modules/perception/onboard/component/fusion_camera_detection_component.cc index 98b811ac93f..adfec0f1ccb 100644 --- a/modules/perception/onboard/component/fusion_camera_detection_component.cc +++ b/modules/perception/onboard/component/fusion_camera_detection_component.cc @@ -234,11 +234,11 @@ bool FusionCameraDetectionComponent::Init() { AINFO << "velodyne128_novatel_extrinsics: " << ex_lidar2imu; CHECK(visualize_.Init_all_info_single_camera( - visual_camera_, intrinsic_map_, extrinsic_map_, ex_lidar2imu, - pitch_adj_degree, yaw_adj_degree, roll_adj_degree, image_height_, - image_width_)); + camera_names_, visual_camera_, intrinsic_map_, extrinsic_map_, + ex_lidar2imu, pitch_adj_degree, yaw_adj_degree, roll_adj_degree, + image_height_, image_width_)); - homography_im2car_ = visualize_.homography_im2car(); + homography_im2car_ = visualize_.homography_im2car(visual_camera_); camera_obstacle_pipeline_->SetIm2CarHomography(homography_im2car_); if (enable_cipv_) { @@ -800,19 +800,17 @@ int FusionCameraDetectionComponent::InternalProc( *error_code)); bool send_viz_ret = camera_viz_writer_->Write(viz_msg); AINFO << "send out camera visualization msg, ts: " - << std::to_string(msg_timestamp) << " send_viz_ret: " << send_viz_ret; - - // visualize right away - if (camera_name == visual_camera_) { - cv::Mat output_image(image_height_, image_width_, CV_8UC3, - cv::Scalar(0, 0, 0)); - base::Image8U out_image(image_height_, image_width_, base::Color::RGB); - camera_frame.data_provider->GetImage(image_options, &out_image); - memcpy(output_image.data, out_image.cpu_data(), - out_image.total() * sizeof(uint8_t)); - visualize_.ShowResult_all_info_single_camera( - output_image, camera_frame, motion_buffer_, world2camera); - } + << std::to_string(msg_timestamp) + << " send_viz_ret: " << send_viz_ret; + + cv::Mat output_image(image_height_, image_width_, CV_8UC3, + cv::Scalar(0, 0, 0)); + base::Image8U out_image(image_height_, image_width_, base::Color::RGB); + camera_frame.data_provider->GetImage(image_options, &out_image); + memcpy(output_image.data, out_image.cpu_data(), + out_image.total() * sizeof(uint8_t)); + visualize_.ShowResult_all_info_single_camera( + output_image, camera_frame, motion_buffer_, world2camera); } // send out camera debug message diff --git a/modules/perception/onboard/component/lane_detection_component.cc b/modules/perception/onboard/component/lane_detection_component.cc index 663b598e531..30b0fd19986 100644 --- a/modules/perception/onboard/component/lane_detection_component.cc +++ b/modules/perception/onboard/component/lane_detection_component.cc @@ -210,10 +210,10 @@ bool LaneDetectionComponent::Init() { AINFO << "velodyne128_novatel_extrinsics: " << ex_lidar2imu; CHECK(visualize_.Init_all_info_single_camera( - visual_camera_, intrinsic_map_, extrinsic_map_, ex_lidar2imu, - pitch_adj_degree, yaw_adj_degree, roll_adj_degree, image_height_, - image_width_)); - homography_image2ground_ = visualize_.homography_im2car(); + camera_names_, visual_camera_, intrinsic_map_, extrinsic_map_, + ex_lidar2imu, pitch_adj_degree, yaw_adj_degree, roll_adj_degree, + image_height_, image_width_)); + homography_image2ground_ = visualize_.homography_im2car(visual_camera_); 
camera_lane_pipeline_->SetIm2CarHomography(homography_image2ground_); if (enable_visualization_) { @@ -666,17 +666,14 @@ int LaneDetectionComponent::InternalProc( std::shared_ptr> image_blob(new base::Blob); camera_frame.data_provider->GetImageBlob(image_options, image_blob.get()); - // visualize right away - if (camera_name == visual_camera_) { - cv::Mat output_image(image_height_, image_width_, CV_8UC3, - cv::Scalar(0, 0, 0)); - base::Image8U out_image(image_height_, image_width_, base::Color::RGB); - camera_frame.data_provider->GetImage(image_options, &out_image); - memcpy(output_image.data, out_image.cpu_data(), - out_image.total() * sizeof(uint8_t)); - visualize_.ShowResult_all_info_single_camera(output_image, camera_frame, - mot_buffer_, world2camera); - } + cv::Mat output_image(image_height_, image_width_, CV_8UC3, + cv::Scalar(0, 0, 0)); + base::Image8U out_image(image_height_, image_width_, base::Color::RGB); + camera_frame.data_provider->GetImage(image_options, &out_image); + memcpy(output_image.data, out_image.cpu_data(), + out_image.total() * sizeof(uint8_t)); + visualize_.ShowResult_all_info_single_camera(output_image, camera_frame, + mot_buffer_, world2camera); } // send out lane message diff --git a/modules/perception/production/conf/perception/camera/obstacle.pt b/modules/perception/production/conf/perception/camera/obstacle.pt index 81c803cc63f..21a35ef51a6 100644 --- a/modules/perception/production/conf/perception/camera/obstacle.pt +++ b/modules/perception/production/conf/perception/camera/obstacle.pt @@ -55,6 +55,23 @@ lane_param { } } +lane_param { + lane_detector_param { + plugin_param { + name : "DarkSCNNLaneDetector" + root_dir : "/apollo/modules/perception/production/data/perception/camera/models/lane_detector/" + config_file : "config_darkSCNN.pt" + } + camera_name : "front_12mm" + } + lane_postprocessor_param { + name : "DarkSCNNLanePostprocessor" + root_dir : "/apollo/modules/perception/production/data/perception/camera/models/lane_postprocessor/darkSCNN/" + config_file : "config.pt" + } +} + + calibration_service_param { plugin_param { name : "OnlineCalibrationService"
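
For reference, the patch above replaces the Visualizer's single set of calibration members (K_, homography_image2ground_, pitch/yaw/roll adjustments, vanishing points, ex_camera2lidar_) with std::map members keyed by camera name, and callers now fetch the image-to-car homography with homography_im2car(camera_name). The following standalone sketch illustrates the per-camera lookup-and-normalize pattern that image2ground() uses after this change; it is a minimal illustration assuming only Eigen and a placeholder identity homography, not Apollo's actual headers or calibration values.

#include <cmath>
#include <iostream>
#include <limits>
#include <map>
#include <string>

#include <Eigen/Dense>

// Per-camera homographies, mirroring the new
// std::map<std::string, Eigen::Matrix3d> homography_image2ground_ member
// declared in visualizer.h above.
static std::map<std::string, Eigen::Matrix3d> homography_image2ground;

// Same projection math as Visualizer::image2ground(camera_name, p_img):
// lift the pixel to homogeneous coordinates, apply the per-camera
// homography, then divide by the third component.
Eigen::Vector2d ImageToGround(const std::string &camera_name,
                              double u, double v) {
  const Eigen::Vector3d p_homo(u, v, 1.0);
  Eigen::Vector3d p_ground = homography_image2ground.at(camera_name) * p_homo;
  if (std::fabs(p_ground(2)) > std::numeric_limits<double>::min()) {
    p_ground /= p_ground(2);
  }
  return p_ground.head<2>();
}

int main() {
  // Placeholder homography for illustration only; in the offline tool it is
  // computed by adjust_angles() from K_[camera_name] and the camera-to-car
  // extrinsics.
  homography_image2ground["front_6mm"] = Eigen::Matrix3d::Identity();
  const Eigen::Vector2d g = ImageToGround("front_6mm", 960.0, 540.0);
  std::cout << "ground point: " << g.transpose() << std::endl;
  return 0;
}

The cv::Point-based routines in visualizer.cc follow the same pattern, converting between cv::Point and Eigen vectors, while ground2image() applies homography_ground2image_[camera_name] in the opposite direction.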