It's been over a year and I kept putting off this follow-up with the C++ code for measuring object dimensions; readers on my WeChat official account have been asking for it...... This post assumes some background: I won't cover environment setup, model conversion, or the OpenCV 4.6.0 include/library directory settings in detail, because frankly I'm lazy. The full project source is on my homepage!!!
The D435i camera comes with a C++ SDK. Download librealsense-master.zip from the official site,
then unzip it:
In the examples folder, let's first test that the camera works:
I rewrote the sample code a bit:
```cpp
#include <librealsense2/rs.hpp>
#include <librealsense2/rsutil.h>  // rs2_deproject_pixel_to_point
#include <opencv2/opencv.hpp>
#include <iostream>
#include <sstream>

using namespace cv;
using namespace std;

int main() {
    // Camera configuration: 1280x720 depth (Z16) and color (BGR8) at 30 fps
    rs2::pipeline pipe;
    rs2::config cfg;
    cfg.enable_stream(RS2_STREAM_DEPTH, 1280, 720, RS2_FORMAT_Z16, 30);
    cfg.enable_stream(RS2_STREAM_COLOR, 1280, 720, RS2_FORMAT_BGR8, 30);
    rs2::pipeline_profile profile = pipe.start(cfg);

    // Alignment object that maps the depth stream onto the color stream
    rs2::align align_to_color(RS2_STREAM_COLOR);

    while (true) {
        rs2::frameset frameset = pipe.wait_for_frames();
        auto aligned_frameset = align_to_color.process(frameset);  // do the actual alignment

        // From the aligned frameset, grab the color and depth frames,
        // then read the post-alignment depth intrinsics
        rs2::video_frame color_stream = aligned_frameset.get_color_frame();
        rs2::depth_frame aligned_depth_stream = aligned_frameset.get_depth_frame();
        rs2::video_stream_profile depth_stream_profile =
            aligned_depth_stream.get_profile().as<rs2::video_stream_profile>();
        const auto depth_intrinsics = depth_stream_profile.get_intrinsics();

        // Color image size
        const int w = color_stream.get_width();
        const int h = color_stream.get_height();

        // Center pixel of the image
        int c = w / 2;
        int r = h / 2;
        float pixel_center[2] = { (float)c, (float)r };

        // Deproject the center pixel into the color-camera coordinate system
        float point_in_color_coordinates[3];
        float center_depth = aligned_depth_stream.get_distance(c, r);
        rs2_deproject_pixel_to_point(point_in_color_coordinates, &depth_intrinsics,
                                     pixel_center, center_depth);
        cout << "Center pixel in color-camera coordinates: X=" << point_in_color_coordinates[0]
             << " Y=" << point_in_color_coordinates[1]
             << " Z=" << point_in_color_coordinates[2] << endl;

        // Wrap the color data in an OpenCV Mat of size (w, h) -- no copy
        Mat image(Size(w, h), CV_8UC3, (void*)color_stream.get_data(), Mat::AUTO_STEP);
        circle(image, Point(c, r), 5, Scalar(0, 0, 255), -1);  // red dot at the center

        // Overlay the 3D coordinates
        stringstream ss;
        ss << "3D Coordinates: (" << point_in_color_coordinates[0] << "X, "
           << point_in_color_coordinates[1] << "Y, "
           << point_in_color_coordinates[2] << "Z)";
        putText(image, ss.str(), Point(10, 30), FONT_HERSHEY_SIMPLEX, 0.7, Scalar(0, 255, 0), 2);

        // Update the window with new data
        imshow("d435i", image);
        if (waitKey(1) == 'q') break;
    }
    return 0;
}
```
Compile it and you can see the camera's live output; my D435i has gone missing, so I won't post result screenshots......
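By the way, for intuition: in the undistorted pinhole case, rs2_deproject_pixel_to_point is just the inverse of the projection equations. Here's a minimal sketch built on the rs2_intrinsics fields (it ignores the lens-distortion models the real SDK call also handles, so treat it as illustration only, not a substitute):

```cpp
#include <librealsense2/rs.hpp>

// Pinhole-only deprojection sketch: pixel (u, v) plus a depth in meters
// maps to a 3D point in the camera frame. Illustration only; the real
// rs2_deproject_pixel_to_point also corrects for lens distortion.
void deproject_pinhole_sketch(float point[3], const rs2_intrinsics& intr,
                              const float pixel[2], float depth) {
    point[0] = depth * (pixel[0] - intr.ppx) / intr.fx;  // X, meters
    point[1] = depth * (pixel[1] - intr.ppy) / intr.fy;  // Y, meters
    point[2] = depth;                                    // Z, meters
}
```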
Next up is the C++ deployment of the YOLOv8 keypoint (pose) model on the TensorRT framework. It's worth deploying on still images or video first to verify that the results look right,
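(If you still need an engine file: the usual route is to export the trained .pt weights to ONNX first, e.g. with Ultralytics' `yolo export model=yolov8s-pose.pt format=onnx`, and then build a serialized engine with something like `trtexec --onnx=yolov8s-pose.onnx --saveEngine=yolov8s-pose.engine --fp16`. The file names here are placeholders, and the exact export step can differ depending on which deployment repo you follow.)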
Alternatively, the RKNN framework works too:
Here is the code for checking the keypoint model with an ordinary camera feed or test files (not the D435i):
```cpp
#include "opencv2/opencv.hpp"
#include "yolov8-pose.hpp"
#include <cassert>
#include <chrono>

// Draw colors for the 17 COCO keypoints
const std::vector<std::vector<unsigned int>> KPS_COLORS = {
    {0, 255, 0},    {0, 255, 0},    {0, 255, 0},    {0, 255, 0},    {0, 255, 0},
    {255, 128, 0},  {255, 128, 0},  {255, 128, 0},  {255, 128, 0},  {255, 128, 0},
    {255, 128, 0},  {51, 153, 255}, {51, 153, 255}, {51, 153, 255}, {51, 153, 255},
    {51, 153, 255}, {51, 153, 255}};

// Keypoint index pairs that form the skeleton limbs
const std::vector<std::vector<unsigned int>> SKELETON = {
    {16, 14}, {14, 12}, {17, 15}, {15, 13}, {12, 13}, {6, 12}, {7, 13},
    {6, 7},   {6, 8},   {7, 9},   {8, 10},  {9, 11},  {2, 3},  {1, 2},
    {1, 3},   {2, 4},   {3, 5},   {4, 6},   {5, 7}};

// Draw colors for each skeleton limb
const std::vector<std::vector<unsigned int>> LIMB_COLORS = {
    {51, 153, 255}, {51, 153, 255}, {51, 153, 255}, {51, 153, 255}, {255, 51, 255},
    {255, 51, 255}, {255, 51, 255}, {255, 128, 0},  {255, 128, 0},  {255, 128, 0},
    {255, 128, 0},  {255, 128, 0},  {0, 255, 0},    {0, 255, 0},    {0, 255, 0},
    {0, 255, 0},    {0, 255, 0},    {0, 255, 0},    {0, 255, 0}};

int main(int argc, char** argv) {
    assert(argc == 3);  // usage: <engine_file> <image/video/folder path>

    // cuda:0
    cudaSetDevice(0);

    const std::string engine_file_path{argv[1]};
    const std::string path{argv[2]};

    std::vector<std::string> imagePathList;
    bool isVideo{false};

    auto yolov8_pose = new YOLOv8_pose(engine_file_path);
    yolov8_pose->make_pipe(true);

    // Decide whether the input is a single image, a video, or a folder of images
    if (IsFile(path)) {
        std::string suffix = path.substr(path.find_last_of('.') + 1);
        if (suffix == "jpg" || suffix == "jpeg" || suffix == "png") {
            imagePathList.push_back(path);
        }
        else if (suffix == "mp4" || suffix == "avi" || suffix == "m4v"
                 || suffix == "mpeg" || suffix == "mov" || suffix == "mkv") {
            isVideo = true;
        }
        else {
            printf("suffix %s is wrong !!!\n", suffix.c_str());
            std::abort();
        }
    }
    else if (IsFolder(path)) {
        cv::glob(path + "/*.jpg", imagePathList);
    }

    cv::Mat res, image;
    cv::Size size = cv::Size{640, 640};  // network input size
    int topk = 100;
    float score_thres = 0.25f;
    float iou_thres = 0.65f;
    std::vector<Object> objs;

    cv::namedWindow("result", cv::WINDOW_AUTOSIZE);

    if (isVideo) {
        cv::VideoCapture cap(path);
        if (!cap.isOpened()) {
            printf("can not open %s\n", path.c_str());
            return -1;
        }
        while (cap.read(image)) {
            objs.clear();
            yolov8_pose->copy_from_Mat(image, size);
            auto start = std::chrono::system_clock::now();
            yolov8_pose->infer();
            auto end = std::chrono::system_clock::now();
            yolov8_pose->postprocess(objs, score_thres, iou_thres, topk);
            yolov8_pose->draw_objects(image, res, objs, SKELETON, KPS_COLORS, LIMB_COLORS);
            auto tc = (double)std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.;
            printf("cost %2.4lf ms\n", tc);
            cv::imshow("result", res);
            if (cv::waitKey(10) == 'q') {
                break;
            }
        }
    }
    else {
        for (auto& p : imagePathList) {
            objs.clear();
            image = cv::imread(p);
            yolov8_pose->copy_from_Mat(image, size);
            auto start = std::chrono::system_clock::now();
            yolov8_pose->infer();
            auto end = std::chrono::system_clock::now();
            yolov8_pose->postprocess(objs, score_thres, iou_thres, topk);
            yolov8_pose->draw_objects(image, res, objs, SKELETON, KPS_COLORS, LIMB_COLORS);
            auto tc = (double)std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.;
            printf("cost %2.4lf ms\n", tc);
            cv::imshow("result", res);
            cv::waitKey(0);
        }
    }

    cv::destroyAllWindows();
    delete yolov8_pose;
    return 0;
}
```
I won't walk through the details of running all this here; it's been too long and I'm too lazy to reproduce it again....
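For what it's worth, invoking the demo is just the engine path plus an image/video/folder path on the command line, something like `./yolov8-pose yolov8s-pose.engine test.mp4` (binary and file names here are placeholders for whatever your build produces).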
Once you have the YOLOv8 keypoint model successfully deployed on the TensorRT (or RKNN) framework, you can combine it with the D435i camera to measure objects!!!!
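The measurement itself boils down to deprojecting the two detected keypoints into the camera coordinate system and taking the straight-line distance between the resulting 3D points. A minimal self-contained sketch of that final step (the helper name is mine, not from the project source, and it assumes both points came from valid depth readings):

```cpp
#include <cmath>

// Straight-line distance in meters between two 3D points in the
// color-camera coordinate system (e.g. outputs of rs2_deproject_pixel_to_point).
float distance_3d(const float p0[3], const float p1[3]) {
    const float dx = p0[0] - p1[0];
    const float dy = p0[1] - p1[1];
    const float dz = p0[2] - p1[2];
    return std::sqrt(dx * dx + dy * dy + dz * dz);
}
```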
Below is the core code based on the TensorRT framework:
```cpp
while (true) {
    rs2::frameset frameset = pipe.wait_for_frames();
    auto aligned_frameset = align_to_color.process(frameset);  // align depth to color

    // Aligned color/depth frames plus the post-alignment depth intrinsics
    rs2::video_frame color_stream = aligned_frameset.get_color_frame();
    rs2::depth_frame aligned_depth_stream = aligned_frameset.get_depth_frame();
    rs2::video_stream_profile depth_stream_profile =
        aligned_depth_stream.get_profile().as<rs2::video_stream_profile>();
    const auto depth_intrinsics = depth_stream_profile.get_intrinsics();

    // Color image size
    const int w = color_stream.get_width();
    const int h = color_stream.get_height();

    // Wrap the color frame in an OpenCV Mat and run keypoint inference on it
    Mat image(Size(w, h), CV_8UC3, (void*)color_stream.get_data(), Mat::AUTO_STEP);
    objs.clear();
    yolov8_pose->copy_from_Mat(image, size);
    yolov8_pose->infer();
    yolov8_pose->postprocess(objs, score_thres, iou_thres, topk);

    for (const auto& obj : objs) {
        std::cout << "Bounding Box: " << obj.rect << ", Score: " << obj.prob << std::endl;

        // Draw the two keypoints of this object
        for (int k = 0; k < 2; k++) {
            int kps_x = (int)std::round(obj.kps[k * 3]);
            int kps_y = (int)std::round(obj.kps[k * 3 + 1]);
            circle(image, Point(kps_x, kps_y), 5,
                   Scalar(KPS_COLORS[k][0], KPS_COLORS[k][1], KPS_COLORS[k][2]), -1);
        }

        // The two endpoints we want to measure between (e.g. tail and head)
        Point p0 = Point(obj.kps[0 * 3], obj.kps[0 * 3 + 1]);
        Point p1 = Point(obj.kps[1 * 3], obj.kps[1 * 3 + 1]);
        float pixel_coords0[2] = { static_cast<float>(p0.x), static_cast<float>(p0.y) };
        float pixel_coords1[2] = { static_cast<float>(p1.x), static_cast<float>(p1.y) };

        // Draw the line between the two keypoints
        line(image, p0, p1, Scalar(255, 0, 255), 4);

        // Deproject each pixel to a 3D point, but only if it lies inside the frame
        float point_in_color_coordinates0[3] = { 0.f, 0.f, 0.f };
        float point_in_color_coordinates1[3] = { 0.f, 0.f, 0.f };
        bool p0_valid = p0.x > 0 && p0.x < w && p0.y > 0 && p0.y < h;
        bool p1_valid = p1.x > 0 && p1.x < w && p1.y > 0 && p1.y < h;

        if (p0_valid) {
            float point0_dis = aligned_depth_stream.get_distance(p0.x, p0.y);
            cout << "Point 0 Depth: " << point0_dis << " meters" << endl;
            rs2_deproject_pixel_to_point(point_in_color_coordinates0, &depth_intrinsics,
                                         pixel_coords0, point0_dis);
            cout << "Point 0 3D coordinates: (" << point_in_color_coordinates0[0] << ", "
                 << point_in_color_coordinates0[1] << ", "
                 << point_in_color_coordinates0[2] << ")" << endl;
        }
        if (p1_valid) {
            float point1_dis = aligned_depth_stream.get_distance(p1.x, p1.y);
            cout << "Point 1 Depth: " << point1_dis << " meters" << endl;
            rs2_deproject_pixel_to_point(point_in_color_coordinates1, &depth_intrinsics,
                                         pixel_coords1, point1_dis);
            cout << "Point 1 3D coordinates: (" << point_in_color_coordinates1[0] << ", "
                 << point_in_color_coordinates1[1] << ", "
                 << point_in_color_coordinates1[2] << ")" << endl;
        }

        // Real-world distance between the two 3D points
        if (p0_valid && p1_valid) {
            float distance = std::sqrt(
                std::pow(point_in_color_coordinates0[0] - point_in_color_coordinates1[0], 2) +
                std::pow(point_in_color_coordinates0[1] - point_in_color_coordinates1[1], 2) +
                std::pow(point_in_color_coordinates0[2] - point_in_color_coordinates1[2], 2));
            std::cout << "Distance between points: " << distance << " meters" << std::endl;

            // Overlay the measured distance
            stringstream ss_dist;
            ss_dist << "Distance: " << distance << " meters";
            putText(image, ss_dist.str(), Point(10, 60), FONT_HERSHEY_SIMPLEX, 0.7,
                    Scalar(0, 255, 0), 2, LINE_AA);

            // Overlay the 3D coordinates next to each keypoint
            stringstream ss0, ss1;
            ss0 << "3D: (" << point_in_color_coordinates0[0] << "X0, "
                << point_in_color_coordinates0[1] << "Y0, "
                << point_in_color_coordinates0[2] << "Z0)";
            ss1 << "3D: (" << point_in_color_coordinates1[0] << "X1, "
                << point_in_color_coordinates1[1] << "Y1, "
                << point_in_color_coordinates1[2] << "Z1)";
            putText(image, ss0.str(), Point(p0.x, p0.y - 20), FONT_HERSHEY_SIMPLEX, 0.5,
                    Scalar(0, 255, 0), 1, LINE_AA);
            putText(image, ss1.str(), Point(p1.x, p1.y - 20), FONT_HERSHEY_SIMPLEX, 0.5,
                    Scalar(0, 255, 0), 1, LINE_AA);
        }
        std::cout << std::endl;
    }

    // Update the window with new data
    imshow("d435i", image);
    waitKey(1);
}
```
Last year's result: