
Upload files to 'v1'

weiboyan 1 year ago
parent
commit
d2e894aa8d
5 changed files with 1701 additions and 0 deletions
  1. v1/ArithPmd.cpp  + 1151 - 0
  2. v1/ArithPmd.h  + 195 - 0
  3. v1/WaferBuild.cpp  + 292 - 0
  4. v1/inc.h  + 34 - 0
  5. v1/main.cpp  + 29 - 0

File diff suppressed because it is too large
+ 1151 - 0
v1/ArithPmd.cpp


+ 195 - 0
v1/ArithPmd.h

@@ -0,0 +1,195 @@
+#ifndef ARITHPMD_H
+#define ARITHPMD_H
+
+#include "inc.h"
+#include "nlohmann/json.hpp"
+#include <opencv2/core/eigen.hpp>
+#include <pcl/point_types.h>
+#include <pcl/common/transforms.h>
+#include "pcl/common/centroid.h"
+// #include <opencv2/aruco.hpp>
+
+struct Screen {
+    float width;    // screen width (m)
+    float height;   // screen height (m)
+    uint16_t cols;  // screen resolution: number of columns
+    uint16_t rows;  // screen resolution: number of rows
+};
+
+struct ROI {
+    uint32_t startRow;
+    uint32_t startCol;
+    uint32_t blockRows;
+    uint32_t blockCols;
+};
+
+/*
+ * Use Matrix for important variables; pass Array for intermediates only when needed.
+ * Prefer row, col, width, height over x, y.
+ * Prefer Qt facilities when available, e.g. for directory handling.
+ * Use img for images; they are captured camera images, not photos.
+ * R: rotation, TL: translation vector, T: transform matrix; CVT is the transform that maps variables from coordinate system V into coordinate system C.
+ */
+
+bool save_matrix_as_img(const MatrixXf& m, std::string path);
+
+/*
+ * Write matrix data to a text file.
+ * add_index: whether to turn the first row or column into index values
+ * orientation: 0 makes the first row the index; otherwise the first column becomes the index
+*/
+bool save_matrix_as_txt(const MatrixXf& m, std::string path, bool add_index = false, int orientation = 0);
+
+/*
+ * Save a matrix as a point-cloud file in PLY format.
+ * params: matrix, file path
+ */
+bool save_matrix_as_ply(const MatrixXf& m, std::string path);
+
+/*
+ * Convert a matrix to homogeneous coordinates.
+ */
+MatrixXf matrix_to_home(const MatrixXf& origin);
+
+/*
+ * Rays from camera: for every pixel, a line through that pixel and the origin of the camera coordinate system.
+ * The viewing path of each pixel is modelled as a vector. This function generates one such vector per pixel over a rectangular plane; each vector is defined by two points, the first being the camera origin,
+ * the second (x,y,z) being the 3D coordinate obtained in the camera frame for the given z. Lens distortion is ignored.
+ * params: camera intrinsics, camera resolution, given Z, camera_rays (rays from camera at z = given value, in camera coordinates)
+ * camera_rays layout: 3 x (img.h x img.w); each column is (x,y,z), pixels stored left to right, top to bottom
+ */
+bool configCameraRay(const Matrix3f& cameraMatrix, const cv::Size& imageSize, float Z, MatrixXf& camera_rays);
+
+/*
+ * Compute the intersections of the camera rays with the reference plane, which is given in point-normal form.
+ * params: reference-plane point, reference-plane normal, rays from camera, matrix receiving the intersections
+ * refPlane layout: 3 x (img.h x img.w); each column is (x,y,z), pixels stored left to right, top to bottom
+ */
+bool configRefPlane(const Eigen::Vector3f& plane_point, const Eigen::Vector3f& plane_normal, const MatrixXf& camera_rays, MatrixXf& refPlane);
+
+/*
+ * For every screen pixel, compute its position in the world coordinate system.
+ * params: screen information, rotation of screen relative to world, translation of screen relative to world, matrix receiving the screen pixel positions
+ * screen_pix_pos layout: 3 x (screen.h x screen.w); each column is (x,y,z), pixels stored left to right, top to bottom
+ */
+bool configScreenPixelPos(const Screen& screen, const Matrix4f& WST, MatrixXf& screen_pix_pos);
+
+/*
+ * Generate four-step phase-shifting fringe patterns.
+ * params: pattern size, periods in the x and y directions, result (size = 16; order: high-frequency x, y, then low-frequency x, y)
+ */
+// bool pattern_gen(std::vector<uint8_t> pat_size, std::vector<uint8_t> period, std::vector<MatrixXf>& pats);
+
+/* Four-step phase shifting.
+ * params: images (size = 16), resulting wrapped phase maps (size = 4)
+ */
+bool phase_shifting(const std::vector<MatrixXf>& imgs, std::vector<MatrixXf>& wraped_ps_maps);
+
+/*
+ * Two-frequency phase unwrapping.
+ * params: wrapped phases (high-freq vertical, high-freq horizontal, low-freq vertical, low-freq horizontal fringes), period count of the high-freq vertical fringes, period count of the high-freq horizontal fringes, result phase maps
+ */
+bool phase_unwrapping(const vector<MatrixXf>& wraped_ps_maps, uint16_t period_width, uint16_t period_height, vector<MatrixXf>& unwrap_ps_maps);
+
+/*
+ * Find the 3D coordinates of the screen pixels whose phase matches each camera image pixel.
+ * params: phase maps, screen information, number of fringe periods along the width, number along the height, screen pixel positions,
+ * screen pixel coordinates phase-matched to the camera image (layout: 3 x (cam_img.w x cam_img.h); each column is (x,y,z), pixels stored left to right, top to bottom)
+ */
+bool screen_camera_phase_match(const vector<MatrixXf>& unwrap_ps_maps, const Screen& screen, uint16_t period_width, uint16_t period_height,
+    const MatrixXf& screen_pix_pos, MatrixXf& screen_camera_phase_match_pos);
+
+/*
+ * Slope calculation.
+ * params: camera optical-centre position, reference plane, screen-camera phase matches, slope results for the X and Y directions, each of size 1 x img.size
+ */
+bool slope_calculate(const Vector3f& camera_world, const MatrixXf& refPlane, const MatrixXf& screen_camera_phase_match_pos,
+    std::vector<MatrixXfR>& slope);
+
+/*
+ * Recover the surface height from the slopes (modal method, using Zernike polynomials).
+ * params: x-direction slope (M x N), y-direction slope, result height map, physical extents in x and y (mm), number of Zernike polynomial terms
+ */
+bool modal_reconstruction(const MatrixXf& sx, const MatrixXf& sy, MatrixXfR& Z, const std::vector<float>& range, uint32_t terms);
+
+double computeReprojectionErrors(
+    const vector<vector<cv::Point3f>>& objectPoints,
+    const vector<vector<cv::Point2f>>& imagePoints,
+    const vector<Mat>& rvecs, const vector<Mat>& tvecs,
+    const Mat& cameraMatrix, const Mat& distCoeffs,
+    vector<float>& perViewErrors);
+
+/*
+// ArUco calibration board parameters
+struct ArucoBoard {
+    cv::Size markers_size;
+    float markerLength;    // unit: m
+    float markerSeparation;
+    cv::Ptr<cv::aruco::Dictionary> dictionary;
+    float screenOffset_X;
+    float screenOffset_Y;
+};
+
+
+bool aruco_analyze(const vector<Mat>& imgs_board, const ArucoBoard& board, const Mat& cameraMatrix, const Mat& distCoeffs, vector<Mat>& Rmats,
+    vector<cv::Vec3f>& Tvecs, vector<vector<int>>& Ids, vector<vector<vector<cv::Point2f>>>& corners,
+    vector<vector<vector<cv::Point2f>>>& rejectedCandidates, std::string output_dir);
+*/
+/*
+ * System geometry calibration.
+ * Implementation of the paper "Flexible geometrical calibration for fringe-reflection
+ * measurement" (2012).
+ */
+bool system_calib(vector<Matrix3f>& CVR, vector<Vector3f>& CVTL, Matrix3f& CSR, VectorXf& CSTL_D, vector<Vector3f>& n);
+
+class ArithPmd
+{
+public:
+    ArithPmd();
+    ~ArithPmd();
+
+public:
+    cv::Size img_size;      // camera image resolution
+    uint16_t period_width;  // number of fringe periods along the width
+    uint16_t period_height; // number of fringe periods along the height
+
+    cv::Mat mask0, mask1, mask2, mask3;
+
+private:
+    Screen screen;          // screen information
+    ROI roi0, roi1, roi2, roi3;             // reconstruction regions in the captured images
+
+    Matrix4f CTC0, CTC1, CTC2, CTC3;
+
+    MatrixXf refPlane0, refPlane1, refPlane2, refPlane3;
+    Vector3f camera_origin_world0, camera_origin_world1, camera_origin_world2, camera_origin_world3;
+    MatrixXf screen_pix_pos0, screen_pix_pos1, screen_pix_pos2, screen_pix_pos3;
+    MatrixXf roi_m0, roi_m1, roi_m2, roi_m3; // matrices for the mask regions
+
+    // std::vector<Matrix4f*> CTC{}; // rotation-translation matrices
+
+    // computed results
+    // std::vector<MatrixXf*> refPlane{};      // intersections of camera rays with the reference plane, in world coordinates, 3 x (img.h x img.w)
+    // std::vector <Vector3f*> camera_origin_world{ };   // camera-frame origin expressed in world coordinates
+    // std::vector<MatrixXf*> screen_pix_pos{};// screen pixel positions in world coordinates, 3 x (screen.h x screen.w)
+
+
+    // std::vector<Mat> patterns;   // projected fringe patterns
+    // std::vector<Eigen::MatrixXf> img_pats;  // captured fringe images
+    // uint16_t screen_delay;  // wait time while displaying fringes (milliseconds)
+
+
+public:
+
+    int Initlization(const nlohmann::json& param);
+
+    int Preprocess(const std::vector<cv::Mat>& images, int area_threshold, int32_t camera_index);
+
+    int Predict(const std::vector<Eigen::MatrixXf>& img_pats, MatrixXfR& pcl, int32_t camera_index);
+
+    int Stitch(const pcl::PointCloud<pcl::PointXYZ>& cloud1, const pcl::PointCloud<pcl::PointXYZ>& cloud2, pcl::PointCloud<pcl::PointXYZ>& cloud_output, int32_t camera_index);
+};
+
+
+
+#endif // ARITHPMD_H
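
Note: the phase_shifting and phase_unwrapping declarations above follow the standard four-step / two-frequency scheme. As a point of reference, here is a minimal sketch of those two formulas; it is not part of this commit, the function names wrap_phase_four_step and unwrap_two_frequency are illustrative, and it assumes the four images of a fringe set are shifted by pi/2 each and that the low-frequency pattern spans exactly one period across the field.

#include <Eigen/Dense>
#include <cmath>
#include <cstdint>

// Wrapped phase from four pi/2-shifted images:
// I_k = A + B*cos(phi + k*pi/2), k = 0..3  =>  phi = atan2(I3 - I1, I0 - I2), phi in (-pi, pi].
Eigen::MatrixXf wrap_phase_four_step(const Eigen::MatrixXf& I0, const Eigen::MatrixXf& I1,
                                     const Eigen::MatrixXf& I2, const Eigen::MatrixXf& I3)
{
    Eigen::MatrixXf phi(I0.rows(), I0.cols());
    for (Eigen::Index r = 0; r < I0.rows(); ++r)
        for (Eigen::Index c = 0; c < I0.cols(); ++c)
            phi(r, c) = std::atan2(I3(r, c) - I1(r, c), I0(r, c) - I2(r, c));
    return phi;
}

// Two-frequency temporal unwrapping: the low-frequency phase, scaled by the number of
// high-frequency periods, predicts the absolute phase; rounding the difference to the
// nearest multiple of 2*pi gives the fringe order of the high-frequency phase.
Eigen::MatrixXf unwrap_two_frequency(const Eigen::MatrixXf& phi_high,
                                     const Eigen::MatrixXf& phi_low, uint16_t periods)
{
    const float two_pi = 6.28318530718f;
    Eigen::MatrixXf unwrapped(phi_high.rows(), phi_high.cols());
    for (Eigen::Index r = 0; r < phi_high.rows(); ++r)
        for (Eigen::Index c = 0; c < phi_high.cols(); ++c) {
            float order = std::round((phi_low(r, c) * periods - phi_high(r, c)) / two_pi);
            unwrapped(r, c) = phi_high(r, c) + two_pi * order;
        }
    return unwrapped;
}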

+ 292 - 0
v1/WaferBuild.cpp

@@ -0,0 +1,292 @@
+#ifndef _EXPORTING
+#define _EXPORTING
+#endif
+
+#include "WaferRebuild.h"
+#include "ArithPmd.h"
+#include <cmath>    // std::sqrt
+#include <numeric>  // std::accumulate, std::inner_product
+
+WaferRebuild::WaferRebuild()
+{
+    arith_pmd = new ArithPmd();
+}
+
+WaferRebuild::~WaferRebuild()
+{
+    if (arith_pmd != nullptr)
+    {
+        delete arith_pmd;
+        arith_pmd = nullptr;
+    }
+}
+
+std::tuple<int, std::string> WaferRebuild::initParam(const nlohmann::json& param)
+{
+    try
+    {
+        int retcode = arith_pmd->Initlization(param);
+        return std::make_tuple(0, "success");
+    }
+    catch (const std::exception& e)
+    {
+        return std::make_tuple(-1, e.what());
+    }
+
+}
+
+
+std::tuple<int, std::string> WaferRebuild::calcCloud(const std::vector<std::shared_ptr<infra::Image>>& image_list, std::shared_ptr<infra::CloudPoint<double>>& cloud_points, int32_t camera_index)
+{
+    try
+    {
+        if (!cloud_points) {
+            throw std::runtime_error("Error: cloud_points is nullptr!");
+        }
+        std::vector<Eigen::MatrixXf> img_pats;
+        std::vector<cv::Mat> gray_images;
+        img_pats.resize(image_list.size());
+        for (size_t i = 0; i < image_list.size(); ++i) {
+            int type = (image_list[i]->type() == -1) ? CV_8UC1 : CV_8UC3; // assume -1 means grayscale; everything else is a three-channel image
+            cv::Mat mat;
+            cv::Mat grayMat;
+            if (type == CV_8UC1) {
+                grayMat = cv::Mat(arith_pmd->img_size.height, arith_pmd->img_size.width, type, image_list[i]->data());
+                gray_images.push_back(grayMat);
+            }
+            else {
+                mat = cv::Mat(arith_pmd->img_size.height, arith_pmd->img_size.width, type, image_list[i]->data());
+                cv::cvtColor(mat, grayMat, cv::COLOR_BGR2GRAY);
+                gray_images.push_back(grayMat);
+            }
+            // cv::cv2eigen(grayMat, img_pats[i]);
+        }
+        std::vector<cv::Mat> gray_temp{ gray_images[gray_images.size() - 4], gray_images[gray_images.size() - 3], gray_images[gray_images.size() - 2], gray_images[gray_images.size() - 1] };
+        for (size_t i = 0; i < gray_images.size(); ++i)
+        {
+            cv::Mat temp;
+            if (camera_index == 0)
+            {
+                cv::bitwise_and(gray_images[i], gray_images[i], temp, arith_pmd->mask0);
+            }
+            else if (camera_index == 1)
+            {
+                cv::bitwise_and(gray_images[i], gray_images[i], temp, arith_pmd->mask1);
+            }
+            else if (camera_index == 2)
+            {
+                cv::bitwise_and(gray_images[i], gray_images[i], temp, arith_pmd->mask2);
+            }
+            else if (camera_index == 3)
+            {
+                cv::bitwise_and(gray_images[i], gray_images[i], temp, arith_pmd->mask3);
+            }
+            // cv::bitwise_and(gray_images[i], gray_images[i], temp, arith_pmd->mask);
+            cv::imwrite("temp.bmp", temp);
+            cv::cv2eigen(temp, img_pats[i]);
+        }
+        int retcode0 = arith_pmd->Preprocess(gray_temp, 500, camera_index);
+        MatrixXfR pcl;
+        int retcode = arith_pmd->Predict(img_pats, pcl, camera_index);
+
+        // std::vector<infra::Point3d<float>> points;
+
+        // reserve enough space
+        int numPoints = pcl.cols();
+        cloud_points->cloud_point.resize(numPoints);
+        // cloud_points->cloud_point.reserve(numPoints);
+        // cloud_points.reserve(numPoints);
+        // iterate over every point
+        for (int i = 0; i < numPoints; ++i) {
+            // build a new Point3d and store it in the vector
+            infra::Point3d<double> point = infra::Point3d<double>(
+                pcl(0, i), // X coordinate
+                pcl(1, i), // Y coordinate (m)
+                pcl(2, i)  // Z coordinate
+            );
+            cloud_points->cloud_point[i] = point;
+        }
+        save_matrix_as_ply(pcl, "surface.ply");
+        return std::make_tuple(0, "success");
+    }
+    catch (const std::exception& e) {
+        return std::make_tuple(-1, e.what());
+    }
+
+}
+
+pcl::PointCloud<pcl::PointXYZ> convertToPCLPointCloud(const std::shared_ptr<infra::CloudPoint<double>>& cloud_points) {
+    pcl::PointCloud<pcl::PointXYZ> cloud;
+
+    if (!cloud_points) {
+        std::cerr << "Error: Input cloud_points is nullptr." << std::endl;
+        return cloud;
+    }
+
+    // Resize the PCL PointCloud to the size of cloud_points
+    cloud.points.resize(cloud_points->size());
+
+    // Convert each point from infra::CloudPoint<double> to pcl::PointXYZ
+    for (size_t i = 0; i < cloud_points->size(); ++i) {
+        const infra::Point3d<double>& point = cloud_points->cloud_point[i];
+        cloud.points[i].x = static_cast<float>(point.x);
+        cloud.points[i].y = static_cast<float>(point.y);
+        cloud.points[i].z = static_cast<float>(point.z);
+    }
+
+    return cloud;
+}
+
+static std::shared_ptr<infra::CloudPoint<double>> convertToInfraCloudPoint(const pcl::PointCloud<pcl::PointXYZ>& cloud) {
+    // Create a new infra::CloudPoint<double> object
+    auto cloud_points = std::make_shared<infra::CloudPoint<double>>();
+
+    // Resize the cloud_points->cloud_point vector to the size of cloud
+    cloud_points->cloud_point.resize(cloud.points.size());
+
+    // Convert each pcl::PointXYZ point to infra::Point3d<double> and store in cloud_points
+    for (size_t i = 0; i < cloud.points.size(); ++i) {
+        cloud_points->cloud_point[i].x = static_cast<double>(cloud.points[i].x);
+        cloud_points->cloud_point[i].y = static_cast<double>(cloud.points[i].y);
+        cloud_points->cloud_point[i].z = static_cast<double>(cloud.points[i].z);
+    }
+
+    return cloud_points;
+}
+
+std::tuple<int, std::string> WaferRebuild::mergeCloud(const std::map<int32_t, std::shared_ptr<infra::CloudPoint<double>>>& cloud_points_list,
+    std::shared_ptr<infra::CloudPoint<double>>& cloud_points)
+{
+    try
+    {
+        int error_code = 0;
+        std::string error_message = "Success";
+        pcl::PointCloud<pcl::PointXYZ> cloud_output;
+        auto it = cloud_points_list.begin();
+        if (it == cloud_points_list.end()) {
+            error_code = -1;
+            error_message = "Error: Input cloud_points_list is empty.";
+            return std::make_tuple(error_code, error_message);
+        }
+        cloud_output = convertToPCLPointCloud(it->second);
+        ++it;
+        // Merge each subsequent cloud in the list
+        while (it != cloud_points_list.end()) {
+            pcl::PointCloud<pcl::PointXYZ> cloud_to_merge = convertToPCLPointCloud(it->second);
+
+            // Call Stitch to merge cloud_output and cloud_to_merge
+            int stitch_result = arith_pmd->Stitch(cloud_output, cloud_to_merge, cloud_output, it->first);
+            if (stitch_result != 0) {
+                error_code = stitch_result;
+                error_message = "Error: Stitching failed for camera index " + std::to_string(it->first);
+                break;
+            }
+            ++it;
+        }
+        cloud_points = convertToInfraCloudPoint(cloud_output);
+
+        return std::make_tuple(error_code, error_message);
+    }
+    catch (const std::exception& e) {
+        return std::make_tuple(-1, e.what());
+    }
+}
+
+
+static infra::Point3d<double> calculateCentroid(const pcl::PointCloud<pcl::PointXYZ>& pcl_cloud) {
+    infra::Point3d<double> centroid;
+
+    // Create an Eigen vector to store the centroid
+    Eigen::Vector4f pcl_centroid;
+
+    // Compute centroid using PCL function
+    pcl::compute3DCentroid(pcl_cloud, pcl_centroid);
+
+    // Convert the centroid from Eigen vector to infra::Point3d<double>
+    centroid.x = pcl_centroid[0];
+    centroid.y = pcl_centroid[1];
+    centroid.z = pcl_centroid[2];
+
+    return centroid;
+}
+
+
+std::tuple<int, std::string> WaferRebuild::warpage(const std::shared_ptr<infra::CloudPoint<double>>& cloud_points, int32_t filterMM, nlohmann::json& result)
+{
+    try {
+        if (!cloud_points) {
+            throw std::runtime_error("Input cloud_points is nullptr.");
+        }
+
+        // Convert infra::CloudPoint<double> to pcl::PointCloud<pcl::PointXYZ>
+        pcl::PointCloud<pcl::PointXYZ> pcl_cloud = convertToPCLPointCloud(cloud_points);
+
+        // Calculate centroid (assuming centroid calculation is implemented in math_tool.h)
+        infra::Point3d<double> centroid = calculateCentroid(pcl_cloud);  // Example function, replace with actual implementation
+
+        // Define x and y axes
+        infra::Point3d<double> axis_x(1.0, 0.0, 0.0);  // Assuming x-axis direction
+        infra::Point3d<double> axis_y(0.0, 1.0, 0.0);  // Assuming y-axis direction
+
+        // Calculate heights and populate result JSON
+        for (auto& axis : { "x", "y" }) {
+            std::vector<double> heights;
+            double bow = 0.0;
+            double warpage = 0.0;
+
+            infra::Point3d<double> axis_vector = (std::string(axis) == "x") ? axis_x : axis_y;  // compare contents, not pointers
+
+            // Calculate heights and populate heights vector
+            for (size_t i = 0; i < pcl_cloud.points.size(); ++i) {
+                const infra::Point3d<double>& point = cloud_points->cloud_point[i];
+                double height = point.x * axis_vector.x + point.y * axis_vector.y + point.z * axis_vector.z;
+                heights.push_back(height);
+            }
+
+            // Calculate bow (average height)
+            if (!heights.empty()) {
+                bow = std::accumulate(heights.begin(), heights.end(), 0.0) / heights.size();
+            }
+
+            // Calculate warpage (standard deviation of heights)
+            if (heights.size() > 1) {
+                double sum_sq_diff = std::inner_product(heights.begin(), heights.end(), heights.begin(), 0.0);
+                double mean = bow;
+                double variance = sum_sq_diff / heights.size() - mean * mean;
+                warpage = std::sqrt(variance);
+            }
+
+            // Fill JSON result
+            result[axis]["height"] = heights;
+            result[axis]["bow"] = bow;
+            result[axis]["warpage"] = warpage;
+        }
+
+        /*
+        // Calculate max bow and max warpage
+        double max_bow = 0.0;
+        double max_warpage = 0.0;
+        double angle_max_bow = 0.0;
+        double angle_max_warpage = 0.0;
+
+        // Calculate max_bow and max_warpage values
+        // (example calculation, replace with actual implementation)
+        // Assume you have functions to find maximum bow and warpage and their angles
+        max_bow = calculateMaxBow(result);
+        max_warpage = calculateMaxWarpage(result);
+        angle_max_bow = calculateAngleMaxBow(result);
+        angle_max_warpage = calculateAngleMaxWarpage(result);
+
+        // Fill max bow and max warpage in JSON result
+        result["max_bow"]["angle"] = angle_max_bow;
+        result["max_bow"]["height"] = result[angle_max_bow]["height"];
+        result["max_bow"]["max_bow"] = max_bow;
+
+        result["max_warpage"]["angle"] = angle_max_warpage;
+        result["max_warpage"]["height"] = result[angle_max_warpage]["height"];
+        result["max_warpage"]["max_warpage"] = max_warpage;
+        */
+        return std::make_tuple(0, "Success");
+    }
+    catch (const std::exception& e) {
+        return std::make_tuple(-1, e.what());
+    }
+}
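
Note: taken together, the functions above form the per-camera reconstruction and stitching flow. Below is a usage sketch, not part of this commit, assuming four cameras (matching mask0..mask3); loadImagesForCamera() and the filterMM value of 0 are illustrative placeholders, everything else uses the signatures defined in this file.

#include <map>
#include <memory>
#include <vector>
#include "WaferRebuild.h"

// Hypothetical helper: fetch the captured fringe images for one camera.
std::vector<std::shared_ptr<infra::Image>> loadImagesForCamera(int32_t camera_index);

int run_wafer_measurement(const nlohmann::json& param)
{
    WaferRebuild rebuild;
    auto [rc_init, msg_init] = rebuild.initParam(param);
    if (rc_init != 0) return rc_init;

    // One cloud per camera; calcCloud throws if the output pointer is null,
    // so allocate it before the call.
    std::map<int32_t, std::shared_ptr<infra::CloudPoint<double>>> clouds;
    for (int32_t cam = 0; cam < 4; ++cam) {
        auto cloud = std::make_shared<infra::CloudPoint<double>>();
        auto [rc, msg] = rebuild.calcCloud(loadImagesForCamera(cam), cloud, cam);
        if (rc != 0) return rc;
        clouds[cam] = cloud;
    }

    // Stitch the per-camera clouds, then compute bow/warpage along x and y.
    std::shared_ptr<infra::CloudPoint<double>> merged;
    auto [rc_merge, msg_merge] = rebuild.mergeCloud(clouds, merged);
    if (rc_merge != 0) return rc_merge;

    nlohmann::json report;
    auto [rc_warp, msg_warp] = rebuild.warpage(merged, /*filterMM=*/0, report);
    return rc_warp;
}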

+ 34 - 0
v1/inc.h

@@ -0,0 +1,34 @@
+#ifndef PMDCFG_H
+#define PMDCFG_H
+
+/*
+ * This file defines the commonly used types.
+ */
+
+#include <opencv2/opencv.hpp>
+#include <Eigen/Dense>
+#include <Eigen/src/Core/util/Macros.h>
+
+using std::endl;
+using std::cout;
+using std::vector;
+
+
+typedef Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MatrixXfR;
+using Eigen::MatrixXf;
+using Eigen::Matrix3f;
+using Eigen::Matrix4f;
+typedef Eigen::Matrix<uint32_t, Eigen::Dynamic, Eigen::Dynamic> MatrixXUI;
+
+using Eigen::VectorXf;
+using Eigen::Vector2f;
+using Eigen::Vector3f;
+using Eigen::Vector4f;
+typedef Eigen::Matrix<uint32_t, Eigen::Dynamic, 1> VectorXUI;
+
+using Eigen::ArrayXf;
+using Eigen::ArrayXXf;
+
+using cv::Mat;
+
+#endif // PMDCFG_H
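
Note: the row-major MatrixXfR typedef matches OpenCV's memory layout, so cv::Mat data can be copied into it with cv::cv2eigen without an internal transpose. A small sketch follows, not part of this commit, assuming an 8-bit grayscale input; the name image_to_matrix is illustrative.

#include <opencv2/core.hpp>
#include <opencv2/core/eigen.hpp>
#include "inc.h"

// Copy an 8-bit grayscale image into a row-major float matrix; cv2eigen performs
// the element-type conversion and keeps the pixel order (left to right, top to bottom).
MatrixXfR image_to_matrix(const cv::Mat& gray)
{
    MatrixXfR m;
    cv::cv2eigen(gray, m);
    return m;
}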

+ 29 - 0
v1/main.cpp

@@ -0,0 +1,29 @@
+#include <iostream>
+#include <thread>
+#include "sys_time.h"
+#include "log.h"
+#include "data_queue.h"
+#include "math_tool.h"
+
+int main()
+{
+    initLog("PMD", "./pmd.log", 0);
+
+    infra::DataQueue<std::string> queue;
+    queue.push("aaa");
+    queue.push("ddd");
+
+    LOG_WARN("size: %lu", queue.size());
+
+    int64_t s = infra::SysTime::current_ms();
+    std::this_thread::sleep_for(std::chrono::seconds(2));
+    int64_t e = infra::SysTime::current_ms();
+    std::cout << e - s << std::endl;
+    LOG_WARN("e-s: %lu", e - s);
+
+
+    infra::Point3d<float> ee(1.1, 3.2, 5.6);
+    std::cout << ee.x << ee.y << ee.z << std::endl;
+    getchar();
+    return 0;
+}