Intel® RealSense™ Cross-Platform API
rsutil.h
Go to the documentation of this file.
1 /* License: Apache 2.0. See LICENSE file in root directory.
2  Copyright(c) 2015 Intel Corporation. All Rights Reserved. */
3 
4 #ifndef LIBREALSENSE_RSUTIL2_H
5 #define LIBREALSENSE_RSUTIL2_H
6 
7 #include "h/rs_types.h"
8 #include "h/rs_sensor.h"
9 #include "h/rs_frame.h"
10 #include "rs.h"
11 #include "assert.h"
12 #include <stdlib.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <math.h>
16 #include <float.h>
17 
18 /* Given a point in 3D space, compute the corresponding pixel coordinates in an image with no distortion or forward distortion coefficients produced by the same camera */
19 static void rs2_project_point_to_pixel(float pixel[2], const struct rs2_intrinsics * intrin, const float point[3])
20 {
21  //assert(intrin->model != RS2_DISTORTION_INVERSE_BROWN_CONRADY); // Cannot project to an inverse-distorted image
22 
23  float x = point[0] / point[2], y = point[1] / point[2];
24 
26  {
27 
28  float r2 = x*x + y*y;
29  float f = 1 + intrin->coeffs[0]*r2 + intrin->coeffs[1]*r2*r2 + intrin->coeffs[4]*r2*r2*r2;
30  x *= f;
31  y *= f;
32  float dx = x + 2*intrin->coeffs[2]*x*y + intrin->coeffs[3]*(r2 + 2*x*x);
33  float dy = y + 2*intrin->coeffs[3]*x*y + intrin->coeffs[2]*(r2 + 2*y*y);
34  x = dx;
35  y = dy;
36  }
37  if (intrin->model == RS2_DISTORTION_FTHETA)
38  {
39  float r = sqrtf(x*x + y*y);
40  if (r < FLT_EPSILON)
41  {
42  r = FLT_EPSILON;
43  }
44  float rd = (float)(1.0f / intrin->coeffs[0] * atan(2 * r* tan(intrin->coeffs[0] / 2.0f)));
45  x *= rd / r;
46  y *= rd / r;
47  }
49  {
50  float r = sqrtf(x*x + y*y);
51  if (r < FLT_EPSILON)
52  {
53  r = FLT_EPSILON;
54  }
55  float theta = atan(r);
56  float theta2 = theta*theta;
57  float series = 1 + theta2*(intrin->coeffs[0] + theta2*(intrin->coeffs[1] + theta2*(intrin->coeffs[2] + theta2*intrin->coeffs[3])));
58  float rd = theta*series;
59  x *= rd / r;
60  y *= rd / r;
61  }
62 
63  pixel[0] = x * intrin->fx + intrin->ppx;
64  pixel[1] = y * intrin->fy + intrin->ppy;
65 }
66 
67 /* Given pixel coordinates and depth in an image with no distortion or inverse distortion coefficients, compute the corresponding point in 3D space relative to the same camera */
68 static void rs2_deproject_pixel_to_point(float point[3], const struct rs2_intrinsics * intrin, const float pixel[2], float depth)
69 {
70  assert(intrin->model != RS2_DISTORTION_MODIFIED_BROWN_CONRADY); // Cannot deproject from a forward-distorted image
71  //assert(intrin->model != RS2_DISTORTION_BROWN_CONRADY); // Cannot deproject to an brown conrady model
72 
73  float x = (pixel[0] - intrin->ppx) / intrin->fx;
74  float y = (pixel[1] - intrin->ppy) / intrin->fy;
76  {
77  float r2 = x*x + y*y;
78  float f = 1 + intrin->coeffs[0]*r2 + intrin->coeffs[1]*r2*r2 + intrin->coeffs[4]*r2*r2*r2;
79  float ux = x*f + 2*intrin->coeffs[2]*x*y + intrin->coeffs[3]*(r2 + 2*x*x);
80  float uy = y*f + 2*intrin->coeffs[3]*x*y + intrin->coeffs[2]*(r2 + 2*y*y);
81  x = ux;
82  y = uy;
83  }
85  {
86  float rd = sqrtf(x*x + y*y);
87  if (rd < FLT_EPSILON)
88  {
89  rd = FLT_EPSILON;
90  }
91 
92  float theta = rd;
93  float theta2 = rd*rd;
94  for (int i = 0; i < 4; i++)
95  {
96  float f = theta*(1 + theta2*(intrin->coeffs[0] + theta2*(intrin->coeffs[1] + theta2*(intrin->coeffs[2] + theta2*intrin->coeffs[3])))) - rd;
97  if (abs(f) < FLT_EPSILON)
98  {
99  break;
100  }
101  float df = 1 + theta2*(3 * intrin->coeffs[0] + theta2*(5 * intrin->coeffs[1] + theta2*(7 * intrin->coeffs[2] + 9 * theta2*intrin->coeffs[3])));
102  theta -= f / df;
103  theta2 = theta*theta;
104  }
105  float r = tan(theta);
106  x *= r / rd;
107  y *= r / rd;
108  }
109  if (intrin->model == RS2_DISTORTION_FTHETA)
110  {
111  float rd = sqrtf(x*x + y*y);
112  if (rd < FLT_EPSILON)
113  {
114  rd = FLT_EPSILON;
115  }
116  float r = (float)(tan(intrin->coeffs[0] * rd) / atan(2 * tan(intrin->coeffs[0] / 2.0f)));
117  x *= r / rd;
118  y *= r / rd;
119  }
120 
121  point[0] = depth * x;
122  point[1] = depth * y;
123  point[2] = depth;
124 }
125 
126 /* Transform 3D coordinates relative to one sensor to 3D coordinates relative to another viewpoint */
127 static void rs2_transform_point_to_point(float to_point[3], const struct rs2_extrinsics * extrin, const float from_point[3])
128 {
129  to_point[0] = extrin->rotation[0] * from_point[0] + extrin->rotation[3] * from_point[1] + extrin->rotation[6] * from_point[2] + extrin->translation[0];
130  to_point[1] = extrin->rotation[1] * from_point[0] + extrin->rotation[4] * from_point[1] + extrin->rotation[7] * from_point[2] + extrin->translation[1];
131  to_point[2] = extrin->rotation[2] * from_point[0] + extrin->rotation[5] * from_point[1] + extrin->rotation[8] * from_point[2] + extrin->translation[2];
132 }
133 
134 /* Calculate horizontal and vertical feild of view, based on video intrinsics */
135 static void rs2_fov(const struct rs2_intrinsics * intrin, float to_fov[2])
136 {
137  to_fov[0] = (atan2f(intrin->ppx + 0.5f, intrin->fx) + atan2f(intrin->width - (intrin->ppx + 0.5f), intrin->fx)) * 57.2957795f;
138  to_fov[1] = (atan2f(intrin->ppy + 0.5f, intrin->fy) + atan2f(intrin->height - (intrin->ppy + 0.5f), intrin->fy)) * 57.2957795f;
139 }
140 
/* Advance curr one pixel along the line from start to end: step one unit in the
   dominant remaining axis and solve the line equation for the other coordinate. */
static void next_pixel_in_line(float curr[2], const float start[2], const float end[2])
{
    float line_slope = (end[1] - start[1]) / (end[0] - start[0]);
    if (fabs(end[0] - curr[0]) > fabs(end[1] - curr[1]))
    {
        /* X is the dominant remaining direction: step x, derive y from the line. */
        curr[0] = end[0] > curr[0] ? curr[0] + 1 : curr[0] - 1;
        curr[1] = end[1] - line_slope * (end[0] - curr[0]);
    }
    else
    {
        /* Y dominant: step y, derive x. BUG FIX: the original computed
           (end[1] + curr[1]) / line_slope; point-slope form requires the
           remaining y-distance (end[1] - curr[1]) divided by the slope. */
        curr[1] = end[1] > curr[1] ? curr[1] + 1 : curr[1] - 1;
        curr[0] = end[0] - ((end[1] - curr[1]) / line_slope);
    }
}
155 
/* Return true when v lies in the closed interval between a and b (either order). */
static bool rs2__in_closed_range(float v, float a, float b)
{
    return (a <= b) ? (a <= v && v <= b) : (b <= v && v <= a);
}

/* Check whether curr lies inside the axis-aligned bounding box of the segment
   from start to end (i.e. has not walked past either endpoint). */
static bool is_pixel_in_line(const float curr[2], const float start[2], const float end[2])
{
    return rs2__in_closed_range(curr[0], start[0], end[0]) &&
           rs2__in_closed_range(curr[1], start[1], end[1]);
}
161 
/* Clamp a 2D point so each coordinate stays within [0, width] / [0, height]. */
static void adjust_2D_point_to_boundary(float p[2], int width, int height)
{
    const float limit[2] = { (float)width, (float)height };
    for (int axis = 0; axis < 2; ++axis)
    {
        /* Two independent checks (not else-if) to mirror the original exactly. */
        if (p[axis] < 0) p[axis] = 0;
        if (p[axis] > limit[axis]) p[axis] = limit[axis];
    }
}
169 
170 /* Find projected pixel with unknown depth search along line. */
171 static void rs2_project_color_pixel_to_depth_pixel(float to_pixel[2],
172  const uint16_t* data, float depth_scale,
173  float depth_min, float depth_max,
174  const struct rs2_intrinsics* depth_intrin,
175  const struct rs2_intrinsics* color_intrin,
176  const struct rs2_extrinsics* color_to_depth,
177  const struct rs2_extrinsics* depth_to_color,
178  const float from_pixel[2])
179 {
180  //Find line start pixel
181  float start_pixel[2] = { 0 }, min_point[3] = { 0 }, min_transformed_point[3] = { 0 };
182  rs2_deproject_pixel_to_point(min_point, color_intrin, from_pixel, depth_min);
183  rs2_transform_point_to_point(min_transformed_point, color_to_depth, min_point);
184  rs2_project_point_to_pixel(start_pixel, depth_intrin, min_transformed_point);
185  adjust_2D_point_to_boundary(start_pixel, depth_intrin->width, depth_intrin->height);
186 
187  //Find line end depth pixel
188  float end_pixel[2] = { 0 }, max_point[3] = { 0 }, max_transformed_point[3] = { 0 };
189  rs2_deproject_pixel_to_point(max_point, color_intrin, from_pixel, depth_max);
190  rs2_transform_point_to_point(max_transformed_point, color_to_depth, max_point);
191  rs2_project_point_to_pixel(end_pixel, depth_intrin, max_transformed_point);
192  adjust_2D_point_to_boundary(end_pixel, depth_intrin->width, depth_intrin->height);
193 
194  //search along line for the depth pixel that it's projected pixel is the closest to the input pixel
195  float min_dist = -1;
196  for (float p[2] = { start_pixel[0], start_pixel[1] }; is_pixel_in_line(p, start_pixel, end_pixel); next_pixel_in_line(p, start_pixel, end_pixel))
197  {
198  float depth = depth_scale * data[(int)p[1] * depth_intrin->width + (int)p[0]];
199  if (depth == 0)
200  continue;
201 
202  float projected_pixel[2] = { 0 }, point[3] = { 0 }, transformed_point[3] = { 0 };
203  rs2_deproject_pixel_to_point(point, depth_intrin, p, depth);
204  rs2_transform_point_to_point(transformed_point, depth_to_color, point);
205  rs2_project_point_to_pixel(projected_pixel, color_intrin, transformed_point);
206 
207  float new_dist = pow((projected_pixel[1] - from_pixel[1]), 2) + pow((projected_pixel[0] - from_pixel[0]), 2);
208  if (new_dist < min_dist || min_dist < 0)
209  {
210  min_dist = new_dist;
211  to_pixel[0] = p[0];
212  to_pixel[1] = p[1];
213  }
214  }
215 }
216 
217 #endif
Definition: rs_types.h:47
float translation[3]
Definition: rs_sensor.h:86
Definition: rs_types.h:51
float coeffs[5]
Definition: rs_types.h:66
Exposes librealsense functionality for C compilers.
float rotation[9]
Definition: rs_sensor.h:85
float ppx
Definition: rs_types.h:61
Exposes RealSense frame functionality for C compilers.
Exposes RealSense structs.
Definition: rs_types.h:49
int width
Definition: rs_types.h:59
Exposes RealSense sensor functionality for C compilers.
Definition: rs_types.h:48
Cross-stream extrinsics: encode the topology describing how the different devices are connected...
Definition: rs_sensor.h:83
rs2_distortion model
Definition: rs_types.h:65
float fy
Definition: rs_types.h:64
float fx
Definition: rs_types.h:63
Video stream intrinsics.
Definition: rs_types.h:57
int height
Definition: rs_types.h:60
float ppy
Definition: rs_types.h:62