// camera.cpp — RealSense camera wrapper implementation (depth + color capture,
// filtering, and pixel/point projection helpers).
#include "../../inc/camera.h"
// Build all librealsense handles up-front; the frame members start out empty
// (wrapping a default rs2::frame) and are only filled by capture().
Camera::Camera(){
    pipe = new rs2::pipeline();
    cfg = new rs2::config();
    // Align color onto the depth camera's point of view.
    align_to_depth = new rs2::align(RS2_STREAM_DEPTH);
    // Post-processing filters applied to every depth frame.
    spatFilter = new rs2::spatial_filter();
    tempFilter = new rs2::temporal_filter();
    // Placeholders until the first capture; real scale is read in start().
    color_frame = new rs2::video_frame(rs2::frame());
    depth_frame = new rs2::depth_frame(rs2::frame());
    depth_scale = 1.0f;
}
// Release every heap-allocated librealsense handle owned by this instance.
Camera::~Camera(){
    delete color_frame;
    delete depth_frame;
    delete tempFilter;
    delete spatFilter;
    delete align_to_depth;
    delete cfg;
    delete pipe;
}
/*
* Public
*/
// Convert the most recently captured depth frame into a CV_32FC1 matrix of
// meters (raw 16-bit sensor units multiplied by depth_scale).
//
// The previous implementation cached static Mats and skipped the conversion
// whenever the frame's data pointer was unchanged; librealsense recycles its
// frame buffers, so an identical pointer does not imply identical content and
// stale values could be returned. Reassigning Mat::data also bypassed cv::Mat's
// ownership bookkeeping (leaking the originally allocated buffer) and the
// static dimensions broke if the stream resolution ever changed.
cv::Mat Camera::getDepthFrame(){
    // Non-owning view over the librealsense buffer — valid only while
    // depth_frame holds this frame, which is long enough for convertTo below.
    cv::Mat values(depth_frame->get_height(), depth_frame->get_width(), CV_16UC1,
                   const_cast<void*>(depth_frame->get_data()));
    // convertTo allocates a fresh owning buffer, so the result is safe to
    // return without an extra clone().
    cv::Mat meters;
    values.convertTo(meters, CV_32FC1, depth_scale);
    return meters;
}
// Return a copy of the most recently captured color frame as a CV_8UC3 Mat.
//
// Previously a static Mat's data pointer was reassigned to the librealsense
// buffer on each call — that leaks the buffer the static Mat originally
// allocated, bypasses cv::Mat's ownership bookkeeping, and pins the dimensions
// to whatever the first call saw. A non-owning wrapper + clone() is the
// supported way to hand out an owning copy.
cv::Mat Camera::getColorFrame(){
    // Non-owning view over the librealsense color buffer.
    cv::Mat colors(color_frame->get_height(), color_frame->get_width(), CV_8UC3,
                   const_cast<void*>(color_frame->get_data()));
    // clone() so the returned Mat owns its pixels and outlives the rs2 frame.
    return colors.clone();
}
// Configure and start the RealSense pipeline.
// Returns 0 on success, 1 when no compatible device can be resolved.
// Side effects: reads the sensor's depth scale into depth_scale, applies the
// high-density visual preset, configures the spatial filter, warms up the
// stream, and caches the depth stream intrinsics into intr_profile.
int Camera::start(){
// check for a device available
if(!cfg->can_resolve(*pipe)){
//std::cout << "Error: No device found" << std::endl;
return 1;
}
// Logs
//std::remove("./camera.logs");
//rs2::log_to_file(RS2_LOG_SEVERITY_DEBUG, "./camera.logs");
// Settings — streams are enabled with default resolution/format/framerate
// (explicit 1280x720 variants kept below for reference).
//cfg->enable_stream(rs2_stream::RS2_STREAM_DEPTH, 1280, 720, rs2_format::RS2_FORMAT_Z16);
//cfg->enable_stream(rs2_stream::RS2_STREAM_COLOR, 1280, 720, rs2_format::RS2_FORMAT_RGB8);
cfg->enable_stream(rs2_stream::RS2_STREAM_DEPTH);
cfg->enable_stream(rs2_stream::RS2_STREAM_COLOR);
// Start streaming, then grab the depth sensor to read its unit scale
// (multiplier converting raw 16-bit depth values to meters).
rs2::depth_sensor sensor = pipe->start(*cfg).get_device().first<rs2::depth_sensor>();
depth_scale = sensor.get_depth_scale();
// Doc presets : https://dev.intelrealsense.com/docs/d400-series-visual-presets
sensor.set_option(RS2_OPTION_VISUAL_PRESET, RS2_RS400_VISUAL_PRESET_HIGH_DENSITY);
//sensor.set_option(RS2_OPTION_VISUAL_PRESET, RS2_RS400_VISUAL_PRESET_DEFAULT);
// 5 = range mapped to unlimited
spatFilter->set_option(RS2_OPTION_HOLES_FILL, 5);
// Let auto-exposure and the temporal filter settle before the first real use.
warmUpDepthLens();
// Save the intrinsic profile since fetching it is a heavy operation (used to
// project and deproject pixels) and we only need it in one precise case.
capture();
intr_profile = depth_frame->get_profile().as<rs2::video_stream_profile>().get_intrinsics();
//std::cout << intr_profile.width << "x" << intr_profile.height << std::endl;
return 0;
}
// Stop the streaming pipeline.
void Camera::stop(){
pipe->stop();
}
// Block until a new frameset arrives, align it to the depth camera's
// viewpoint, filter the depth frame, and store both frames in the members
// read by getDepthFrame()/getColorFrame(). If either frame is missing,
// the previously stored frames are kept.
void Camera::capture(){
auto frameset = pipe->wait_for_frames();
// Trying to get frames from the depth perspective (aligned on depth camera)
frameset = align_to_depth->process(frameset);
rs2::video_frame colorFrame = frameset.get_color_frame();
rs2::depth_frame depthFrame = frameset.get_depth_frame();
if(depthFrame && colorFrame){
filterDepthFrame(depthFrame);
// Values relative to camera (not in meter)
// swap() exchanges the underlying frame handles without copying pixel data.
depth_frame->swap(depthFrame);
color_frame->swap(colorFrame);
}
}
// Get the coordinates of the pixel matching the point relative to the real world
// Map a 3D point (camera space) onto pixel coordinates of the depth image,
// using the intrinsics cached in start().
cv::Point2i Camera::projectPointToPixel(cv::Point3f point3D){
    float world[3] = {point3D.x, point3D.y, point3D.z};
    float px[2];
    rs2_project_point_to_pixel(px, &intr_profile, world);
    // Truncates to integer pixel coordinates, as librealsense returns floats.
    return cv::Point2i(px[0], px[1]);
}
// Get the point relative to the real world matching the coordinates of the pixel
// Inverse of projectPointToPixel: recover the 3D point (camera space) that
// projects to the given pixel coordinates at depth z.
cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z){
    float world[3];
    rs2_deproject_pixel_to_point(world, &intr_profile, coord, z);
    return cv::Point3f(world[0], world[1], world[2]);
}
/*
Custom functions based on librealsense, taking f (focal lengths) and pp
(principal point) as arguments. They are related to the camera's profile but
adapted to what we want to display: the limits of the pixels match the limits
of the camera's frame, and the 3D projection matches the camera's too.
This works in our case because our camera's profile is RS2_DISTORTION_BROWN_CONRADY
(the profile describing what kind of distortion is applied on the frame to
adjust for the lens).
*/
// Pinhole projection with caller-supplied focal lengths f and principal
// point pp (no distortion term — valid for Brown-Conrady profiles here).
cv::Point2i Camera::projectPointToPixel(cv::Point3f point, cv::Point2f f, cv::Point2f pp){
    // Perspective divide to normalized image coordinates.
    const float xn = point.x / point.z;
    const float yn = point.y / point.z;
    // Scale by focal length and shift by principal point (truncated to int).
    return cv::Point2i(xn * f.x + pp.x, yn * f.y + pp.y);
}
// Pinhole back-projection with caller-supplied focal lengths f and principal
// point pp: recover the 3D point at depth z for the given pixel.
cv::Point3f Camera::deprojectPixelToPoint(float pixel[], float z, cv::Point2f f, cv::Point2f pp){
    // Shift by principal point and divide by focal length to normalize.
    const float xn = (pixel[0] - pp.x) / f.x;
    const float yn = (pixel[1] - pp.y) / f.y;
    return cv::Point3f(z * xn, z * yn, z);
}
// Rescale the cached camera intrinsics from the cropping-mask region to the
// size of the given projection image.
// Returns {focal lengths (fx, fy), principal point (ppx, ppy)}.
std::vector<cv::Point2f> Camera::getAdaptedIntrinsics(cv::Mat &projection){
    const cv::Size target = projection.size();
    const cv::Point2f focal(
        static_cast<float>(intr_profile.fx * target.width) / croppingMask.width,
        static_cast<float>(intr_profile.fy * target.height) / croppingMask.height);
    const cv::Point2f principal(
        static_cast<float>(intr_profile.ppx * target.width) / croppingMask.width,
        static_cast<float>(intr_profile.ppy * target.height) / croppingMask.height);
    return std::vector<cv::Point2f>{focal, principal};
}
void Camera::printCroppingMask(){
cv::Rect mask = getCroppingMask();
std::cout << "(" << mask.x << "," << mask.y << ") + " << mask.width << "x" << mask.height << std::endl;
}
/*
* Private
*/
// Capture 30 frames to give autoexposure, etc. a chance to settle
void Camera::warmUpDepthLens()
{
for (int i = 0; i < 30; ++i)
{
rs2::frameset data = pipe->wait_for_frames();
auto frameDepth = data.get_depth_frame();
filterDepthFrame(frameDepth);
}
}
// Run the depth frame through the spatial filter, then the temporal filter,
// replacing the caller's handle with the filtered result.
void Camera::filterDepthFrame(rs2::depth_frame &frameDepth)
{
    frameDepth = tempFilter->process(spatFilter->process(frameDepth));
}