// projection.cpp
#include "../../inc/projection.h"
#include <chrono>
#include <iostream>
#include <vector>

/*
 * MAIN
 */

Projection::Projection(){
    // Start from an identity adjustment (rotation of 0 degrees, scale 1)
    adjustingMatrix = cv::getRotationMatrix2D(cv::Point(0, 0), 0, 1);
    distanceTopSandbox = 1.0f;
}

// Apply the adjusting matrix to a single pixel coordinate
cv::Point2i Projection::rotatePixel(cv::Point2i pixel){
    cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y));
    cv::transform(tmp, tmp, adjustingMatrix);
    return cv::Point2i(tmp.at<cv::Vec2f>(0, 0));
}

// Apply the inverse of the adjusting matrix to a single pixel coordinate
cv::Point2i Projection::revertRotatePixel(cv::Point2i pixel){
    cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y));
    cv::Mat invMat;
    cv::invertAffineTransform(adjustingMatrix, invMat);
    cv::transform(tmp, tmp, invMat);
    return cv::Point2i(tmp.at<cv::Vec2f>(0, 0));
}

/*
    Adjust the projected frame to the topology of the sandbox, going from the
    camera's point of view to the beamer's point of view.
*/
void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos){

    // (re)allocate the working buffers when the destination size changes
    if(deprojectMap.empty() || deprojectMap.size() != dst.size()){
        if(!deprojectMap.empty()){
            deprojectMap.release();
            frameMap.release();
            resized_depth.release();
            resized_src.release();
        }
        std::cout << "Create" << std::endl;
        deprojectMap.create(dst.rows, dst.cols);
        frameMap.create(dst.rows, dst.cols);
        resized_depth.create(dst.rows, dst.cols);
        resized_src.create(dst.rows, dst.cols);

        std::vector<cv::Point2f> profil = camera->getAdaptedIntrinsics(dst);
        fxy = profil.at(0);
        ppxy = profil.at(1);
    }

    deprojectMap = cv::Point2i(-1,-1);
    frameMap = cv::Point2i(-1,-1);

    // resize to match a 1:1 ratio with dst, since we'll do later:
    // dst[i] = src[i]
    cv::resize(src, resized_src, dst.size());
    cv::resize(depth, resized_depth, dst.size());

    // 75 ms
    //std::chrono::milliseconds start_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
    deprojectPixelsFromDepth(resized_depth, camera, camera->getCroppingMask(), beamer_pos, deprojectMap, fxy, ppxy);
    //std::chrono::milliseconds now_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
    //std::cout << "Deproject : " << (now_ms - start_ms).count() << std::endl;

    // 18-19 ms
    //start_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
    filterLowestDeprojectedPoints(resized_depth, deprojectMap, frameMap);
    //now_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
    //std::cout << "Filter : " << (now_ms - start_ms).count() << std::endl;

    // 14-15 ms
    //start_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
    buildFrame(resized_depth, frameMap, resized_src, dst);
    //now_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
    //std::cout << "Build : " << (now_ms - start_ms).count() << std::endl;

    cv::warpAffine(dst, dst, adjustingMatrix, dst.size());
}
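
/*
    Usage sketch (illustrative only; `camera`, `depthFrame`, `colorFrame` and the
    chosen sizes are assumptions, not defined in this file):

        Projection projection;
        Camera *camera = ...;                            // calibrated camera
        cv::Mat_<float> depthFrame = ...;                // depth frame, cropped to the sandbox area
        cv::Mat_<cv::Vec3b> colorFrame = ...;            // image to project
        cv::Mat_<cv::Vec3b> output(768, 1024);           // beamer-resolution destination
        cv::Point3f beamerPosition(0.0f, 0.2f, -0.3f);   // beamer position relative to the camera
        projection.adjustFrame(depthFrame, colorFrame, output, camera, beamerPosition);
        // `output` now holds `colorFrame` warped so that, once projected by the
        // beamer onto the sand, it matches the measured topology.
*/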

/*
 * PRIVATE
 */

/*
    Deproject the pixels into 3D, move them to the beamer's point of view, and
    project them back to 2D.
    This gives us, for each pixel, the location where it should appear in the
    beamer's projection.
*/
void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy){

    // scale the coords of the cropping mask to the new resolution of the depth frame
    float mask_scale_x = mask.x * depth.cols / mask.width;
    float mask_scale_y = mask.y * depth.rows / mask.height;

    for (int j = 0; j < depth.rows; j++){
        for (int i = 0; i < depth.cols; i++){

            // coordinates of the pixel relative to the original image taken from the camera
            int base_x = mask_scale_x + i;
            int base_y = mask_scale_y + j;
            float z = depth.at<float>(j,i);

            // pixel based on the original depth frame taken from the camera
            cv::Point2i pixel = findMatchingPixel( base_x, base_y, z, camera, beamer_pos, fxy, ppxy );

            // pixel relative to the cropping mask (the area where the frame is projected)
            pixel.x -= mask_scale_x;
            pixel.y -= mask_scale_y;

            deprojectMap.at<cv::Point2i>(j,i) = pixel;
            //deprojectMap.at<cv::Point2i>(j,i) = findMatchingPixel( i, j, depth.at<float>(j,i), camera, beamer_pos, fxy, ppxy );
        }
    }
}
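
/*
    Illustration (made-up coordinates, not from a real run): if the entry of
    deprojectMap at row 40, column 30 holds the point (28, 35), then the pixel the
    camera sees at column 30 / row 40 of the cropped frame should be displayed at
    column 28 / row 35 from the beamer's point of view.
*/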

/*
    Keep only the highest point when several points are deprojected to the same
    location, and store the result in frameMap.
    deprojectMap indicates, for each source pixel, where it will be displayed;
    frameMap indicates, for each pixel of dst, which pixel of src it should take
    its value from.
*/
void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){
    for (int j = 0; j < deprojectMap.rows; j++){
        for (int i = 0; i < deprojectMap.cols; i++){

            // coords of the new pixel
            cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(j,i);
            cv::Point2i highestDepthPixel = cv::Point2i(i,j);

            if( (0 <= deprojectedPixel.x && deprojectedPixel.x < depth.cols) &&
                (0 <= deprojectedPixel.y && deprojectedPixel.y < depth.rows) ){

                // check and keep the highest point at the location pointed to by the pixel
                // (a smaller depth value means closer to the camera, i.e. higher)
                cv::Point2i currentDepthPixel = frameMap.at<cv::Point2i>(deprojectedPixel);
                if( (0 <= currentDepthPixel.x && currentDepthPixel.x < depth.cols) &&
                    (0 <= currentDepthPixel.y && currentDepthPixel.y < depth.rows) ){
                    if(depth.at<float>(currentDepthPixel) <= depth.at<float>(j,i)){
                        highestDepthPixel = currentDepthPixel;
                    }
                }

                frameMap.at<cv::Point2i>(deprojectedPixel) = highestDepthPixel;
            }
        }
    }
}
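
/*
    Example (same made-up coordinates as above): if several source pixels are
    deprojected onto column 28 / row 35, only the one with the smallest depth
    value (the point closest to the camera, hence the highest point of the sand
    surface) is kept, and frameMap at row 35, column 28 ends up holding that
    source pixel, e.g. (30, 40).
*/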

/*
    Build the frame using frameMap, where each pixel indicates which pixel of the
    source it should take its value from:
    dst[i] = src[frameMap[i]]
*/
void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){
    for (int j = 0; j < frameMap.rows; j++){
        for (int i = 0; i < frameMap.cols; i++){

            cv::Point2i pixel_src = frameMap.at<cv::Point2i>(j,i);
            cv::Point2i pixel_dst = cv::Point2i(i,j);

            if( (0 <= pixel_src.x && pixel_src.x < depth.cols) && (0 <= pixel_src.y && pixel_src.y < depth.rows) ){
                // src and dst must be of the same size
                //copyPixelsInto(pixel_dst, dst, pixel_src, src, depth);
                dst.at<cv::Vec3b>(pixel_dst) = src.at<cv::Vec3b>(pixel_src);
            }
        }
    }
}
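
/*
    Continuing the made-up example: with frameMap at row 35, column 28 holding
    (30, 40), buildFrame copies the colour of src at column 30 / row 40 into dst
    at column 28 / row 35.
*/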

/*
    C : camera position
    B : beamer position
    P : point computed from the camera depth
    V : point adjusted to the top plane of the sandbox
    A : right-angle point of the triangle PAB
    E : right-angle point of the triangle VEB
    where
    CP : distance from the camera to the point (value of the depth frame)
    CB : distance from the camera to the beamer (the beamer's position is relative to the camera)
*/
cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f CB, cv::Point2f fxy, cv::Point2f ppxy){

    float pixel[2] = {static_cast<float>(i), static_cast<float>(j)};
    // z-distance from the beamer to the top of the sandbox
    const float BEz = distanceTopSandbox - CB.z;
    // 3D point seen by the camera at this pixel
    cv::Point3f CP = camera->deprojectPixelToPoint(pixel, z, fxy, ppxy);
    // vector from the beamer to that point
    cv::Point3f BP = CP - CB;
    float BAz = BP.z;
    // scale BP (similar triangles) so that V lands on the top plane of the sandbox
    float alpha = BEz / BAz;
    cv::Point3f BV = (alpha * BP);
    cv::Point3f CV = CB + BV;

    return camera->projectPointToPixel(CV, fxy, ppxy);
}
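
/*
    The scaling above follows from the similar right-angle triangles sharing the
    vertex B: V = B + (BEz / BAz) * BP, with BAz = BP.z. Sanity check with made-up
    numbers (not from a real setup): distanceTopSandbox = 1.0 and CB = (0.2, 0.0, -0.1)
    give BEz = 1.1; with CP = (0.0, 0.1, 0.9), BP = CP - CB = (-0.2, 0.1, 1.0) and
    alpha = 1.1, so CV = CB + alpha * BP = (-0.02, 0.11, 1.0): the adjusted point
    lies on the top plane of the sandbox (z == distanceTopSandbox), as expected.
*/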

/*
 * Debug
 */

void Projection::printAdjustingMatrix(){
    cv::Mat matrix = getAdjustingMatrix();
    for (int y = 0; y < matrix.rows; y++){
        for (int x = 0; x < matrix.cols; x++){
            std::cout << (float)matrix.at<double>(y, x) << " ";
        }
        std::cout << std::endl;
    }
}