// projection.cpp
#include "../../inc/projection.h"
/*
* MAIN
*/
Projection::Projection(){
    adjustingMatrix = cv::getRotationMatrix2D(cv::Point(0, 0), 0, 1);
    distanceTopSandbox = 1.0f;
}
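// Apply the 2x3 adjusting matrix to a single pixel; revertRotatePixel applies the inverse transform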
cv::Point2i Projection::rotatePixel(cv::Point2i pixel){
    cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y));
    cv::transform(tmp, tmp, adjustingMatrix);
    return cv::Point2i(tmp.at<cv::Vec2f>(0, 0));
}
cv::Point2i Projection::revertRotatePixel(cv::Point2i pixel){
    cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y));
    cv::Mat invMat;
    cv::invertAffineTransform(adjustingMatrix, invMat);
    cv::transform(tmp, tmp, invMat);
    return cv::Point2i(tmp.at<cv::Vec2f>(0, 0));
}
// Adjust the projected frame using the topology captured by the camera,
// warping it from the camera's point of view to the beamer's point of view
void Projection::adjustFrame(cv::Mat_<float> depth, cv::Mat_<cv::Vec3b> src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos){

    if(deprojectMap.empty() || deprojectMap.size() != depth.size()){
        if(!deprojectMap.empty()){
            deprojectMap.release();
            frameMap.release();
            resized_dst.release();
        }
        deprojectMap.create(depth.rows, depth.cols);
        frameMap.create(depth.rows, depth.cols);
        resized_dst.create(getMatchingSize(dst, depth));
    }
    deprojectMap = cv::Point2i(-1,-1);
    frameMap = cv::Point2i(-1,-1);

    // resize the frames to be a multiple of the camera depth frame size:
    // src.size = n * camera.depth.size, where n is a strictly positive integer
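    // e.g. (illustrative sizes): with a 640x480 depth frame and an 800x600 beamer frame,
    // getMatchingSize() gives 1280x960, so each depth pixel maps to a 2x2 block of projected pixels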
    cv::resize(dst, resized_dst, resized_dst.size());
    cv::resize(src, src, resized_dst.size());

    deprojectPixelsFromDepth(depth, camera->getCroppingMask(), camera, beamer_pos, deprojectMap);
    filterLowestDeprojectedPoints(depth, deprojectMap, frameMap);
    buildFrame(depth, frameMap, src, resized_dst);

    cv::resize(resized_dst, dst, dst.size());
    cv::warpAffine(dst, dst, adjustingMatrix, dst.size());
}
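// Usage sketch for adjustFrame (illustrative; the variable names below are assumptions,
// not part of the project's API):
//   cv::Mat_<float> depth = ...;            // cropped depth frame, sized like camera->getCroppingMask()
//   cv::Mat_<cv::Vec3b> topology = ...;     // colored topology image to project
//   cv::Mat_<cv::Vec3b> beamerFrame = ...;  // output frame, at the beamer's resolution
//   projection.adjustFrame(depth, topology, beamerFrame, camera, beamerPosition);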
/*
* PRIVATE
*/
void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, cv::Rect mask, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap){

    // Browse the depth frame matching the cropping mask,
    // adapting each pixel's position to the beamer's position
    for (int j = 0; j < depth.rows; j++){
        for (int i = 0; i < depth.cols; i++){

            // coordinates of the pixel relative to the original image taken from the camera
            int base_x = mask.x + i;
            int base_y = mask.y + j;
            float z = depth.at<float>(j, i);

            // pixel based on the original depth frame taken from the camera
            cv::Point2i pixel = findMatchingPixel(base_x, base_y, z, camera, beamer_pos);

            // pixel relative to the cropping mask (the area where the frame is projected)
            pixel.x -= mask.x;
            pixel.y -= mask.y;

            deprojectMap.at<cv::Point2i>(j, i) = pixel;
        }
    }
}
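// Several depth pixels can deproject to the same target pixel. In that case only the source
// pixel with the smallest depth value is kept, i.e. the point closest to the camera, which is
// the highest point of the sand surface. Example (illustrative): if two neighbouring pixels on
// a steep slope both land on the same projected pixel, the upper one wins.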
void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){
    for (int j = 0; j < deprojectMap.rows; j++){
        for (int i = 0; i < deprojectMap.cols; i++){

            // coords of the new pixel
            cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(j,i);
            cv::Point2i highestDepthPixel = cv::Point2i(i,j);

            if( (0 <= deprojectedPixel.x && deprojectedPixel.x < depth.cols) &&
                (0 <= deprojectedPixel.y && deprojectedPixel.y < depth.rows) ){

                // check and keep the highest point at the location pointed by pixel
                cv::Point2i currentDepthPixel = frameMap.at<cv::Point2i>(deprojectedPixel);
                if( (0 <= currentDepthPixel.x && currentDepthPixel.x < depth.cols) &&
                    (0 <= currentDepthPixel.y && currentDepthPixel.y < depth.rows) ){
                    if(depth.at<float>(currentDepthPixel) <= depth.at<float>(j,i)){
                        highestDepthPixel = currentDepthPixel;
                    }
                }
                frameMap.at<cv::Point2i>(deprojectedPixel) = highestDepthPixel;
            }
        }
    }
}
void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){
    for (int j = 0; j < frameMap.rows; j++){
        for (int i = 0; i < frameMap.cols; i++){

            cv::Point2i pixel_src = frameMap.at<cv::Point2i>(j,i);
            cv::Point2i pixel_dst = cv::Point2i(i,j);

            if( (0 <= pixel_src.x && pixel_src.x < depth.cols) && (0 <= pixel_src.y && pixel_src.y < depth.rows) ){
                // src and dst must be of the same size
                copyPixelsInto(pixel_dst, dst, pixel_src, src, depth);
            }
        }
    }
}
cv::Size Projection::getMatchingSize(cv::Mat &src, cv::Mat &base){
    cv::Size bigSize;
    // round src's size up to the next multiple of base's size
    bigSize.width  = (src.size().width  % base.size().width  == 0) ? src.size().width  : src.size().width  - (src.size().width  % base.size().width)  + base.size().width;
    bigSize.height = (src.size().height % base.size().height == 0) ? src.size().height : src.size().height - (src.size().height % base.size().height) + base.size().height;
    return bigSize;
}
// pixel coordinates are relative to the camera depth frame
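// Example (illustrative sizes): with src/dst at 1280x960 and a 640x480 depth frame,
// ratio_w = ratio_h = 2, so the depth pixel (x,y) is copied as the 2x2 block starting at (2x, 2y)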
void Projection::copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst, cv::Point2i pixel_src, cv::Mat_<cv::Vec3b> &src, cv::Mat_<float> &depth){
    if( src.size().width == dst.size().width && src.size().height == dst.size().height ){

        // determines the size of the square of pixels to copy from the source matching
        // the position of the pixel of the depth frame
        // (since src and dst are multiples of the depth frame's size)
        int ratio_w = src.size().width  / depth.size().width;
        int ratio_h = src.size().height / depth.size().height;

        for(int j = 0; j < ratio_h; j++){
            for (int i = 0; i < ratio_w; i++){
                int x_dst = pixel_dst.x * ratio_w + i;
                int y_dst = pixel_dst.y * ratio_h + j;
                int x_src = pixel_src.x * ratio_w + i;
                int y_src = pixel_src.y * ratio_h + j;
                dst.at<cv::Vec3b>(y_dst, x_dst) = src.at<cv::Vec3b>(y_src, x_src);
            }
        }
    }
}
/*
    C : Camera position
    B : Beamer position
    P : Point computed from the camera depth
    V : Point adjusted to the top plane of the sandbox
    A : Point of the right-angle triangle PAB
    E : Point of the right-angle triangle VEP
    Where
        CP : distance from the camera to the point (value of depth_frame)
        CB : distance from the camera to the beamer (the beamer's position is relative to the camera)
*/
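/*
    Equivalently, as implemented below: with alpha = BEz / BAz
    = (distanceTopSandbox - B.z) / (P.z - B.z), the adjusted point is

        V = B + alpha * (P - B)

    i.e. V is the intersection of the ray going from the beamer B through P with the
    horizontal plane z = distanceTopSandbox (all coordinates relative to the camera C).
*/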
cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f CB){

    float pixel[2] = {static_cast<float>(i), static_cast<float>(j)};
    const float BEz = distanceTopSandbox - CB.z;

    cv::Point3f CP = camera->deprojectPixelToPoint(pixel, z);
    cv::Point3f BP = CP - CB;
    float BAz = BP.z;
    float alpha = BEz / BAz;
    cv::Point3f BV = alpha * BP;
    cv::Point3f CV = CB + BV;

    return camera->projectPointToPixel(CV);
}
/*
* Debug
*/
void Projection::printAdjustingMatrix(){
    cv::Mat matrix = getAdjustingMatrix();
    for (int y = 0; y < matrix.rows; y++){
        for (int x = 0; x < matrix.cols; x++){
            std::cout << (float)matrix.at<double>(y, x) << " ";
        }
        std::cout << std::endl;
    }
}