[Jderobot-admin] jderobot-r908 - in trunk/src/components: kinectServer kinectViewer openniServer openniServer-kinect
frivas en jderobot.org
frivas en jderobot.org
Mie Mayo 1 23:31:10 CEST 2013
Author: frivas
Date: 2013-05-01 23:30:10 +0200 (Wed, 01 May 2013)
New Revision: 908
Removed:
trunk/src/components/openniServer/openni2.cpp
Modified:
trunk/src/components/kinectServer/kinectServer.cpp
trunk/src/components/kinectViewer/drawarea.cpp
trunk/src/components/kinectViewer/drawarea.h
trunk/src/components/kinectViewer/kinectViewer.cpp
trunk/src/components/kinectViewer/kinectViewergui.cpp
trunk/src/components/kinectViewer/kinectViewergui.h
trunk/src/components/kinectViewer/myprogeo.cpp
trunk/src/components/kinectViewer/myprogeo.h
trunk/src/components/kinectViewer/util3d.cpp
trunk/src/components/kinectViewer/util3d.h
trunk/src/components/openniServer-kinect/openniServer-kinect.cpp
trunk/src/components/openniServer/openniServer.cpp
Log:
añadida nueva codificación para rgbd
Modified: trunk/src/components/kinectServer/kinectServer.cpp
===================================================================
--- trunk/src/components/kinectServer/kinectServer.cpp 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/kinectServer/kinectServer.cpp 2013-05-01 21:30:10 UTC (rev 908)
@@ -74,44 +74,9 @@
for (i=0; i<image.rows* image.cols; i++) {
//std::cout << i << std::endl;
int pval = depth[Xn_disparity[i]];
- int lb = pval & 0xff;
- switch (pval>>8) {
- case 0:
- image.data[3*i+0] = 255;
- image.data[3*i+1] = 255-lb;
- image.data[3*i+2] = 255-lb;
- break;
- case 1:
- image.data[3*i+0] = 255;
- image.data[3*i+1] = lb;
- image.data[3*i+2] = 0;
- break;
- case 2:
- image.data[3*i+0] = 255-lb;
- image.data[3*i+1] = 255;
- image.data[3*i+2] = 0;
- break;
- case 3:
- image.data[3*i+0] = 0;
- image.data[3*i+1] = 255;
- image.data[3*i+2] = lb;
- break;
- case 4:
- image.data[3*i+0] = 0;
- image.data[3*i+1] = 255-lb;
- image.data[3*i+2] = 255;
- break;
- case 5:
- image.data[3*i+0] = 0;
- image.data[3*i+1] = 0;
- image.data[3*i+2] = 255-lb;
- break;
- default:
- image.data[3*i+0] = 0;
- image.data[3*i+1] = 0;
- image.data[3*i+2] = 0;
- break;
- }
+ image.data[3*i+0] = (float(*pDepth)/(float)MAX_LENGHT)*255.;
+ image.data[3*i+1] = (*pDepth)>>8;
+ image.data[3*i+2] = (*pDepth)&0xff;
}
}
Modified: trunk/src/components/kinectViewer/drawarea.cpp
===================================================================
--- trunk/src/components/kinectViewer/drawarea.cpp 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/kinectViewer/drawarea.cpp 2013-05-01 21:30:10 UTC (rev 908)
@@ -190,6 +190,9 @@
}
bool DrawArea::on_expose_event(GdkEventExpose* event) {
+ }
+
+ bool DrawArea::my_expose_event(){
//std::cout << "expose" << std::endl;
Gtk::Allocation allocation = get_allocation();
GLfloat width, height;
@@ -374,14 +377,12 @@
glEnd();
}
glColor3f( 0, 0, 0 );
- if (true){
-std::cout << "kk" << cloud.size() << std::endl;
-glPointSize(10.0f);
- for (std::vector<jderobot::RGBPoint>::iterator it = cloud.begin(); it != cloud.end(); ++it){
+ if (draw_kinect_points){
+ for (std::vector<jderobot::RGBPoint>::iterator it = cloud.begin(); it != cloud.end(); ++it){
if (draw_kinect_with_color)
glColor3f( it->r, it->g, it->b );
glBegin(GL_POINTS);
- glVertex3f(it->x*10,it->y*10,it->z*10);
+ glVertex3f(it->x/scale,it->y/scale,it->z/scale);
glEnd();
}
Modified: trunk/src/components/kinectViewer/drawarea.h
===================================================================
--- trunk/src/components/kinectViewer/drawarea.h 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/kinectViewer/drawarea.h 2013-05-01 21:30:10 UTC (rev 908)
@@ -96,6 +96,7 @@
void clear_points();
void clearExtraLines();
void setCamerasResolution(int width, int height);
+ bool my_expose_event();
bool draw_kinect_points;
bool draw_kinect_with_color;
Modified: trunk/src/components/kinectViewer/kinectViewer.cpp
===================================================================
--- trunk/src/components/kinectViewer/kinectViewer.cpp 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/kinectViewer/kinectViewer.cpp 2013-05-01 21:30:10 UTC (rev 908)
@@ -53,7 +53,7 @@
#include <jderobot/pointcloud.h>
#include <colorspaces/colorspacesmm.h>
#include "kinectViewergui.h"
-#include <pthread.h>
+#include "pthread.h"
#include "controllers/Pose3DMotors-controller.h"
#include "controllers/leds-controller.h"
@@ -122,6 +122,7 @@
std::cerr << msg << std::endl;
}
pthread_exit(NULL);
+ return NULL;
}
/**
@@ -148,8 +149,7 @@
int globalWidth=0;
int globalHeight=0;
- // Reset thread array
- for(int i=0;i<MAX_COMPONENTS;threads[i++]=0);
+
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
@@ -311,8 +311,6 @@
throw "kinectViewer: Could not create the grafic interface";
for (i = 0; i < n_components; i++) {
pthread_join(threads[i], NULL);
- if(threads[i] != 0)
- pthread_join(threads[i], NULL);
}
if (ic)
ic->destroy();
Modified: trunk/src/components/kinectViewer/kinectViewergui.cpp
===================================================================
--- trunk/src/components/kinectViewer/kinectViewergui.cpp 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/kinectViewer/kinectViewergui.cpp 2013-05-01 21:30:10 UTC (rev 908)
@@ -27,7 +27,7 @@
modesAvalables=1; //only one mode
}
else{
- reconstructMode=0;
+ reconstructMode=1;
modesAvalables=2; //both modes
}
}
@@ -119,9 +119,9 @@
std::cout << "Creating Progeos Virtual Cameras" << std::endl;
mypro= new kinectViewer::myprogeo();
- mypro->load_cam((char*)path_rgb.c_str(),0);
+ mypro->load_cam((char*)path_rgb.c_str(),0,width, height);
- mypro->load_cam((char*)path_ir.c_str(),1);
+ mypro->load_cam((char*)path_ir.c_str(),1,width, height);
util = new kinectViewer::util3d(mypro);
/*Show window. Note: Set window visibility to false in Glade, otherwise opengl won't work*/
@@ -138,6 +138,7 @@
void
kinectViewergui::updateAll( const colorspaces::Image& imageRGB, const colorspaces::Image& imageDEPTH )
{
+ cv::Mat distance(imageRGB.rows, imageRGB.cols, CV_32FC1);
CvPoint pt1,pt2;
if (w_toggle_rgb->get_active()){
colorspaces::ImageRGB8 img_rgb888(imageRGB);//conversion will happen if needed
@@ -145,40 +146,48 @@
w_imageRGB->clear();
/*si queremos pintar las lineas*/
if (lines_rgb_active){
- IplImage* src = cvCreateImage(cvSize(img_rgb888.width,img_rgb888.height), IPL_DEPTH_8U, 3);
- memcpy((unsigned char *) src->imageData, &(img_rgb888.data[0]),img_rgb888.width*img_rgb888.height * 3);
+ cv::Mat src = cv::Mat(cvSize(img_rgb888.width,img_rgb888.height), CV_8UC3, img_rgb888.data);
+ memcpy((unsigned char *) src.data, &(img_rgb888.data[0]),img_rgb888.width*img_rgb888.height * 3);
util->draw_room(src,0, world->lines, world->numlines);
- memmove(&(img_rgb888.data[0]),(unsigned char *) src->imageData,img_rgb888.width*img_rgb888.height * 3);
+ memmove(&(img_rgb888.data[0]),(unsigned char *) src.data,img_rgb888.width*img_rgb888.height * 3);
}
w_imageRGB->set(imgBuff);
- displayFrameRate();
- while (gtkmain.events_pending())
- gtkmain.iteration();
}
- if (w_toggle_depth->get_active()){
+ if (w_toggle_depth->get_active()||((reconstruct_depth_activate)&&(reconstructMode==0))){
colorspaces::ImageRGB8 img_rgb888(imageDEPTH);//conversion will happen if needed
- Glib::RefPtr<Gdk::Pixbuf> imgBuff = Gdk::Pixbuf::create_from_data((const guint8*) img_rgb888.data,Gdk::COLORSPACE_RGB,false,8,img_rgb888.width,img_rgb888.height,img_rgb888.step);
- w_imageDEPTH->clear();
- if (lines_depth_active){
- IplImage* src = cvCreateImage(cvSize(img_rgb888.width,img_rgb888.height), IPL_DEPTH_8U, 3);
- memcpy((unsigned char *) src->imageData, &(img_rgb888.data[0]),img_rgb888.width*img_rgb888.height * 3);
- util->draw_room(src,1, world->lines, world->numlines);
- memmove(&(img_rgb888.data[0]),(unsigned char *) src->imageData,img_rgb888.width*img_rgb888.height * 3);
-
+ cv::Mat srcDEPTH = cv::Mat(cvSize(img_rgb888.width,img_rgb888.height), CV_8UC3, img_rgb888.data);
+ /*split channels to separate distance from image*/
+ std::vector<cv::Mat> layers;
+ cv::split(srcDEPTH, layers);
+ cv::Mat colorDepth(srcDEPTH.size(),srcDEPTH.type());
+ cv::cvtColor(layers[0],colorDepth,CV_GRAY2RGB);
+
+ for (int x=0; x< layers[1].cols ; x++){
+ for (int y=0; y<layers[1].rows; y++){
+ distance.at<float>(y,x) = ((int)layers[1].at<unsigned char>(y,x)<<8)|(int)layers[2].at<unsigned char>(y,x); }
}
- w_imageDEPTH->set(imgBuff);
- displayFrameRate();
- while (gtkmain.events_pending())
- gtkmain.iteration();
+
+ if (w_toggle_depth->get_active()){
+ Glib::RefPtr<Gdk::Pixbuf> imgBuff = Gdk::Pixbuf::create_from_data((const guint8*) img_rgb888.data,Gdk::COLORSPACE_RGB,false,8,img_rgb888.width,img_rgb888.height,img_rgb888.step);
+ w_imageDEPTH->clear();
+ if (lines_depth_active){
+ util->draw_room(colorDepth,1, world->lines, world->numlines);
+ }
+ memmove(&(img_rgb888.data[0]),(unsigned char *) colorDepth.data,img_rgb888.width*img_rgb888.height * 3);
+
+ w_imageDEPTH->set(imgBuff);
+ }
}
if (reconstruct_depth_activate){
- if (reconstructMode==0)
- add_depth_pointsImage(imageDEPTH, imageRGB);
+ if (reconstructMode==0){
+ add_depth_pointsImage(imageRGB, distance);
+ }
else
add_depth_pointsCloud();
}
+ world->my_expose_event();
while (gtkmain.events_pending())
gtkmain.iteration();
}
@@ -208,6 +217,7 @@
if (reconstructMode!=0)
add_depth_pointsCloud();
}
+ world->my_expose_event();
while (gtkmain.events_pending())
gtkmain.iteration();
}
@@ -238,7 +248,7 @@
add_depth_pointsCloud();
}
}
-
+ world->my_expose_event();
while (gtkmain.events_pending())
gtkmain.iteration();
}
@@ -254,6 +264,7 @@
add_depth_pointsCloud();
}
}
+ world->my_expose_event();
while (gtkmain.events_pending())
gtkmain.iteration();
}
@@ -359,42 +370,18 @@
}
void
-kinectViewergui::add_depth_pointsImage(const colorspaces::Image& imageDEPTH, const colorspaces::Image& imageRGB){
- float distance;
+kinectViewergui::add_depth_pointsImage(const colorspaces::Image& imageRGB, cv::Mat distance){
+ float d;
+ //std::cout << "point image" << std::endl;
world->clear_points();
//std::cout << "inicio reconstrucción" << std::endl;
- for( unsigned int i = 0 ; i < cWidth*cHeight ; i++) {
- if (((int)imageDEPTH.data[3*i+0]==255) && ( ((int)imageDEPTH.data[3*i+1] >0 ) && ((int)imageDEPTH.data[3*i+1] < 255)) && (((int)imageDEPTH.data[3*i+2] > 0 ) && ((int)imageDEPTH.data[3*i+2] < 255))){
- distance = -((int)imageDEPTH.data[3*i+1] - 255);
- }
- else if (((int)imageDEPTH.data[3*i+0]==255) && (((int)imageDEPTH.data[3*i+1] >= 0) && ((int)imageDEPTH.data[3*i+1]<255)) && ((int)imageDEPTH.data[3*i+2] == 0)){
- distance = 255+ (int)imageDEPTH.data[3*i+1];
- }
- else if ((((int)imageDEPTH.data[3*i+0] >= 0 ) && ((int)imageDEPTH.data[3*i+0] < 255)) && ((int)imageDEPTH.data[3*i+1]==255) && ((int)imageDEPTH.data[3*i+2] == 0)){
- distance = 2*255 - ((int)imageDEPTH.data[3*i+0] - 255);
- }
- else if (((int)imageDEPTH.data[3*i+0] == 0) && ((int)imageDEPTH.data[3*i+1]==255) && (((int)imageDEPTH.data[3*i+2] >= 0 ) && ((int)imageDEPTH.data[3*i+2] < 255))){
- distance = 3*255 + (int)imageDEPTH.data[3*i+2];
- }
- else if (((int)imageDEPTH.data[3*i+0] == 0) && ((int)imageDEPTH.data[3*i+1] >= 0 ) && ((int)imageDEPTH.data[3*i+1] < 255) && ((int)imageDEPTH.data[3*i+2]==255)){
- distance = 4*255 - ((int)imageDEPTH.data[3*i+1] - 255);
- }
- else if (((int)imageDEPTH.data[3*i+0] == 0) && ((int)imageDEPTH.data[3*i+1] == 0) && ((int)imageDEPTH.data[3*i+2] > 0 ) && ((int)imageDEPTH.data[3*i+2] < 255)){
- distance = 5*255 - ((int)imageDEPTH.data[3*i+2] - 255);
- }
- else{
- distance=0;
- }
-
- if (distance != 0 ){
- //PRUEBA DE DISTANCIA
- /*if (i==153600 + 320){
- std::cout << distance << std::endl;
- }*/
-
- distance = distance *10;
-
+ for (int xIm=0; xIm< cWidth; xIm++){
+ for (int yIm=0; yIm<cHeight ; yIm++){
+ d=distance.at<float>(yIm,xIm);
+ if (d!=0){
+ //std::cout << d << std::endl;
+ //d=d*10;
float xp,yp,zp,camx,camy,camz;
float ux,uy,uz;
float x,y;
@@ -407,7 +394,7 @@
- mypro->mybackproject(i % cWidth, i / cWidth, &xp, &yp, &zp, &camx, &camy, &camz,0);
+ mypro->mybackproject(xIm, yIm, &xp, &yp, &zp, &camx, &camy, &camz,0);
//vector unitario
float modulo;
@@ -423,9 +410,9 @@
uy = (yp-camy)*modulo;
uz = (zp-camz)*modulo;
- Fx= distance*fx + camx;
- Fy= distance*fy + camy;
- Fz= distance*fz + camz;
+ Fx= d*fx + camx;
+ Fy= d*fy + camy;
+ Fz= d*fz + camz;
/* calculamos el punto real */
t = (-(fx*camx) + (fx*Fx) - (fy*camy) + (fy*Fy) - (fz*camz) + (fz*Fz))/((fx*ux) + (fy*uy) + (fz*uz));
@@ -440,11 +427,12 @@
std::cout << ux << "," << uy<< "," << uz << std::endl;*/
//k= (80-yp)/uy;
//std::cout << "distancia" << distance << std::endl;
-
- world->add_kinect_point(t*ux + camx,t*uy+ camy,t*uz + camz,(int)imageRGB.data[3*i],(int)imageRGB.data[3*i+1],(int)imageRGB.data[3*i+2]);
+ //std::cout<< t*ux + camx << ", " << t*uy + camy << ", " << t*uz + camz << std::endl;
+ world->add_kinect_point(t*ux + camx,t*uy+ camy,t*uz + camz,(int)imageRGB.data[3*(yIm*cWidth+xIm)],(int)imageRGB.data[3*(yIm*cWidth+xIm)+1],(int)imageRGB.data[3*(yIm*cWidth+xIm)+2]);
//world->add_line(distance*ux + camx,distance*uy+ camy,distance*uz + camz,camx,camy,camz);
}
+ }
}
//std::cout << "fin reconstrucción" << std::endl;
@@ -455,7 +443,6 @@
world->clear_points();
std::vector<jderobot::RGBPoint> cloud;
cloud=pointCloud->getCloud();
-
for (std::vector<jderobot::RGBPoint>::iterator it = cloud.begin(); it != cloud.end(); ++it){
world->add_kinect_point(it->x,it->y,it->z,(int)it->r,(int)it->g,(int)it->b);
}
@@ -556,13 +543,13 @@
void
kinectViewergui::on_w_radio_mode_pointcloud_activate(){
if (w_radio_mode_pointcloud->get_active())
- reconstructMode=0;
+ reconstructMode=1;
}
void
kinectViewergui::on_w_radio_mode_image_activate(){
if (w_radio_mode_image->get_active())
- reconstructMode=1;
+ reconstructMode=0;
}
void
Modified: trunk/src/components/kinectViewer/kinectViewergui.h
===================================================================
--- trunk/src/components/kinectViewer/kinectViewergui.h 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/kinectViewer/kinectViewergui.h 2013-05-01 21:30:10 UTC (rev 908)
@@ -93,7 +93,7 @@
bool on_clicked_event_depth(GdkEventButton* event);
bool reconstruct_depth_activate;
void on_reconstruct_depth();
- void add_depth_pointsImage(const colorspaces::Image& imageDEPTH, const colorspaces::Image& imageRGB);
+ void add_depth_pointsImage(const colorspaces::Image& imageRGB, cv::Mat distance);
void add_depth_pointsCloud();
void add_cameras_position();
void add_camera_position();
Modified: trunk/src/components/kinectViewer/myprogeo.cpp
===================================================================
--- trunk/src/components/kinectViewer/myprogeo.cpp 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/kinectViewer/myprogeo.cpp 2013-05-01 21:30:10 UTC (rev 908)
@@ -33,127 +33,34 @@
/* gets the calibration of the camera from a file */
-int myprogeo::load_cam_line(FILE *myfile,int cam)
-{
- char word1[MAX_BUFFER],word2[MAX_BUFFER];
- int i=0;
- char buffer_file[MAX_BUFFER];
- double roll;
-
- buffer_file[0]=fgetc(myfile);
- if (feof(myfile)) return EOF;
- if (buffer_file[0]==(char)255) return EOF;
- if (buffer_file[0]=='#') {while(fgetc(myfile)!='\n'); return 0;}
- if (buffer_file[0]==' ') {while(buffer_file[0]==' ') buffer_file[0]=fgetc(myfile);}
- if (buffer_file[0]=='\t') {while(buffer_file[0]=='\t') buffer_file[0]=fgetc(myfile);}
-
- //Captures a line and then we will process it with sscanf checking that the last character is \n. We can't doit with fscanf because this function does not difference \n from blank space.
- while((buffer_file[i]!='\n') &&
- (buffer_file[i] != (char)255) &&
- (i<MAX_BUFFER-1) ) {
- buffer_file[++i]=fgetc(myfile);
- }
-
- if (i >= MAX_BUFFER-1) {
- printf("%s...\n", buffer_file);
- printf ("Line too long in config file!\n");
- return -1;
- }
- buffer_file[++i]='\0';
-
-
- if (sscanf(buffer_file,"%s",word1)!=1) return 0;
- // return EOF; empty line
- else {
- if (strcmp(word1,"positionX")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].position.X=(float)atof(word2);
- }
- else if (strcmp(word1,"positionY")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].position.Y=(float)atof(word2);
- }
- else if (strcmp(word1,"positionZ")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].position.Z=(float)atof(word2);
- }
- else if (strcmp(word1,"positionH")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].position.H=(float)atof(word2);
- }
- else if (strcmp(word1,"FOApositionX")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].foa.X=(float)atof(word2);
- }
- else if (strcmp(word1,"FOApositionY")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].foa.Y=(float)atof(word2);
- }
- else if (strcmp(word1,"FOApositionZ")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].foa.Z=(float)atof(word2);
- }
- else if (strcmp(word1,"FOApositionH")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].foa.H=(float)atof(word2);
- }
- else if (strcmp(word1,"roll")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].roll=(float)atof(word2);
- }
- else if (strcmp(word1,"f")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].fdistx=(float)atof(word2);
- cameras[cam].fdisty=(float)atof(word2);
- }
- else if (strcmp(word1,"fx")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].fdistx=(float)atof(word2);
- }
- else if (strcmp(word1,"fy")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].fdisty=(float)atof(word2);
- }
- else if (strcmp(word1,"skew")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].skew=(float)atof(word2);
- }
- else if (strcmp(word1,"u0")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].u0=(float)atof(word2);
- }
- else if (strcmp(word1,"v0")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].v0=(float)atof(word2);
- }
- else if (strcmp(word1,"columns")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].columns=(int)atoi(word2);
- }
- else if (strcmp(word1,"rows")==0){
- sscanf(buffer_file,"%s %s",word1,word2);
- cameras[cam].rows=(int)atoi(word2);
- }
- }
- return 1;
-}
-
-/* gets the calibration of the camera from a file */
-void myprogeo::load_cam(char *fich_in,int cam)
+void myprogeo::load_cam(char *fich_in,int cam, int w, int h)
{
FILE *entrada;
int i;
+ if (strlen(fich_in) ==0 ){
+ std::cout << w << ", " << h << std::endl;
+ this->cameras[cam].fdistx=515;
+ this->cameras[cam].fdisty=515;
+ this->cameras[cam].u0=h/2;
+ this->cameras[cam].v0=w/2;
+ this->cameras[cam].position.X=0;
+ this->cameras[cam].position.Y=0;
+ this->cameras[cam].position.Z=0;
+ this->cameras[cam].foa.X=0;
+ this->cameras[cam].foa.Y=1;
+ this->cameras[cam].foa.Z=0;
+ this->cameras[cam].skew=0;
+ this->cameras[cam].roll=0;
+ update_camera_matrix(&cameras[cam]);
- std::cout << fich_in << std::endl;
- entrada=fopen(fich_in,"r");
- if(entrada==NULL){
- printf("tracker3D: camera input calibration file %s does not exits\n",fich_in);
- }else{
- do{i=load_cam_line(entrada,cam);}while(i!=EOF);
- fclose(entrada);
- }
- update_camera_matrix(&cameras[cam]);
- //display_camerainfo(cameras[cam]);
+
+ }
+ else{
+ xmlReader(&(this->cameras[cam]), fich_in);
+ update_camera_matrix(&cameras[cam]);
+ }
+
+ display_camerainfo(cameras[cam]);
}
Modified: trunk/src/components/kinectViewer/myprogeo.h
===================================================================
--- trunk/src/components/kinectViewer/myprogeo.h 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/kinectViewer/myprogeo.h 2013-05-01 21:30:10 UTC (rev 908)
@@ -44,8 +44,7 @@
public:
myprogeo();
~myprogeo();
- int load_cam_line(FILE *myfile,int cam);
- void load_cam(char *fich_in,int cam);
+ void load_cam(char *fich_in,int cam, int w, int h);
void mybackproject(float x, float y, float* xp, float* yp, float* zp, float* camx, float* camy, float* camz, int cam);
void myproject(float x, float y, float z, float* xp, float* yp, int cam);
void mygetcameraposition(float *x, float *y, float *z, int cam);
Modified: trunk/src/components/kinectViewer/util3d.cpp
===================================================================
--- trunk/src/components/kinectViewer/util3d.cpp 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/kinectViewer/util3d.cpp 2013-05-01 21:30:10 UTC (rev 908)
@@ -32,7 +32,7 @@
util3d::~util3d(){
}
-int util3d::cvDrawline(IplImage* image,HPoint2D p1, HPoint2D p2, CvScalar color,int cam){
+int util3d::cvDrawline(cv::Mat image,HPoint2D p1, HPoint2D p2, cv::Scalar color,int cam){
HPoint2D gooda,goodb;
CvPoint pt1,pt2;
TPinHoleCamera camera;
@@ -41,13 +41,13 @@
if(displayline(p1,p2,&gooda,&goodb,camera)==1){
pt1.x=(int)gooda.y; pt1.y=camera.rows-1-(int)gooda.x;
pt2.x=(int)goodb.y; pt2.y=camera.rows-1-(int)goodb.x;
- cvLine(image, pt1, pt2, color, 2, 1, 0);
+ cv::line(image,pt1,pt2,color,2,1,0);
return 1;
}
return 0;
}
-void util3d::draw_room(IplImage *image,int cam, float lines[][8], int n_lines){
+void util3d::draw_room(cv::Mat image,int cam, float lines[][8], int n_lines){
int i,j;
HPoint2D a,b;
HPoint3D a3A,a3B,a3C,a3D;
Modified: trunk/src/components/kinectViewer/util3d.h
===================================================================
--- trunk/src/components/kinectViewer/util3d.h 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/kinectViewer/util3d.h 2013-05-01 21:30:10 UTC (rev 908)
@@ -30,14 +30,14 @@
namespace kinectViewer {
- class util3d {
+class util3d {
public:
util3d(myprogeo* p);
~util3d();
- void draw_room(IplImage *image,int cam, float lines[][8], int n_lines);
+ void draw_room(cv::Mat image,int cam, float lines[][8], int n_lines);
private:
- int cvDrawline(IplImage* image,HPoint2D p1, HPoint2D p2, CvScalar color,int cam);
+ int cvDrawline(cv::Mat image,HPoint2D p1, HPoint2D p2, cv::Scalar color,int cam);
myprogeo* mypro;
};
}
Deleted: trunk/src/components/openniServer/openni2.cpp
===================================================================
--- trunk/src/components/openniServer/openni2.cpp 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/openniServer/openni2.cpp 2013-05-01 21:30:10 UTC (rev 908)
@@ -1,113 +0,0 @@
-/*****************************************************************************
-* *
-* OpenNI 2.x Alpha *
-* Copyright (C) 2012 PrimeSense Ltd. *
-* *
-* This file is part of OpenNI. *
-* *
-* Licensed under the Apache License, Version 2.0 (the "License"); *
-* you may not use this file except in compliance with the License. *
-* You may obtain a copy of the License at *
-* *
-* http://www.apache.org/licenses/LICENSE-2.0 *
-* *
-* Unless required by applicable law or agreed to in writing, software *
-* distributed under the License is distributed on an "AS IS" BASIS, *
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
-* See the License for the specific language governing permissions and *
-* limitations under the License. *
-* *
-*****************************************************************************/
-#include <OpenNI.h>
-
-
-
-int main(int argc, char** argv)
-{
- int deviceID;
- openni::Status rc = openni::STATUS_OK;
-
- rc = openni::OpenNI::initialize();
- if (rc != openni::STATUS_OK)
- {
- printf("%s: Initialize failed\n%s\n", argv[0], openni::OpenNI::getExtendedError());
- return 1;
- }
-
- openni::Array<openni::DeviceInfo> deviceList;
- openni::OpenNI::enumerateDevices(&deviceList);
-
- const char* deviceUri;
- //checking the number off connected devices
- if (deviceList.getSize() < 1)
- {
- printf("Missing devices\n");
- openni::OpenNI::shutdown();
- return 1;
- }
-
- //getting the Uri of the selected device
- deviceUri = deviceList[deviceID].getUri();
-
- //getting the device from the uri
- openni::Device device;
- openni::VideoStream depth;
- rc = device.open(deviceUri);
- if (rc != openni::STATUS_OK)
- {
- printf("%s: Couldn't open device %d\n%s\n", deviceID, deviceUri, openni::OpenNI::getExtendedError());
- openni::OpenNI::shutdown();
- return 3;
- }
-
-
-/*
-
- rc = depth1.create(device1, openni::SENSOR_DEPTH);
- if (rc != openni::STATUS_OK)
- {
- printf("%s: Couldn't create stream %d on device %s\n%s\n", argv[0], openni::SENSOR_DEPTH, device1Uri, openni::OpenNI::getExtendedError());
- openni::OpenNI::shutdown();
- return 4;
- }
- rc = depth2.create(device2, openni::SENSOR_DEPTH);
- if (rc != openni::STATUS_OK)
- {
- printf("%s: Couldn't create stream %d on device %s\n%s\n", argv[0], openni::SENSOR_DEPTH, device2Uri, openni::OpenNI::getExtendedError());
- openni::OpenNI::shutdown();
- return 4;
- }
-
- rc = depth1.start();
- if (rc != openni::STATUS_OK)
- {
- printf("%s: Couldn't start stream %d on device %s\n%s\n", argv[0], openni::SENSOR_DEPTH, device1Uri, openni::OpenNI::getExtendedError());
- openni::OpenNI::shutdown();
- return 5;
- }
- rc = depth2.start();
- if (rc != openni::STATUS_OK)
- {
- printf("%s: Couldn't start stream %d on device %s\n%s\n", argv[0], openni::SENSOR_DEPTH, device2Uri, openni::OpenNI::getExtendedError());
- openni::OpenNI::shutdown();
- return 5;
- }
-
- if (!depth1.isValid() && !depth2.isValid())
- {
- printf("SimpleViewer: No valid streams. Exiting\n");
- openni::OpenNI::shutdown();
- return 6;
- }*/
-
- /*SampleViewer sampleViewer("Simple Viewer", depth1, depth2);
-
- rc = sampleViewer.init(argc, argv);
- if (rc != openni::STATUS_OK)
- {
- printf("SimpleViewer: Initialization failed\n%s\n", openni::OpenNI::getExtendedError());
- openni::OpenNI::shutdown();
- return 7;
- }
- sampleViewer.run();*/
-}
Modified: trunk/src/components/openniServer/openniServer.cpp
===================================================================
--- trunk/src/components/openniServer/openniServer.cpp 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/openniServer/openniServer.cpp 2013-05-01 21:30:10 UTC (rev 908)
@@ -54,11 +54,12 @@
#define VID_MICROSOFT 0x45e
#define PID_NUI_MOTOR 0x02b0
#define NUM_THREADS 5
+#define MAX_LENGHT 10000
#define CHECK_RC(rc, what) \
if (rc != openni::STATUS_OK) \
{ \
- printf("%s failed: %s\n", what, openni::OpenNI::getExtendedError()); \
+ std::cout << what << " failed: " << openni::OpenNI::getExtendedError() << std::endl; \
\
}
@@ -101,7 +102,7 @@
rc = openni::OpenNI::initialize();
if (rc != openni::STATUS_OK)
{
- printf("%d: Initialize failed\n%s\n", SELCAM, openni::OpenNI::getExtendedError());
+ std::cout << SELCAM << ": Initialize failed: "<< openni::OpenNI::getExtendedError() <<std::endl;
}
openni::Array<openni::DeviceInfo> deviceList;
@@ -111,9 +112,8 @@
//checking the number off connected devices
if (deviceList.getSize() < 1)
{
- printf("Missing devices\n");
+ std::cout << "Missing devices" << std::endl;
openni::OpenNI::shutdown();
- //return 1;
}
//getting the Uri of the selected device
@@ -124,9 +124,8 @@
rc = m_device.open(deviceUri);
if (rc != openni::STATUS_OK)
{
- printf("%d: Couldn't open device %s\n%s\n", SELCAM, deviceUri, openni::OpenNI::getExtendedError());
+ std::cout << SELCAM << " : Couldn't open device " << deviceUri << ": " << openni::OpenNI::getExtendedError() << std::endl;
openni::OpenNI::shutdown();
- //return 3;
}
//NITE
@@ -137,7 +136,6 @@
if (m_pUserTracker->create(&m_device) != nite::STATUS_OK)
{
std::cout << "OpenniServer: Couldn't create userTracker " << std::endl;
- //return openni::STATUS_ERROR;
}
#endif
@@ -153,13 +151,13 @@
rc = depth.start();
if (rc != openni::STATUS_OK)
{
- printf("OpenniServer: Couldn't start depth stream:\n%s\n", openni::OpenNI::getExtendedError());
+ std::cout << "OpenniServer: Couldn't start depth stream: "<< openni::OpenNI::getExtendedError() << std::endl;
depth.destroy();
}
}
else
{
- printf("OpenniServer: Couldn't find depth stream:\n%s\n", openni::OpenNI::getExtendedError());
+ std::cout << "OpenniServer: Couldn't find depth stream: " << openni::OpenNI::getExtendedError() << std::endl;
}
//color
@@ -169,13 +167,13 @@
rc = color.start();
if (rc != openni::STATUS_OK)
{
- printf("OpenniServer: Couldn't start color stream:\n%s\n", openni::OpenNI::getExtendedError());
+ std::cout << "OpenniServer: Couldn't start color stream: " << openni::OpenNI::getExtendedError() << std::endl;
color.destroy();
}
}
else
{
- printf("OpenniServer: Couldn't find color stream:\n%s\n", openni::OpenNI::getExtendedError());
+ std::cout << "OpenniServer: Couldn't find color stream: " << openni::OpenNI::getExtendedError() << std::endl;
}
openni::VideoMode depthVideoMode;
@@ -211,9 +209,9 @@
}
else
{
- printf("Error - expect color and depth to be in same resolution: D: %dx%d, C: %dx%d\n",
- depthWidth, depthHeight,
- colorWidth, colorHeight);
+ std::cout << "Error - expect color and depth to be in same resolution: D: " << depthWidth << "x" << depthHeight << "C: " << colorWidth << "x" << colorHeight << std::endl;
+
+
}
}
else if (depth.isValid())
@@ -230,7 +228,7 @@
}
else
{
- printf("Error - expects at least one of the streams to be valid...\n");
+ std::cout << "Error - expects at least one of the streams to be valid..." << std::endl;
}
distances.resize(m_width*m_height);
@@ -253,17 +251,20 @@
#ifndef WITH_NITE2
if (rc != openni::STATUS_OK)
{
- printf("Wait failed\n");
+ std::cout << "Wait failed" << std::endl;
}
switch (changedIndex)
{
case 0:
- depth.readFrame(&m_depthFrame); break;
+ depth.readFrame(&m_depthFrame);
+ break;
case 1:
- color.readFrame(&m_colorFrame); break;
+ color.readFrame(&m_colorFrame);
+ break;
default:
- printf("Error in wait\n");
+ std::cout << "Error in wait" << std::endl;
+ break;
}
//nite
#else
@@ -272,7 +273,7 @@
m_depthFrame = userTrackerFrame.getDepthFrame();
if (rcN != nite::STATUS_OK)
{
- printf("GetNextData failed\n");
+ std::cout << "GetNextData failed" << std::endl;
//return;
}
#endif
@@ -284,13 +285,13 @@
//OJO it control
usleep(1000);
}
-
+ return NULL;
}
/**
-* \brief Class wich contains all the functions and variables to make run the Robot Cameras
+* \brief Class which contains all the functions and variables to make run the Robot Cameras
*/
class CameraRGB: virtual public jderobot::Camera {
public:
@@ -319,7 +320,7 @@
playerdetection=0;
#endif
int fps = prop->getPropertyAsIntWithDefault(prefix+"fps",5);
- //we use formats acording to colorspaces
+ //we use formats according to colorspaces
std::string fmtStr = prop->getPropertyWithDefault(prefix+"Format","YUY2");//default format YUY2
imageFmt = colorspaces::Image::Format::searchFormat(fmtStr);
if (!imageFmt)
@@ -353,6 +354,7 @@
virtual std::string startCameraStreaming(const Ice::Current&){
context.tracer().info("Should be made anything to start camera streaming: " + cameraDescription->name);
+ return std::string("");
}
virtual void stopCameraStreaming(const Ice::Current&) {
@@ -360,7 +362,7 @@
}
virtual Ice::Int setCameraDescription(const jderobot::CameraDescriptionPtr&, const Ice::Current&){
-
+ return 0;
}
private:
@@ -589,6 +591,7 @@
virtual std::string startCameraStreaming(const Ice::Current&){
context.tracer().info("Should be made anything to start camera streaming: " + cameraDescription->name);
+ return std::string("");
}
virtual void stopCameraStreaming(const Ice::Current&) {
@@ -596,7 +599,7 @@
}
virtual Ice::Int setCameraDescription(const jderobot::CameraDescriptionPtr&, const Ice::Current&){
-
+ return 0;
}
private:
@@ -670,45 +673,10 @@
distances[(y*m_depthFrame.getWidth() + x)] = *pDepth;
if (*pDepth != 0)
{
- int pval = (int)*pDepth/10;
- int lb = pval & 0xff;
- switch (pval>>8) {
- case 0:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)255;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)(255-lb);
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)(255-lb);
- break;
- case 1:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)255;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)lb;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)0;
- break;
- case 2:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)(255-lb);
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)255;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)0;
- break;
- case 3:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)255;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)lb;
- break;
- case 4:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)(255-lb);
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)255;
- break;
- case 5:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)(255-lb);
- break;
- default:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)0;
- break;
- }
+ src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (float(*pDepth)/(float)MAX_LENGHT)*255.;
+ src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (*pDepth)>>8;
+ src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (*pDepth)&0xff;
+
}
else{
src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = 0;
@@ -726,45 +694,9 @@
distances[(y*m_depthFrame.getWidth() + x)] = *pDepth;
if (*pDepth != 0)
{
- int pval = (int)*pDepth/10;
- int lb = pval & 0xff;
- switch (pval>>8) {
- case 0:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)255;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)(255-lb);
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)(255-lb);
- break;
- case 1:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)255;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)lb;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)0;
- break;
- case 2:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)(255-lb);
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)255;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)0;
- break;
- case 3:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)255;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)lb;
- break;
- case 4:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)(255-lb);
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)255;
- break;
- case 5:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)(255-lb);
- break;
- default:
- src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (unsigned int)0;
- src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (unsigned int)0;
- break;
- }
+ src.data[(y*m_depthFrame.getWidth()+ x)*3+0] = (float(*pDepth)/(float)MAX_LENGHT)*255.;
+ src.data[(y*m_depthFrame.getWidth()+ x)*3+1] = (*pDepth)>>8;
+ src.data[(y*m_depthFrame.getWidth()+ x)*3+2] = (*pDepth)&0xff;
}
#endif
}
Modified: trunk/src/components/openniServer-kinect/openniServer-kinect.cpp
===================================================================
--- trunk/src/components/openniServer-kinect/openniServer-kinect.cpp 2013-05-01 21:26:00 UTC (rev 907)
+++ trunk/src/components/openniServer-kinect/openniServer-kinect.cpp 2013-05-01 21:30:10 UTC (rev 908)
@@ -62,6 +62,7 @@
#define VID_MICROSOFT 0x45e
#define PID_NUI_MOTOR 0x02b0
#define NUM_THREADS 5
+#define MAX_LENGHT 10000
#define CHECK_RC(rc, what) \
if (rc != XN_STATUS_OK) \
@@ -99,7 +100,7 @@
std::vector<int> distances;
IplImage* srcRGB=NULL;
int colors[10][3];
-int userGeneratorActive=0;
+int userGeneratorActive=1;
int width;
int height;
@@ -356,6 +357,7 @@
imageDescription->width = width;
imageDescription->height = height;
int playerdetection = prop->getPropertyAsIntWithDefault(prefix+"PlayerDetection",0);
+ std::cout << prefix+"PlayerDetection" << std::endl;
if (!(userGeneratorActive))
playerdetection=0;
int fps = prop->getPropertyAsIntWithDefault(prefix+"fps",5);
@@ -465,6 +467,7 @@
for (XnUInt x = 0; x < sensors[SELCAM].imageMD.XRes(); ++x, ++pImage, ++pTex)
{
+ //std::cout << segmentation << std::endl;
if (segmentation){
pixelsID[(y*sensors[SELCAM].imageMD.XRes() + x)]= *pLabels;
if (*pLabels!=0)
@@ -473,10 +476,15 @@
srcRGB->imageData[(y*sensors[SELCAM].imageMD.XRes() + x)*3 + 1] = colors[*pLabels][1];
srcRGB->imageData[(y*sensors[SELCAM].imageMD.XRes() + x)*3 + 2] = colors[*pLabels][2];
}
- else{
+ /*else{
srcRGB->imageData[(y*sensors[SELCAM].imageMD.XRes() + x)*3 + 0] = 0;
srcRGB->imageData[(y*sensors[SELCAM].imageMD.XRes() + x)*3 + 1] = 0;
srcRGB->imageData[(y*sensors[SELCAM].imageMD.XRes() + x)*3 + 2] = 0;
+ }*/
+ else{
+ srcRGB->imageData[(y*sensors[SELCAM].imageMD.XRes() + x)*3 + 0] = pImage->nRed;
+ srcRGB->imageData[(y*sensors[SELCAM].imageMD.XRes() + x)*3 + 1] = pImage->nGreen;
+ srcRGB->imageData[(y*sensors[SELCAM].imageMD.XRes() + x)*3 + 2] = pImage->nBlue;
}
++pLabels;
}
@@ -693,47 +701,9 @@
distances[(y*sensors[SELCAM].depthMD.XRes() + x)] = *pDepth;
if (*pDepth != 0)
{
- int pval = (int)*pDepth/10;
-
- int lb = pval & 0xff;
-
- switch (pval>>8) {
- case 0:
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+0] = 255;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+1] = 255-lb;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+2] = 255-lb;
- break;
- case 1:
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+0] = 255;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+1] = lb;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+2] = 0;
- break;
- case 2:
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+0] = 255-lb;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+1] = 255;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+2] = 0;
- break;
- case 3:
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+0] = 0;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+1] = 255;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+2] = lb;
- break;
- case 4:
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+0] = 0;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+1] = 255-lb;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+2] = 255;
- break;
- case 5:
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+0] = 0;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+1] = 0;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+2] = 255-lb;
- break;
- default:
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+0] = 0;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+1] = 0;
- src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+2] = 0;
- break;
- }
+ src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+0] = (float(*pDepth)/(float)MAX_LENGHT)*255.;
+ src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+1] = (*pDepth)>>8;
+ src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+2] = (*pDepth)&0xff;
}
else{
src->imageData[(y*sensors[SELCAM].depthMD.XRes() + x)*3+0] = 0;
More information about the Jderobot-admin
mailing list