|
Working on Example Camera Code
Hello, I am trying to simply get the camera to feed me data, from the example code, but I have some questions. When I run it using the image flashed in the cRIO, it works well (I guess, as in I get constant data). However when I try to use the GetImage from the axis camera it no longer feeds data. I used a utility to get the HSV from my camera, but that did not help, in fact it made it worse. Does the code look right?
Also, what do these lines do? I understand that they write images to the cRIO throughout the image processing, but in what cases would I want to use them? Apparently doing this is bad for the cRIO as well?
Code:
BinaryImage *thresholdImage = image->ThresholdHSV(threshold); // get just the green target pixels
//thresholdImage->Write("/threshold.bmp");
BinaryImage *convexHullImage = thresholdImage->ConvexHull(false); // fill in partial and full rectangles
//convexHullImage->Write("/ConvexHull.bmp");
BinaryImage *filteredImage = convexHullImage->ParticleFilter(criteria, 1); //Remove small particles
//filteredImage->Write("Filtered.bmp");
Code:
#include "WPILib.h"
#include "Vision/RGBImage.h"
#include "Vision/BinaryImage.h"
#include "Math.h"
/**
* Sample program to use NIVision to find rectangles in the scene that are illuminated
* by a LED ring light (similar to the model from FIRSTChoice). The camera sensitivity
* is set very low so as to only show light sources and remove any distracting parts
* of the image.
*
* The CriteriaCollection is the set of criteria that is used to filter the set of
* rectangles that are detected. In this example we're looking for rectangles with
* a minimum width of 30 pixels and maximum of 400 pixels.
*
* The algorithm first does a color threshold operation that only takes objects in the
* scene that have a bright green color component. Then a convex hull operation fills
* all the rectangle outlines (even the partially occluded ones). Then a small object filter
* removes small particles that might be caused by green reflection scattered from other
* parts of the scene. Finally all particles are scored on rectangularity, aspect ratio,
* and hollowness to determine if they match the target.
*
* Look in the VisionImages directory inside the project that is created for the sample
* images as well as the NI Vision Assistant file that contains the vision command
* chain (open it with the Vision Assistant)
*/
//Camera constants used for distance calculation
#define X_IMAGE_RES 320 //X Image resolution in pixels, should be 160, 320 or 640
//#define VIEW_ANGLE 48 //Axis 206 camera
#define VIEW_ANGLE 43.5 //Axis M1011 camera
#define PI 3.141592653
//Score limits used for target identification
//Each score is 0-100; a particle must exceed every limit to be called a target
#define RECTANGULARITY_LIMIT 60
#define ASPECT_RATIO_LIMIT 75
#define X_EDGE_LIMIT 40
#define Y_EDGE_LIMIT 60
//Minimum area of particles to be considered
#define AREA_MINIMUM 500
//Edge profile constants used for hollowness score calculation
//(number of samples in each of the template arrays below)
#define XMAXSIZE 24
#define XMINSIZE 24
#define YMAXSIZE 24
#define YMINSIZE 48
// Template edge profiles used by scoreXEdge/scoreYEdge. Each entry is a
// normalized (0-1) column/row average: the *Max arrays are upper bounds and the
// *Min arrays lower bounds that a hollow rectangle's profile should fall between
// (solid edges near 1.0 at the ends, hollow center near 0 in the middle).
const double xMax[XMAXSIZE] = {1, 1, 1, 1, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, 1, 1, 1, 1};
const double xMin[XMINSIZE] = {.4, .6, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, 0.6, 0};
const double yMax[YMAXSIZE] = {1, 1, 1, 1, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, 1, 1, 1, 1};
const double yMin[YMINSIZE] = {.4, .6, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05,
.05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05, .05,
.05, .05, .6, 0};
class RobotDemo : public SimpleRobot
{
//Structure to represent the scores for the various tests used for target identification
struct Scores {
double rectangularity;
double aspectRatioInner;
double aspectRatioOuter;
double xEdge;
double yEdge;
};
// RobotDrive myRobot; // robot drive system
// Joystick stick; // only joystick
Scores *scores;
DriverStationLCD *lcd;
SmartDashboard *sds;
Jaguar *FRmotor; // Front Right motor, ID: 1
Jaguar *FLmotor; // Front Right motor, ID: 2
Jaguar *RRmotor; // Front Right motor, ID: 3
Jaguar *RLmotor; // Front Right motor, ID: 4
RobotDrive *drive;
public:
RobotDemo(void)
{
lcd = DriverStationLCD::GetInstance();
/**
FRmotor = new Jaguar(1); //(10);
FLmotor = new Jaguar(2); //(13);
RRmotor = new Jaguar(3); //(32);
RLmotor = new Jaguar(4); //(31);
drive = new RobotDrive(FLmotor, RLmotor, FRmotor, RRmotor);
drive->SetInvertedMotor(RobotDrive::kFrontRightMotor, true);
drive->SetInvertedMotor(RobotDrive::kFrontLeftMotor, false);
drive->SetInvertedMotor(RobotDrive::kRearRightMotor, true);
drive->SetInvertedMotor(RobotDrive::kRearLeftMotor, false);
drive->SetExpiration(15);
// Motor Safety Enabled
FRmotor->SetExpiration(0.1);
FRmotor->SetSafetyEnabled(true);
FLmotor->SetExpiration(0.1);
FLmotor->SetSafetyEnabled(true);
RRmotor->SetExpiration(0.1);
RRmotor->SetSafetyEnabled(true);
RLmotor->SetExpiration(0.1);
RLmotor->SetSafetyEnabled(true);
*/
}
void Autonomous(void)
{
Threshold threshold(60, 100, 90, 255, 20, 255); //HSV threshold criteria, ranges are in that order ie. Hue is 60-100
// Threshold threshold(80, 140, 0, 228, 15, 150); //HSV threshold criteria, ranges are in that order ie. Hue is 60-100
ParticleFilterCriteria2 criteria[] = {
{IMAQ_MT_AREA, AREA_MINIMUM, 65535, false, false}
}; //Particle filter criteria, used to filter out small particles
while (IsAutonomous() && IsEnabled()) {
lcd->Clear();
/**
* Do the image capture with the camera and apply the algorithm described above. This
* sample will either get images from the camera or from an image file stored in the top
* level directory in the flash memory on the cRIO. The file name in this case is "testImage.jpg"
*/
AxisCamera &camera = AxisCamera::GetInstance("10.15.95.11"); //To use the Axis camera uncomment this line
ColorImage *image;
camera.GetImage(image); //To get the images from the camera comment the line above and uncomment this one
camera.WriteResolution(AxisCamera::kResolution_320x240);
camera.WriteWhiteBalance(AxisCamera::kWhiteBalance_FixedOutdoor1);
// image = new RGBImage("/HybridLine_SmallGreen3.jpg"); // get the sample image from the cRIO flash
printf("Width: %dx", image->GetWidth());
printf("Height: %d", image->GetHeight());
BinaryImage *thresholdImage = image->ThresholdHSV(threshold); // get just the green target pixels
//thresholdImage->Write("/threshold.bmp");
BinaryImage *convexHullImage = thresholdImage->ConvexHull(false); // fill in partial and full rectangles
//convexHullImage->Write("/ConvexHull.bmp");
BinaryImage *filteredImage = convexHullImage->ParticleFilter(criteria, 1); //Remove small particles
//filteredImage->Write("Filtered.bmp");
vector<ParticleAnalysisReport> *reports = filteredImage->GetOrderedParticleAnalysisReports(); //get a particle analysis report for each particle
scores = new Scores[reports->size()];
//Iterate through each particle, scoring it and determining whether it is a target or not
for (unsigned i = 0; i < reports->size(); i++) {
ParticleAnalysisReport *report = &(reports->at(i));
scores[i].rectangularity = scoreRectangularity(report);
scores[i].aspectRatioOuter = scoreAspectRatio(filteredImage, report, true);
scores[i].aspectRatioInner = scoreAspectRatio(filteredImage, report, false);
scores[i].xEdge = scoreXEdge(thresholdImage, report);
scores[i].yEdge = scoreYEdge(thresholdImage, report);
if(scoreCompare(scores[i], false))
{
lcd->Printf(DriverStationLCD::kUser_Line1, 1, "%dHi:%f Y:%f\n", i, report->center_mass_x_normalized, report->center_mass_y_normalized);
lcd->Printf(DriverStationLCD::kUser_Line2, 1, "Dist: %f \n", computeDistance(thresholdImage, report, false));
sds->PutNumber("Hi particle:", i);
sds->PutNumber("Hi center X:", report->center_mass_x_normalized);
sds->PutNumber("Hi Dist:", computeDistance(thresholdImage, report, false));
printf("particle: %d is a High Goal centerX: %f centerY: %f \n", i, report->center_mass_x_normalized, report->center_mass_y_normalized);
printf("Distance: %f \n", computeDistance(thresholdImage, report, false));
} else if (scoreCompare(scores[i], true)) {
lcd->Printf(DriverStationLCD::kUser_Line1, 1, "particle: %d is a Middle Goal centerX: %f centerY: %f \n", i, report->center_mass_x_normalized, report->center_mass_y_normalized);
lcd->Printf(DriverStationLCD::kUser_Line2, 1, "Distance: %f \n", computeDistance(thresholdImage, report, true));
sds->PutNumber("Mid particle:", i);
sds->PutNumber("Mid center X:", report->center_mass_x_normalized);
sds->PutNumber("Mid Dist:", computeDistance(thresholdImage, report, false));
printf("particle: %d is a Middle Goal centerX: %f centerY: %f \n", i, report->center_mass_x_normalized, report->center_mass_y_normalized);
printf("Distance: %f \n", computeDistance(thresholdImage, report, true));
} else {
lcd->Printf(DriverStationLCD::kUser_Line1, 1, "particle: %d is not a goal centerX: %f centerY: %f \n", i, report->center_mass_x_normalized, report->center_mass_y_normalized);
printf("particle: %d is not a goal centerX: %f centerY: %f \n", i, report->center_mass_x_normalized, report->center_mass_y_normalized);
}
lcd->Printf(DriverStationLCD::kUser_Line3,1, "r%fAR%f \n", scores[i].rectangularity, scores[i].aspectRatioInner);
lcd->Printf(DriverStationLCD::kUser_Line4,1, "AR%fx%fy%f \n", scores[i].aspectRatioOuter, scores[i].xEdge, scores[i].yEdge);
sds->PutNumber("rect:", scores[i].rectangularity);
printf("rect: %f ARinner: %f \n", scores[i].rectangularity, scores[i].aspectRatioInner);
printf("ARouter: %f xEdge: %f yEdge: %f \n", scores[i].aspectRatioOuter, scores[i].xEdge, scores[i].yEdge);
}
lcd->UpdateLCD();
printf("\n");
// be sure to delete images after using them
delete filteredImage;
delete convexHullImage;
delete thresholdImage;
delete image;
//delete allocated reports and Scores objects also
delete scores;
delete reports;
}
}
/**
* Runs the motors with arcade steering.
*/
void OperatorControl(void)
{
while (IsOperatorControl())
{
Wait(0.005); // wait for a motor update time
}
}
/**
* Computes the estimated distance to a target using the height of the particle in the image. For more information and graphics
* showing the math behind this approach see the Vision Processing section of the ScreenStepsLive documentation.
*
* @param image The image to use for measuring the particle estimated rectangle
* @param report The Particle Analysis Report for the particle
* @param outer True if the particle should be treated as an outer target, false to treat it as a center target
* @return The estimated distance to the target in Inches.
*/
double computeDistance (BinaryImage *image, ParticleAnalysisReport *report, bool outer) {
double rectShort, height;
int targetHeight;
imaqMeasureParticle(image->GetImaqImage(), report->particleIndex, 0, IMAQ_MT_EQUIVALENT_RECT_SHORT_SIDE, &rectShort);
//using the smaller of the estimated rectangle short side and the bounding rectangle height results in better performance
//on skewed rectangles
height = min(report->boundingRect.height, rectShort);
targetHeight = outer ? 29 : 21;
return X_IMAGE_RES * targetHeight / (height * 12 * 2 * tan(VIEW_ANGLE*PI/(180*2)));
}
/**
* Computes a score (0-100) comparing the aspect ratio to the ideal aspect ratio for the target. This method uses
* the equivalent rectangle sides to determine aspect ratio as it performs better as the target gets skewed by moving
* to the left or right. The equivalent rectangle is the rectangle with sides x and y where particle area= x*y
* and particle perimeter= 2x+2y
*
* @param image The image containing the particle to score, needed to perform additional measurements
* @param report The Particle Analysis Report for the particle, used for the width, height, and particle number
* @param outer Indicates whether the particle aspect ratio should be compared to the ratio for the inner target or the outer
* @return The aspect ratio score (0-100)
*/
double scoreAspectRatio(BinaryImage *image, ParticleAnalysisReport *report, bool outer){
double rectLong, rectShort, idealAspectRatio, aspectRatio;
idealAspectRatio = outer ? (62/29) : (62/20); //Dimensions of goal opening + 4 inches on all 4 sides for reflective tape
imaqMeasureParticle(image->GetImaqImage(), report->particleIndex, 0, IMAQ_MT_EQUIVALENT_RECT_LONG_SIDE, &rectLong);
imaqMeasureParticle(image->GetImaqImage(), report->particleIndex, 0, IMAQ_MT_EQUIVALENT_RECT_SHORT_SIDE, &rectShort);
//Divide width by height to measure aspect ratio
if(report->boundingRect.width > report->boundingRect.height){
//particle is wider than it is tall, divide long by short
aspectRatio = 100*(1-fabs((1-((rectLong/rectShort)/idealAspectRatio))));
} else {
//particle is taller than it is wide, divide short by long
aspectRatio = 100*(1-fabs((1-((rectShort/rectLong)/idealAspectRatio))));
}
return (max(0, min(aspectRatio, 100))); //force to be in range 0-100
}
/**
* Compares scores to defined limits and returns true if the particle appears to be a target
*
* @param scores The structure containing the scores to compare
* @param outer True if the particle should be treated as an outer target, false to treat it as a center target
*
* @return True if the particle meets all limits, false otherwise
*/
bool scoreCompare(Scores scores, bool outer){
bool isTarget = true;
isTarget &= scores.rectangularity > RECTANGULARITY_LIMIT;
if(outer){
isTarget &= scores.aspectRatioOuter > ASPECT_RATIO_LIMIT;
} else {
isTarget &= scores.aspectRatioInner > ASPECT_RATIO_LIMIT;
}
isTarget &= scores.xEdge > X_EDGE_LIMIT;
isTarget &= scores.yEdge > Y_EDGE_LIMIT;
return isTarget;
}
/**
* Computes a score (0-100) estimating how rectangular the particle is by comparing the area of the particle
* to the area of the bounding box surrounding it. A perfect rectangle would cover the entire bounding box.
*
* @param report The Particle Analysis Report for the particle to score
* @return The rectangularity score (0-100)
*/
double scoreRectangularity(ParticleAnalysisReport *report){
if(report->boundingRect.width*report->boundingRect.height !=0){
return 100*report->particleArea/(report->boundingRect.width*report->boundingRect.height);
} else {
return 0;
}
}
/**
* Computes a score based on the match between a template profile and the particle profile in the X direction. This method uses the
* the column averages and the profile defined at the top of the sample to look for the solid vertical edges with
* a hollow center.
*
* @param image The image to use, should be the image before the convex hull is performed
* @param report The Particle Analysis Report for the particle
*
* @return The X Edge Score (0-100)
*/
double scoreXEdge(BinaryImage *image, ParticleAnalysisReport *report){
double total = 0;
LinearAverages *averages = imaqLinearAverages2(image->GetImaqImage(), IMAQ_COLUMN_AVERAGES, report->boundingRect);
for(int i=0; i < (averages->columnCount); i++){
if(xMin[i*(XMINSIZE-1)/averages->columnCount] < averages->columnAverages[i]
&& averages->columnAverages[i] < xMax[i*(XMAXSIZE-1)/averages->columnCount]){
total++;
}
}
total = 100*total/(averages->columnCount); //convert to score 0-100
imaqDispose(averages); //let IMAQ dispose of the averages struct
return total;
}
/**
* Computes a score based on the match between a template profile and the particle profile in the Y direction. This method uses the
* the row averages and the profile defined at the top of the sample to look for the solid horizontal edges with
* a hollow center
*
* @param image The image to use, should be the image before the convex hull is performed
* @param report The Particle Analysis Report for the particle
*
* @return The Y Edge score (0-100)
*/
double scoreYEdge(BinaryImage *image, ParticleAnalysisReport *report){
double total = 0;
LinearAverages *averages = imaqLinearAverages2(image->GetImaqImage(), IMAQ_ROW_AVERAGES, report->boundingRect);
for(int i=0; i < (averages->rowCount); i++){
if(yMin[i*(YMINSIZE-1)/averages->rowCount] < averages->rowAverages[i]
&& averages->rowAverages[i] < yMax[i*(YMAXSIZE-1)/averages->rowCount]){
total++;
}
}
total = 100*total/(averages->rowCount); //convert to score 0-100
imaqDispose(averages); //let IMAQ dispose of the averages struct
return total;
}
};
// WPILib entry-point macro: generates the FRC_UserProgram_StartupLibraryInit
// hook that instantiates RobotDemo and starts the competition state machine.
START_ROBOT_CLASS(RobotDemo);
Thanks!
|