/*
 * MIT License
 *
 * Copyright (c) PhotonVision
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

package org.photonvision;

import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.cscore.OpenCvLoader;
import edu.wpi.first.hal.FRCNetComm.tResourceType;
import edu.wpi.first.hal.HAL;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Pair;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.geometry.Translation3d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import edu.wpi.first.math.numbers.N8;
import edu.wpi.first.wpilibj.DriverStation;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import org.photonvision.estimation.TargetModel;
import org.photonvision.estimation.VisionEstimation;
import org.photonvision.targeting.PhotonPipelineResult;
import org.photonvision.targeting.PhotonTrackedTarget;

/**
 * The PhotonPoseEstimator class filters or combines readings from all the AprilTags visible at a
 * given timestamp on the field to produce a single robot-in-field pose, using the strategy set
 * below. Example usage can be found in our apriltagExample example project.
 */
public class PhotonPoseEstimator {
  private static int InstanceCount = 0;

  /** Position estimation strategies that can be used by the {@link PhotonPoseEstimator} class. */
  public enum PoseStrategy {
    /** Choose the Pose with the lowest ambiguity. */
    LOWEST_AMBIGUITY,

    /** Choose the Pose which is closest to the camera height. */
    CLOSEST_TO_CAMERA_HEIGHT,

    /** Choose the Pose which is closest to a set Reference position. */
    CLOSEST_TO_REFERENCE_POSE,

    /** Choose the Pose which is closest to the last pose calculated. */
    CLOSEST_TO_LAST_POSE,

    /** Return the average of the best target poses using ambiguity as weight. */
    AVERAGE_BEST_TARGETS,

    /**
     * Use all visible tags to compute a single pose estimate on coprocessor. This option needs to
     * be enabled on the PhotonVision web UI as well.
     */
    MULTI_TAG_PNP_ON_COPROCESSOR,

    /**
     * Use all visible tags to compute a single pose estimate. This runs on the RoboRIO, and can
     * take a lot of time.
     */
    MULTI_TAG_PNP_ON_RIO,

    /**
     * Use distance data from best visible tag to compute a Pose. This runs on the RoboRIO in order
     * to access the robot's yaw heading, and MUST have addHeadingData called every frame so heading
     * data is up-to-date.
     *
     * <p>Yields a Pose2d in estimatedRobotPose (0 for z, roll, pitch)
     *
     * <p>https://www.chiefdelphi.com/t/frc-6328-mechanical-advantage-2025-build-thread/477314/98
     */
    PNP_DISTANCE_TRIG_SOLVE,

    /**
     * Solve a constrained version of the Perspective-n-Point problem with the robot's drivebase
     * flat on the floor. This computation takes place on the RoboRIO, and typically takes no more
     * than 2 ms. See {@link PhotonPoseEstimator.ConstrainedSolvepnpParams} and {@link
     * org.photonvision.jni.ConstrainedSolvepnpJni} for details and tuning handles this strategy
     * exposes. This strategy needs addHeadingData called every frame so heading data is up-to-date.
     * If Multi-Tag PNP is enabled on the coprocessor, it will be used to provide an initial seed to
     * the optimization algorithm -- otherwise, the multi-tag fallback strategy will be used as the
     * seed.
     */
    CONSTRAINED_SOLVEPNP
  }

  /**
   * Tuning handles we have over the CONSTRAINED_SOLVEPNP {@link PhotonPoseEstimator.PoseStrategy}.
   * Internally, the cost function is a sum-squared of pixel reprojection error + (optionally)
   * heading error * heading scale factor.
   *
   * @param headingFree If true, heading is completely free to vary. If false, heading excursions
   *     from the provided heading measurement will be penalized.
   * @param headingScaleFactor If headingFree is false, this weights the cost of changing our robot
   *     heading estimate against the tag corner reprojection error cost.
   */
  public static final record ConstrainedSolvepnpParams(
      boolean headingFree, double headingScaleFactor) {}

  private AprilTagFieldLayout fieldTags;
  private TargetModel tagModel = TargetModel.kAprilTag36h11;
  private PoseStrategy primaryStrategy;
  private PoseStrategy multiTagFallbackStrategy = PoseStrategy.LOWEST_AMBIGUITY;
  private Transform3d robotToCamera;

  private Pose3d lastPose;
  private Pose3d referencePose;
  protected double poseCacheTimestampSeconds = -1;
  private final Set<Integer> reportedErrors = new HashSet<>();

  private final TimeInterpolatableBuffer<Rotation2d> headingBuffer =
      TimeInterpolatableBuffer.createBuffer(1.0);
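  /*
   * A minimal construction/usage sketch (not part of the original source; the camera name, field
   * layout, strategy choice, robot-to-camera transform, and consumePose callback are placeholder
   * assumptions, and getAllUnreadResults() availability depends on the PhotonLib version in use):
   *
   *   PhotonCamera camera = new PhotonCamera("YOUR_CAMERA_NAME");
   *   PhotonPoseEstimator estimator =
   *       new PhotonPoseEstimator(fieldLayout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, robotToCamera);
   *
   *   // Each loop, feed every new pipeline result to the estimator and consume any pose produced.
   *   for (PhotonPipelineResult result : camera.getAllUnreadResults()) {
   *     estimator.update(result).ifPresent(est -> consumePose(est.estimatedPose, est.timestampSeconds));
   *   }
   */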
  /**
   * Create a new PhotonPoseEstimator.
   *
   * @param fieldTags A WPILib {@link AprilTagFieldLayout} linking AprilTag IDs to Pose3d objects
   *     with respect to the FIRST field using the <a href=
   *     "https://docs.wpilib.org/en/stable/docs/software/advanced-controls/geometry/coordinate-systems.html#field-coordinate-system">Field
   *     Coordinate System</a>. Note that setting the origin of this layout object will affect the
   *     results from this class.
   * @param strategy The strategy it should use to determine the best pose.
   * @param robotToCamera Transform3d from the center of the robot to the camera mount position
   *     (i.e., robot ➔ camera) in the <a href=
   *     "https://docs.wpilib.org/en/stable/docs/software/advanced-controls/geometry/coordinate-systems.html#robot-coordinate-system">Robot
   *     Coordinate System</a>.
   */
  public PhotonPoseEstimator(
      AprilTagFieldLayout fieldTags, PoseStrategy strategy, Transform3d robotToCamera) {
    this.fieldTags = fieldTags;
    this.primaryStrategy = strategy;
    this.robotToCamera = robotToCamera;

    if (strategy == PoseStrategy.MULTI_TAG_PNP_ON_RIO
        || strategy == PoseStrategy.CONSTRAINED_SOLVEPNP) {
      OpenCvLoader.forceStaticLoad();
    }

    HAL.report(tResourceType.kResourceType_PhotonPoseEstimator, InstanceCount);
    InstanceCount++;
  }

  /** Invalidates the pose cache. */
  private void invalidatePoseCache() {
    poseCacheTimestampSeconds = -1;
  }

  private void checkUpdate(Object oldObj, Object newObj) {
    if (oldObj != newObj && oldObj != null && !oldObj.equals(newObj)) {
      invalidatePoseCache();
    }
  }

  /**
   * Get the AprilTagFieldLayout being used by the PositionEstimator.
   *
   * <p>Note: Setting the origin of this layout will affect the results from this class.
   *
   * @return the AprilTagFieldLayout
   */
  public AprilTagFieldLayout getFieldTags() {
    return fieldTags;
  }

  /**
   * Set the AprilTagFieldLayout being used by the PositionEstimator.
   *
   * <p>Note: Setting the origin of this layout will affect the results from this class.
   *
   * @param fieldTags the AprilTagFieldLayout
   */
  public void setFieldTags(AprilTagFieldLayout fieldTags) {
    checkUpdate(this.fieldTags, fieldTags);
    this.fieldTags = fieldTags;
  }

  /**
   * Get the TargetModel representing the tags being detected. This is used for on-rio multitag.
   *
   * <p>By default, this is {@link TargetModel#kAprilTag36h11}.
   */
  public TargetModel getTagModel() {
    return tagModel;
  }

  /**
   * Set the TargetModel representing the tags being detected. This is used for on-rio multitag.
   *
   * @param tagModel E.g. {@link TargetModel#kAprilTag16h5}.
   */
  public void setTagModel(TargetModel tagModel) {
    this.tagModel = tagModel;
  }

  /**
   * Get the Position Estimation Strategy being used by the Position Estimator.
   *
   * @return the strategy
   */
  public PoseStrategy getPrimaryStrategy() {
    return primaryStrategy;
  }

  /**
   * Set the Position Estimation Strategy used by the Position Estimator.
   *
   * @param strategy the strategy to set
   */
  public void setPrimaryStrategy(PoseStrategy strategy) {
    checkUpdate(this.primaryStrategy, strategy);

    if (strategy == PoseStrategy.MULTI_TAG_PNP_ON_RIO
        || strategy == PoseStrategy.CONSTRAINED_SOLVEPNP) {
      OpenCvLoader.forceStaticLoad();
    }
    this.primaryStrategy = strategy;
  }
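  /*
   * A typical configuration sketch (an illustrative pairing, not a recommendation from this file):
   * run multi-tag on the coprocessor and fall back to the single-tag lowest-ambiguity estimate
   * when a multi-tag result is unavailable.
   *
   *   estimator.setPrimaryStrategy(PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR);
   *   estimator.setMultiTagFallbackStrategy(PoseStrategy.LOWEST_AMBIGUITY);
   */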
  /**
   * Set the Position Estimation Strategy used in multi-tag mode when only one tag can be seen. Must
   * NOT be MULTI_TAG_PNP.
   *
   * @param strategy the strategy to set
   */
  public void setMultiTagFallbackStrategy(PoseStrategy strategy) {
    checkUpdate(this.multiTagFallbackStrategy, strategy);
    if (strategy == PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR
        || strategy == PoseStrategy.MULTI_TAG_PNP_ON_RIO) {
      DriverStation.reportWarning(
          "Fallback cannot be set to MULTI_TAG_PNP! Setting to lowest ambiguity", false);
      strategy = PoseStrategy.LOWEST_AMBIGUITY;
    }
    this.multiTagFallbackStrategy = strategy;
  }

  /**
   * Return the reference position that is being used by the estimator.
   *
   * @return the referencePose
   */
  public Pose3d getReferencePose() {
    return referencePose;
  }

  /**
   * Update the stored reference pose for use when using the <b>CLOSEST_TO_REFERENCE_POSE</b>
   * strategy.
   *
   * @param referencePose the referencePose to set
   */
  public void setReferencePose(Pose3d referencePose) {
    checkUpdate(this.referencePose, referencePose);
    this.referencePose = referencePose;
  }

  /**
   * Update the stored reference pose for use when using the <b>CLOSEST_TO_REFERENCE_POSE</b>
   * strategy.
   *
   * @param referencePose the referencePose to set
   */
  public void setReferencePose(Pose2d referencePose) {
    setReferencePose(new Pose3d(referencePose));
  }

  /**
   * Update the stored last pose. Useful for setting the initial estimate when using the
   * <b>CLOSEST_TO_LAST_POSE</b> strategy.
   *
   * @param lastPose the lastPose to set
   */
  public void setLastPose(Pose3d lastPose) {
    this.lastPose = lastPose;
  }

  /**
   * Update the stored last pose. Useful for setting the initial estimate when using the
   * <b>CLOSEST_TO_LAST_POSE</b> strategy.
   *
   * @param lastPose the lastPose to set
   */
  public void setLastPose(Pose2d lastPose) {
    setLastPose(new Pose3d(lastPose));
  }

  /**
   * Add robot heading data to buffer. Must be called periodically for the
   * <b>PNP_DISTANCE_TRIG_SOLVE</b> strategy.
   *
   * @param timestampSeconds timestamp of the robot heading data.
   * @param heading Field-relative robot heading at given timestamp. Standard WPILib field
   *     coordinates.
   */
  public void addHeadingData(double timestampSeconds, Rotation3d heading) {
    addHeadingData(timestampSeconds, heading.toRotation2d());
  }

  /**
   * Add robot heading data to buffer. Must be called periodically for the
   * <b>PNP_DISTANCE_TRIG_SOLVE</b> strategy.
   *
   * @param timestampSeconds timestamp of the robot heading data.
   * @param heading Field-relative robot heading at given timestamp. Standard WPILib field
   *     coordinates.
   */
  public void addHeadingData(double timestampSeconds, Rotation2d heading) {
    headingBuffer.addSample(timestampSeconds, heading);
  }

  /**
   * @return The current transform from the center of the robot to the camera mount position
   */
  public Transform3d getRobotToCameraTransform() {
    return robotToCamera;
  }

  /**
   * Useful for pan and tilt mechanisms and such.
   *
   * @param robotToCamera The current transform from the center of the robot to the camera mount
   *     position
   */
  public void setRobotToCameraTransform(Transform3d robotToCamera) {
    this.robotToCamera = robotToCamera;
  }
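  /*
   * For the PNP_DISTANCE_TRIG_SOLVE and CONSTRAINED_SOLVEPNP strategies, heading data must be fed
   * every robot loop. A minimal sketch (the gyro accessor and the Timer timestamp source are
   * placeholders for whatever heading source and time base the robot actually uses):
   *
   *   estimator.addHeadingData(Timer.getFPGATimestamp(), gyro.getRotation2d());
   */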
  /**
   * Updates the estimated position of the robot, assuming no camera calibration is required for the
   * selected strategy. Returns empty if:
   *
   * <ul>
   *   <li>The timestamp of the provided pipeline result is the same as in the previous call to
   *       {@code update()}.
   *   <li>No targets were found in the pipeline results.
   * </ul>
   *
   * Will report a warning if the strategy is multi-tag-on-rio, because camera calibration data is
   * not provided in this overload.
   *
   * @param cameraResult The latest pipeline result from the camera
   * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
   *     create the estimate.
   */
  public Optional<EstimatedRobotPose> update(PhotonPipelineResult cameraResult) {
    return update(cameraResult, Optional.empty(), Optional.empty());
  }

  /**
   * Updates the estimated position of the robot. Returns empty if:
   *
   * <ul>
   *   <li>The timestamp of the provided pipeline result is the same as in the previous call to
   *       {@code update()}.
   *   <li>No targets were found in the pipeline results.
   *   <li>The strategy is CONSTRAINED_SOLVEPNP, but no constrainedPnpParams were provided (use the
   *       other function overload).
   * </ul>
   *
   * @param cameraMatrix Camera calibration data for multi-tag-on-rio strategy - can be empty
   *     otherwise
   * @param distCoeffs Camera calibration data for multi-tag-on-rio strategy - can be empty
   *     otherwise
   * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
   *     create the estimate.
   */
  public Optional<EstimatedRobotPose> update(
      PhotonPipelineResult cameraResult,
      Optional<Matrix<N3, N3>> cameraMatrix,
      Optional<Matrix<N8, N1>> distCoeffs) {
    return update(cameraResult, cameraMatrix, distCoeffs, Optional.empty());
  }

  /**
   * Updates the estimated position of the robot. Returns empty if:
   *
   * <ul>
   *   <li>The timestamp of the provided pipeline result is the same as in the previous call to
   *       {@code update()}.
   *   <li>No targets were found in the pipeline results.
   *   <li>The strategy is CONSTRAINED_SOLVEPNP, but the provided constrainedPnpParams are empty.
   * </ul>
   *
   * @param cameraMatrix Camera calibration data for multi-tag-on-rio strategy - can be empty
   *     otherwise
   * @param distCoeffs Camera calibration data for multi-tag-on-rio strategy - can be empty
   *     otherwise
   * @param constrainedPnpParams Constrained SolvePNP params, if needed.
   * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
   *     create the estimate.
   */
  public Optional<EstimatedRobotPose> update(
      PhotonPipelineResult cameraResult,
      Optional<Matrix<N3, N3>> cameraMatrix,
      Optional<Matrix<N8, N1>> distCoeffs,
      Optional<ConstrainedSolvepnpParams> constrainedPnpParams) {
    // Time in the past -- give up, since the following if expects times > 0
    if (cameraResult.getTimestampSeconds() < 0) {
      return Optional.empty();
    }

    // If the pose cache timestamp was set, and the result is from the same timestamp, return an
    // empty result
    if (poseCacheTimestampSeconds > 0
        && Math.abs(poseCacheTimestampSeconds - cameraResult.getTimestampSeconds()) < 1e-6) {
      return Optional.empty();
    }

    // Remember the timestamp of the current result used
    poseCacheTimestampSeconds = cameraResult.getTimestampSeconds();

    // If no targets seen, trivial case -- return empty result
    if (!cameraResult.hasTargets()) {
      return Optional.empty();
    }

    return update(
        cameraResult, cameraMatrix, distCoeffs, constrainedPnpParams, this.primaryStrategy);
  }
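  /*
   * A calling sketch for the CONSTRAINED_SOLVEPNP overload (the "camera" calibration accessors and
   * the parameter values are assumptions for illustration, not defaults from this file):
   *
   *   var params = new ConstrainedSolvepnpParams(false, 1.0);
   *   estimator.update(result, camera.getCameraMatrix(), camera.getDistCoeffs(), Optional.of(params));
   */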
  /**
   * Internal convenience method for using a fallback strategy for update(). This should only be
   * called after timestamp checks have been done by another update() overload.
   *
   * @param cameraResult The latest pipeline result from the camera
   * @param strategy The pose strategy to use. Can't be CONSTRAINED_SOLVEPNP.
   * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
   *     create the estimate.
   */
  private Optional<EstimatedRobotPose> update(
      PhotonPipelineResult cameraResult, PoseStrategy strategy) {
    return update(cameraResult, Optional.empty(), Optional.empty(), Optional.empty(), strategy);
  }

  private Optional<EstimatedRobotPose> update(
      PhotonPipelineResult cameraResult,
      Optional<Matrix<N3, N3>> cameraMatrix,
      Optional<Matrix<N8, N1>> distCoeffs,
      Optional<ConstrainedSolvepnpParams> constrainedPnpParams,
      PoseStrategy strategy) {
    Optional<EstimatedRobotPose> estimatedPose =
        switch (strategy) {
          case LOWEST_AMBIGUITY -> lowestAmbiguityStrategy(cameraResult);
          case CLOSEST_TO_CAMERA_HEIGHT -> closestToCameraHeightStrategy(cameraResult);
          case CLOSEST_TO_REFERENCE_POSE ->
              closestToReferencePoseStrategy(cameraResult, referencePose);
          case CLOSEST_TO_LAST_POSE -> {
            setReferencePose(lastPose);
            yield closestToReferencePoseStrategy(cameraResult, referencePose);
          }
          case AVERAGE_BEST_TARGETS -> averageBestTargetsStrategy(cameraResult);
          case MULTI_TAG_PNP_ON_RIO ->
              multiTagOnRioStrategy(cameraResult, cameraMatrix, distCoeffs);
          case MULTI_TAG_PNP_ON_COPROCESSOR -> multiTagOnCoprocStrategy(cameraResult);
          case PNP_DISTANCE_TRIG_SOLVE -> pnpDistanceTrigSolveStrategy(cameraResult);
          case CONSTRAINED_SOLVEPNP ->
              constrainedPnpStrategy(cameraResult, cameraMatrix, distCoeffs, constrainedPnpParams);
        };

    if (estimatedPose.isPresent()) {
      lastPose = estimatedPose.get().estimatedPose;
    }

    return estimatedPose;
  }

  private Optional<EstimatedRobotPose> pnpDistanceTrigSolveStrategy(PhotonPipelineResult result) {
    PhotonTrackedTarget bestTarget = result.getBestTarget();

    if (bestTarget == null) return Optional.empty();

    var headingSampleOpt = headingBuffer.getSample(result.getTimestampSeconds());
    if (headingSampleOpt.isEmpty()) {
      return Optional.empty();
    }
    Rotation2d headingSample = headingSampleOpt.get();

    Translation2d camToTagTranslation =
        new Translation3d(
                bestTarget.getBestCameraToTarget().getTranslation().getNorm(),
                new Rotation3d(
                    0,
                    -Math.toRadians(bestTarget.getPitch()),
                    -Math.toRadians(bestTarget.getYaw())))
            .rotateBy(robotToCamera.getRotation())
            .toTranslation2d()
            .rotateBy(headingSample);

    var tagPoseOpt = fieldTags.getTagPose(bestTarget.getFiducialId());
    if (tagPoseOpt.isEmpty()) {
      return Optional.empty();
    }
    var tagPose2d = tagPoseOpt.get().toPose2d();

    Translation2d fieldToCameraTranslation =
        tagPose2d.getTranslation().plus(camToTagTranslation.unaryMinus());

    Translation2d camToRobotTranslation =
        robotToCamera.getTranslation().toTranslation2d().unaryMinus().rotateBy(headingSample);

    Pose2d robotPose =
        new Pose2d(fieldToCameraTranslation.plus(camToRobotTranslation), headingSample);

    return Optional.of(
        new EstimatedRobotPose(
            new Pose3d(robotPose),
            result.getTimestampSeconds(),
            result.getTargets(),
            PoseStrategy.PNP_DISTANCE_TRIG_SOLVE));
  }

  private Optional<EstimatedRobotPose> constrainedPnpStrategy(
      PhotonPipelineResult result,
      Optional<Matrix<N3, N3>> cameraMatrixOpt,
      Optional<Matrix<N8, N1>> distCoeffsOpt,
      Optional<ConstrainedSolvepnpParams> constrainedPnpParams) {
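    // Overall flow: require calibration data and params, seed the solver from the coprocessor
    // multi-tag estimate (or the fallback strategy), optionally pin the heading to the buffered
    // heading sample, then run the constrained solver, falling back again if it fails.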
    boolean hasCalibData = cameraMatrixOpt.isPresent() && distCoeffsOpt.isPresent();
    // cannot run multitagPNP, use fallback strategy
    if (!hasCalibData) {
      return update(
          result, cameraMatrixOpt, distCoeffsOpt, Optional.empty(), this.multiTagFallbackStrategy);
    }

    if (constrainedPnpParams.isEmpty()) {
      return Optional.empty();
    }

    // Need heading if heading fixed
    if (!constrainedPnpParams.get().headingFree
        && headingBuffer.getSample(result.getTimestampSeconds()).isEmpty()) {
      return update(
          result, cameraMatrixOpt, distCoeffsOpt, Optional.empty(), this.multiTagFallbackStrategy);
    }

    Pose3d fieldToRobotSeed;

    // Attempt to use multi-tag to get a pose estimate seed
    if (result.getMultiTagResult().isPresent()) {
      fieldToRobotSeed =
          Pose3d.kZero.plus(
              result.getMultiTagResult().get().estimatedPose.best.plus(robotToCamera.inverse()));
    } else {
      // HACK - use fallback strategy to gimme a seed pose
      // TODO - make sure nested update doesn't break state
      var nestedUpdate =
          update(
              result,
              cameraMatrixOpt,
              distCoeffsOpt,
              Optional.empty(),
              this.multiTagFallbackStrategy);
      if (nestedUpdate.isEmpty()) {
        // best i can do is bail
        return Optional.empty();
      }
      fieldToRobotSeed = nestedUpdate.get().estimatedPose;
    }

    if (!constrainedPnpParams.get().headingFree) {
      // If heading fixed, force rotation component
      fieldToRobotSeed =
          new Pose3d(
              fieldToRobotSeed.getTranslation(),
              new Rotation3d(headingBuffer.getSample(result.getTimestampSeconds()).get()));
    }

    var pnpResult =
        VisionEstimation.estimateRobotPoseConstrainedSolvepnp(
            cameraMatrixOpt.get(),
            distCoeffsOpt.get(),
            result.getTargets(),
            robotToCamera,
            fieldToRobotSeed,
            fieldTags,
            tagModel,
            constrainedPnpParams.get().headingFree,
            headingBuffer.getSample(result.getTimestampSeconds()).get(),
            constrainedPnpParams.get().headingScaleFactor);
    // try fallback strategy if solvePNP fails for some reason
    if (!pnpResult.isPresent())
      return update(
          result, cameraMatrixOpt, distCoeffsOpt, Optional.empty(), this.multiTagFallbackStrategy);
    var best = Pose3d.kZero.plus(pnpResult.get().best); // field-to-robot

    return Optional.of(
        new EstimatedRobotPose(
            best,
            result.getTimestampSeconds(),
            result.getTargets(),
            PoseStrategy.CONSTRAINED_SOLVEPNP));
  }

  private Optional<EstimatedRobotPose> multiTagOnCoprocStrategy(PhotonPipelineResult result) {
    if (result.getMultiTagResult().isEmpty()) {
      return update(result, this.multiTagFallbackStrategy);
    }

    var best_tf = result.getMultiTagResult().get().estimatedPose.best;
    var best =
        Pose3d.kZero
            .plus(best_tf) // field-to-camera
            .relativeTo(fieldTags.getOrigin())
            .plus(robotToCamera.inverse()); // field-to-robot
    return Optional.of(
        new EstimatedRobotPose(
            best,
            result.getTimestampSeconds(),
            result.getTargets(),
            PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR));
  }

  private Optional<EstimatedRobotPose> multiTagOnRioStrategy(
      PhotonPipelineResult result,
      Optional<Matrix<N3, N3>> cameraMatrixOpt,
      Optional<Matrix<N8, N1>> distCoeffsOpt) {
    if (cameraMatrixOpt.isEmpty() || distCoeffsOpt.isEmpty()) {
      DriverStation.reportWarning(
          "No camera calibration data provided for multi-tag-on-rio",
          Thread.currentThread().getStackTrace());
      return update(result, this.multiTagFallbackStrategy);
    }
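    // On-RIO multi-tag PnP needs at least two visible tags; with fewer, defer to the fallback
    // strategy.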
    if (result.getTargets().size() < 2) {
      return update(result, this.multiTagFallbackStrategy);
    }

    var pnpResult =
        VisionEstimation.estimateCamPosePNP(
            cameraMatrixOpt.get(), distCoeffsOpt.get(), result.getTargets(), fieldTags, tagModel);
    // try fallback strategy if solvePNP fails for some reason
    if (!pnpResult.isPresent())
      return update(
          result, cameraMatrixOpt, distCoeffsOpt, Optional.empty(), this.multiTagFallbackStrategy);

    var best =
        Pose3d.kZero
            .plus(pnpResult.get().best) // field-to-camera
            .plus(robotToCamera.inverse()); // field-to-robot

    return Optional.of(
        new EstimatedRobotPose(
            best,
            result.getTimestampSeconds(),
            result.getTargets(),
            PoseStrategy.MULTI_TAG_PNP_ON_RIO));
  }

  /**
   * Return the estimated position of the robot with the lowest position ambiguity from a pipeline
   * result.
   *
   * @param result pipeline result
   * @return the estimated position of the robot in the FCS and the estimated timestamp of this
   *     estimation.
   */
  private Optional<EstimatedRobotPose> lowestAmbiguityStrategy(PhotonPipelineResult result) {
    PhotonTrackedTarget lowestAmbiguityTarget = null;

    double lowestAmbiguityScore = 10;

    for (PhotonTrackedTarget target : result.targets) {
      double targetPoseAmbiguity = target.getPoseAmbiguity();
      // Make sure the target is a Fiducial target.
      if (targetPoseAmbiguity != -1 && targetPoseAmbiguity < lowestAmbiguityScore) {
        lowestAmbiguityScore = targetPoseAmbiguity;
        lowestAmbiguityTarget = target;
      }
    }

    // Although there are confirmed to be targets, none of them may be fiducial targets.
    if (lowestAmbiguityTarget == null) return Optional.empty();

    int targetFiducialId = lowestAmbiguityTarget.getFiducialId();

    Optional<Pose3d> targetPosition = fieldTags.getTagPose(targetFiducialId);

    if (targetPosition.isEmpty()) {
      reportFiducialPoseError(targetFiducialId);
      return Optional.empty();
    }

    return Optional.of(
        new EstimatedRobotPose(
            targetPosition
                .get()
                .transformBy(lowestAmbiguityTarget.getBestCameraToTarget().inverse())
                .transformBy(robotToCamera.inverse()),
            result.getTimestampSeconds(),
            result.getTargets(),
            PoseStrategy.LOWEST_AMBIGUITY));
  }

  /**
   * Return the estimated position of the robot using the target whose estimated camera height is
   * closest to the actual camera height.
   *
   * @param result pipeline result
   * @return the estimated position of the robot in the FCS and the estimated timestamp of this
   *     estimation.
   */
  private Optional<EstimatedRobotPose> closestToCameraHeightStrategy(PhotonPipelineResult result) {
    double smallestHeightDifference = 10e9;
    EstimatedRobotPose closestHeightTarget = null;

    for (PhotonTrackedTarget target : result.targets) {
      int targetFiducialId = target.getFiducialId();

      // Don't report errors for non-fiducial targets. This could also be resolved by adding -1 to
      // the initial HashSet.
      if (targetFiducialId == -1) continue;

      Optional<Pose3d> targetPosition = fieldTags.getTagPose(target.getFiducialId());

      if (targetPosition.isEmpty()) {
        reportFiducialPoseError(target.getFiducialId());
        continue;
      }
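      // Evaluate both the "best" and "alternate" PnP solutions for this tag and keep whichever
      // implies a camera height closest to the configured robotToCamera Z.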
      double alternateTransformDelta =
          Math.abs(
              robotToCamera.getZ()
                  - targetPosition
                      .get()
                      .transformBy(target.getAlternateCameraToTarget().inverse())
                      .getZ());
      double bestTransformDelta =
          Math.abs(
              robotToCamera.getZ()
                  - targetPosition
                      .get()
                      .transformBy(target.getBestCameraToTarget().inverse())
                      .getZ());

      if (alternateTransformDelta < smallestHeightDifference) {
        smallestHeightDifference = alternateTransformDelta;
        closestHeightTarget =
            new EstimatedRobotPose(
                targetPosition
                    .get()
                    .transformBy(target.getAlternateCameraToTarget().inverse())
                    .transformBy(robotToCamera.inverse()),
                result.getTimestampSeconds(),
                result.getTargets(),
                PoseStrategy.CLOSEST_TO_CAMERA_HEIGHT);
      }

      if (bestTransformDelta < smallestHeightDifference) {
        smallestHeightDifference = bestTransformDelta;
        closestHeightTarget =
            new EstimatedRobotPose(
                targetPosition
                    .get()
                    .transformBy(target.getBestCameraToTarget().inverse())
                    .transformBy(robotToCamera.inverse()),
                result.getTimestampSeconds(),
                result.getTargets(),
                PoseStrategy.CLOSEST_TO_CAMERA_HEIGHT);
      }
    }

    // Need to null check here in case none of the provided targets are fiducial.
    return Optional.ofNullable(closestHeightTarget);
  }

  /**
   * Return the estimated position of the robot using the target whose estimated pose has the
   * smallest translation difference from the reference pose.
   *
   * @param result pipeline result
   * @param referencePose reference pose to check vector magnitude difference against.
   * @return the estimated position of the robot in the FCS and the estimated timestamp of this
   *     estimation.
   */
  private Optional<EstimatedRobotPose> closestToReferencePoseStrategy(
      PhotonPipelineResult result, Pose3d referencePose) {
    if (referencePose == null) {
      DriverStation.reportError(
          "[PhotonPoseEstimator] Tried to use reference pose strategy without setting the reference!",
          false);
      return Optional.empty();
    }

    double smallestPoseDelta = 10e9;
    EstimatedRobotPose lowestDeltaPose = null;

    for (PhotonTrackedTarget target : result.targets) {
      int targetFiducialId = target.getFiducialId();

      // Don't report errors for non-fiducial targets. This could also be resolved by adding -1 to
      // the initial HashSet.
      if (targetFiducialId == -1) continue;

      Optional<Pose3d> targetPosition = fieldTags.getTagPose(target.getFiducialId());

      if (targetPosition.isEmpty()) {
        reportFiducialPoseError(targetFiducialId);
        continue;
      }
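      // Project both the best and alternate PnP solutions into field space and keep whichever
      // lands closer to the reference pose.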
      Pose3d altTransformPosition =
          targetPosition
              .get()
              .transformBy(target.getAlternateCameraToTarget().inverse())
              .transformBy(robotToCamera.inverse());
      Pose3d bestTransformPosition =
          targetPosition
              .get()
              .transformBy(target.getBestCameraToTarget().inverse())
              .transformBy(robotToCamera.inverse());

      double altDifference = Math.abs(calculateDifference(referencePose, altTransformPosition));
      double bestDifference = Math.abs(calculateDifference(referencePose, bestTransformPosition));

      if (altDifference < smallestPoseDelta) {
        smallestPoseDelta = altDifference;
        lowestDeltaPose =
            new EstimatedRobotPose(
                altTransformPosition,
                result.getTimestampSeconds(),
                result.getTargets(),
                PoseStrategy.CLOSEST_TO_REFERENCE_POSE);
      }
      if (bestDifference < smallestPoseDelta) {
        smallestPoseDelta = bestDifference;
        lowestDeltaPose =
            new EstimatedRobotPose(
                bestTransformPosition,
                result.getTimestampSeconds(),
                result.getTargets(),
                PoseStrategy.CLOSEST_TO_REFERENCE_POSE);
      }
    }
    return Optional.ofNullable(lowestDeltaPose);
  }

  /**
   * Return the average of the best target poses using ambiguity as weight.
   *
   * @param result pipeline result
   * @return the estimated position of the robot in the FCS and the estimated timestamp of this
   *     estimation.
   */
  private Optional<EstimatedRobotPose> averageBestTargetsStrategy(PhotonPipelineResult result) {
    List<Pair<PhotonTrackedTarget, Pose3d>> estimatedRobotPoses = new ArrayList<>();
    double totalAmbiguity = 0;

    for (PhotonTrackedTarget target : result.targets) {
      int targetFiducialId = target.getFiducialId();

      // Don't report errors for non-fiducial targets. This could also be resolved by adding -1 to
      // the initial HashSet.
      if (targetFiducialId == -1) continue;

      Optional<Pose3d> targetPosition = fieldTags.getTagPose(target.getFiducialId());

      if (targetPosition.isEmpty()) {
        reportFiducialPoseError(targetFiducialId);
        continue;
      }

      double targetPoseAmbiguity = target.getPoseAmbiguity();

      // Pose ambiguity is 0, use that pose
      if (targetPoseAmbiguity == 0) {
        return Optional.of(
            new EstimatedRobotPose(
                targetPosition
                    .get()
                    .transformBy(target.getBestCameraToTarget().inverse())
                    .transformBy(robotToCamera.inverse()),
                result.getTimestampSeconds(),
                result.getTargets(),
                PoseStrategy.AVERAGE_BEST_TARGETS));
      }

      totalAmbiguity += 1.0 / target.getPoseAmbiguity();

      estimatedRobotPoses.add(
          new Pair<>(
              target,
              targetPosition
                  .get()
                  .transformBy(target.getBestCameraToTarget().inverse())
                  .transformBy(robotToCamera.inverse())));
    }

    // Take the average

    Translation3d transform = new Translation3d();
    Rotation3d rotation = new Rotation3d();

    if (estimatedRobotPoses.isEmpty()) return Optional.empty();

    for (Pair<PhotonTrackedTarget, Pose3d> pair : estimatedRobotPoses) {
      // Total ambiguity is non-zero confirmed because if it was zero, that pose was returned.
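      // Weight each pose by its inverse ambiguity, normalized so the weights sum to 1:
      // weight_i = (1 / ambiguity_i) / totalAmbiguity.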
      double weight = (1.0 / pair.getFirst().getPoseAmbiguity()) / totalAmbiguity;
      Pose3d estimatedPose = pair.getSecond();
      transform = transform.plus(estimatedPose.getTranslation().times(weight));
      rotation = rotation.plus(estimatedPose.getRotation().times(weight));
    }

    return Optional.of(
        new EstimatedRobotPose(
            new Pose3d(transform, rotation),
            result.getTimestampSeconds(),
            result.getTargets(),
            PoseStrategy.AVERAGE_BEST_TARGETS));
  }

  /**
   * Difference is defined as the vector magnitude between the two poses
   *
   * @return The absolute "difference" (>=0) between two Pose3ds.
   */
  private double calculateDifference(Pose3d x, Pose3d y) {
    return x.getTranslation().getDistance(y.getTranslation());
  }

  private void reportFiducialPoseError(int fiducialId) {
    if (!reportedErrors.contains(fiducialId)) {
      DriverStation.reportError(
          "[PhotonPoseEstimator] Tried to get pose of unknown AprilTag: " + fiducialId, false);
      reportedErrors.add(fiducialId);
    }
  }
}