/*
 * MIT License
 *
 * Copyright (c) PhotonVision
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

package org.photonvision;

import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.cscore.OpenCvLoader;
import edu.wpi.first.hal.FRCNetComm.tResourceType;
import edu.wpi.first.hal.HAL;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Pair;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.geometry.Translation3d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import edu.wpi.first.math.numbers.N8;
import edu.wpi.first.wpilibj.DriverStation;
import java.util.*;
import org.photonvision.estimation.TargetModel;
import org.photonvision.estimation.VisionEstimation;
import org.photonvision.targeting.PhotonPipelineResult;
import org.photonvision.targeting.PhotonTrackedTarget;

/**
 * The PhotonPoseEstimator class filters or combines readings from all the AprilTags visible at a
 * given timestamp on the field to produce a single robot-in-field pose, using the strategy set
 * below. Example usage can be found in our apriltagExample example project.
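 *
 * <p>A minimal usage sketch is shown below. It is illustrative only: the camera name, mounting
 * transform, field layout, and the drivetrain pose estimator consuming the result are all
 * placeholders, and the PhotonLib calls assume a current PhotonLib release.
 *
 * <pre>{@code
 * PhotonCamera camera = new PhotonCamera("YOUR_CAMERA_NAME");
 * PhotonPoseEstimator photonEstimator =
 *         new PhotonPoseEstimator(
 *                 AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField),
 *                 PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR,
 *                 new Transform3d(new Translation3d(0.3, 0.0, 0.2), new Rotation3d()));
 *
 * // In a periodic method:
 * for (PhotonPipelineResult result : camera.getAllUnreadResults()) {
 *     photonEstimator
 *             .update(result)
 *             .ifPresent(
 *                     est ->
 *                             drivePoseEstimator.addVisionMeasurement(
 *                                     est.estimatedPose.toPose2d(), est.timestampSeconds));
 * }
 * }</pre>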
 */
public class PhotonPoseEstimator {
    private static int InstanceCount = 1;

    /** Position estimation strategies that can be used by the {@link PhotonPoseEstimator} class. */
    public enum PoseStrategy {
        /** Choose the Pose with the lowest ambiguity. */
        LOWEST_AMBIGUITY,

        /** Choose the Pose which is closest to the camera height. */
        CLOSEST_TO_CAMERA_HEIGHT,

        /** Choose the Pose which is closest to a set Reference position. */
        CLOSEST_TO_REFERENCE_POSE,

        /** Choose the Pose which is closest to the last pose calculated. */
        CLOSEST_TO_LAST_POSE,

        /** Return the average of the best target poses using ambiguity as weight. */
        AVERAGE_BEST_TARGETS,

        /**
         * Use all visible tags to compute a single pose estimate on coprocessor. This option needs to
         * be enabled on the PhotonVision web UI as well.
         */
        MULTI_TAG_PNP_ON_COPROCESSOR,

        /**
         * Use all visible tags to compute a single pose estimate. This runs on the RoboRIO, and can
         * take a lot of time.
         */
        MULTI_TAG_PNP_ON_RIO,

        /**
         * Use distance data from best visible tag to compute a Pose. This runs on the RoboRIO in order
         * to access the robot's yaw heading, and MUST have addHeadingData called every frame so heading
         * data is up-to-date.
         *
         * <p>Yields a Pose2d in estimatedRobotPose (0 for z, roll, pitch)
         *
         * <p>https://www.chiefdelphi.com/t/frc-6328-mechanical-advantage-2025-build-thread/477314/98
         */
        PNP_DISTANCE_TRIG_SOLVE,

        /**
         * Solve a constrained version of the Perspective-n-Point problem with the robot's drivebase
         * flat on the floor. This computation takes place on the RoboRIO, and typically takes no
         * more than 2 ms. See {@link PhotonPoseEstimator.ConstrainedSolvepnpParams} and {@link
         * org.photonvision.jni.ConstrainedSolvepnpJni} for details and tuning handles this strategy
         * exposes. This strategy needs addHeadingData called every frame so heading data is up-to-date.
         * If Multi-Tag PNP is enabled on the coprocessor, it will be used to provide an initial seed to
         * the optimization algorithm -- otherwise, the multi-tag fallback strategy will be used as the
         * seed.
         */
        CONSTRAINED_SOLVEPNP
    }

    /**
     * Tuning handles we have over the CONSTRAINED_SOLVEPNP {@link PhotonPoseEstimator.PoseStrategy}.
     * Internally, the cost function is the sum of squared pixel reprojection errors plus
     * (optionally) the heading error times a heading scale factor.
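     *
     * <p>A construction sketch is shown below; {@code estimator}, {@code camera}, and {@code
     * result} are assumed to exist elsewhere, and the heading weight of 1.0 is an arbitrary
     * example rather than a tuned value.
     *
     * <pre>{@code
     * var params = new PhotonPoseEstimator.ConstrainedSolvepnpParams(false, 1.0);
     * estimator.update(result, camera.getCameraMatrix(), camera.getDistCoeffs(), Optional.of(params));
     * }</pre>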
     *
     * @param headingFree If true, heading is completely free to vary. If false, heading excursions
     *     from the provided heading measurement will be penalized.
     * @param headingScaleFactor If headingFree is false, this weights the cost of changing our robot
     *     heading estimate against the tag corner reprojection error cost.
     */
    public static final record ConstrainedSolvepnpParams(
            boolean headingFree, double headingScaleFactor) {}

    private AprilTagFieldLayout fieldTags;
    private TargetModel tagModel = TargetModel.kAprilTag36h11;
    private PoseStrategy primaryStrategy;
    private PoseStrategy multiTagFallbackStrategy = PoseStrategy.LOWEST_AMBIGUITY;
    private Transform3d robotToCamera;

    private Pose3d lastPose;
    private Pose3d referencePose;
    protected double poseCacheTimestampSeconds = -1;
    private final Set<Integer> reportedErrors = new HashSet<>();

    private final TimeInterpolatableBuffer<Rotation2d> headingBuffer =
            TimeInterpolatableBuffer.createBuffer(1.0);

    /**
     * Create a new PhotonPoseEstimator.
     *
     * @param fieldTags A WPILib {@link AprilTagFieldLayout} linking AprilTag IDs to Pose3d objects
     *     with respect to the FIRST field using the <a href=
     *     "https://docs.wpilib.org/en/stable/docs/software/advanced-controls/geometry/coordinate-systems.html#field-coordinate-system">Field
     *     Coordinate System</a>. Note that setting the origin of this layout object will affect the
     *     results from this class.
     * @param strategy The strategy it should use to determine the best pose.
     * @param robotToCamera Transform3d from the center of the robot to the camera mount position (i.e.,
     *     robot ➔ camera) in the <a href=
     *     "https://docs.wpilib.org/en/stable/docs/software/advanced-controls/geometry/coordinate-systems.html#robot-coordinate-system">Robot
     *     Coordinate System</a>.
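     *     <p>For example, a camera mounted 0.5 m forward of robot center, 0.25 m up, and pitched
     *     15 degrees upward could be described as below (the numbers are placeholders, not a
     *     recommended mounting):
     *     <pre>{@code
     *     new Transform3d(
     *             new Translation3d(0.5, 0.0, 0.25),
     *             new Rotation3d(0.0, Math.toRadians(-15), 0.0))
     *     }</pre>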
     */
    public PhotonPoseEstimator(
            AprilTagFieldLayout fieldTags, PoseStrategy strategy, Transform3d robotToCamera) {
        this.fieldTags = fieldTags;
        this.primaryStrategy = strategy;
        this.robotToCamera = robotToCamera;

        if (strategy == PoseStrategy.MULTI_TAG_PNP_ON_RIO
                || strategy == PoseStrategy.CONSTRAINED_SOLVEPNP) {
            OpenCvLoader.forceStaticLoad();
        }

        HAL.report(tResourceType.kResourceType_PhotonPoseEstimator, InstanceCount);
        InstanceCount++;
    }

    /** Invalidates the pose cache. */
    private void invalidatePoseCache() {
        poseCacheTimestampSeconds = -1;
    }

    private void checkUpdate(Object oldObj, Object newObj) {
        if (!Objects.equals(oldObj, newObj)) {
            invalidatePoseCache();
        }
    }

    /**
     * Get the AprilTagFieldLayout being used by the PositionEstimator.
     *
     * <p>Note: Setting the origin of this layout will affect the results from this class.
     *
     * @return the AprilTagFieldLayout
     */
    public AprilTagFieldLayout getFieldTags() {
        return fieldTags;
    }

    /**
     * Set the AprilTagFieldLayout being used by the PositionEstimator.
     *
     * <p>Note: Setting the origin of this layout will affect the results from this class.
     *
     * @param fieldTags the AprilTagFieldLayout
     */
    public void setFieldTags(AprilTagFieldLayout fieldTags) {
        checkUpdate(this.fieldTags, fieldTags);
        this.fieldTags = fieldTags;
    }

    /**
     * Get the TargetModel representing the tags being detected. This is used for on-rio multitag.
     *
     * <p>By default, this is {@link TargetModel#kAprilTag36h11}.
     */
    public TargetModel getTagModel() {
        return tagModel;
    }

    /**
     * Set the TargetModel representing the tags being detected. This is used for on-rio multitag.
     *
     * @param tagModel E.g. {@link TargetModel#kAprilTag16h5}.
     */
    public void setTagModel(TargetModel tagModel) {
        this.tagModel = tagModel;
    }

    /**
     * Get the Position Estimation Strategy being used by the Position Estimator.
     *
     * @return the strategy
     */
    public PoseStrategy getPrimaryStrategy() {
        return primaryStrategy;
    }

    /**
     * Set the Position Estimation Strategy used by the Position Estimator.
     *
     * @param strategy the strategy to set
     */
    public void setPrimaryStrategy(PoseStrategy strategy) {
        checkUpdate(this.primaryStrategy, strategy);

        if (strategy == PoseStrategy.MULTI_TAG_PNP_ON_RIO
                || strategy == PoseStrategy.CONSTRAINED_SOLVEPNP) {
            OpenCvLoader.forceStaticLoad();
        }
        this.primaryStrategy = strategy;
    }

    /**
     * Set the Position Estimation Strategy used in multi-tag mode when only one tag can be seen.
     * Must NOT be a MULTI_TAG_PNP strategy.
     *
     * @param strategy the strategy to set
     */
    public void setMultiTagFallbackStrategy(PoseStrategy strategy) {
        checkUpdate(this.multiTagFallbackStrategy, strategy);
        if (strategy == PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR
                || strategy == PoseStrategy.MULTI_TAG_PNP_ON_RIO) {
            DriverStation.reportWarning(
                    "Fallback cannot be set to MULTI_TAG_PNP! Setting to lowest ambiguity", false);
            strategy = PoseStrategy.LOWEST_AMBIGUITY;
        }
        this.multiTagFallbackStrategy = strategy;
    }

    /**
     * Return the reference position that is being used by the estimator.
     *
     * @return the referencePose
     */
    public Pose3d getReferencePose() {
        return referencePose;
    }

    /**
     * Update the stored reference pose for use when using the <b>CLOSEST_TO_REFERENCE_POSE</b>
     * strategy.
     *
     * @param referencePose the referencePose to set
     */
    public void setReferencePose(Pose3d referencePose) {
        checkUpdate(this.referencePose, referencePose);
        this.referencePose = referencePose;
    }

    /**
     * Update the stored reference pose for use when using the <b>CLOSEST_TO_REFERENCE_POSE</b>
     * strategy.
     *
     * @param referencePose the referencePose to set
     */
    public void setReferencePose(Pose2d referencePose) {
        setReferencePose(new Pose3d(referencePose));
    }

    /**
     * Update the stored last pose. Useful for setting the initial estimate when using the
     * <b>CLOSEST_TO_LAST_POSE</b> strategy.
     *
     * @param lastPose the lastPose to set
     */
    public void setLastPose(Pose3d lastPose) {
        this.lastPose = lastPose;
    }

    /**
     * Update the stored last pose. Useful for setting the initial estimate when using the
     * <b>CLOSEST_TO_LAST_POSE</b> strategy.
     *
     * @param lastPose the lastPose to set
     */
    public void setLastPose(Pose2d lastPose) {
        setLastPose(new Pose3d(lastPose));
    }

    /**
     * Add robot heading data to buffer. Must be called periodically for the
     * <b>PNP_DISTANCE_TRIG_SOLVE</b> strategy.
     *
     * @param timestampSeconds Timestamp of the robot heading data.
     * @param heading Field-relative robot heading at given timestamp. Standard WPILIB field
     *     coordinates.
     */
    public void addHeadingData(double timestampSeconds, Rotation3d heading) {
        addHeadingData(timestampSeconds, heading.toRotation2d());
    }

    /**
     * Add robot heading data to buffer. Must be called periodically for the
     * <b>PNP_DISTANCE_TRIG_SOLVE</b> strategy.
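     *
     * <p>A typical pattern (a sketch; {@code gyro} and {@code estimator} are placeholders for your
     * own gyro and estimator instances) is to feed the heading every robot loop:
     *
     * <pre>{@code
     * // In robotPeriodic() / periodic():
     * estimator.addHeadingData(Timer.getFPGATimestamp(), gyro.getRotation2d());
     * }</pre>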
     *
     * @param timestampSeconds Timestamp of the robot heading data.
     * @param heading Field-relative robot heading at given timestamp. Standard WPILIB field
     *     coordinates.
     */
    public void addHeadingData(double timestampSeconds, Rotation2d heading) {
        headingBuffer.addSample(timestampSeconds, heading);
    }

    /**
     * Clears all heading data in the buffer, and adds a new seed. Useful for preventing estimates
     * from utilizing heading data provided prior to a pose or rotation reset.
     *
     * @param timestampSeconds Timestamp of the robot heading data.
     * @param heading Field-relative robot heading at given timestamp. Standard WPILIB field
     *     coordinates.
     */
    public void resetHeadingData(double timestampSeconds, Rotation3d heading) {
        headingBuffer.clear();
        addHeadingData(timestampSeconds, heading);
    }

    /**
     * Clears all heading data in the buffer, and adds a new seed. Useful for preventing estimates
     * from utilizing heading data provided prior to a pose or rotation reset.
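     *
     * <p>For example (a sketch; {@code drivetrain} and {@code estimator} are placeholders), call
     * this alongside a drivetrain pose reset:
     *
     * <pre>{@code
     * drivetrain.resetPose(newPose);
     * estimator.resetHeadingData(Timer.getFPGATimestamp(), newPose.getRotation());
     * }</pre>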
     *
     * @param timestampSeconds Timestamp of the robot heading data.
     * @param heading Field-relative robot heading at given timestamp. Standard WPILIB field
     *     coordinates.
     */
    public void resetHeadingData(double timestampSeconds, Rotation2d heading) {
        headingBuffer.clear();
        addHeadingData(timestampSeconds, heading);
    }

    /**
     * @return The current transform from the center of the robot to the camera mount position
     */
    public Transform3d getRobotToCameraTransform() {
        return robotToCamera;
    }

    /**
     * Set the transform from the center of the robot to the camera mount position. Useful for
     * pan-and-tilt mechanisms where the camera moves relative to the robot.
     *
     * @param robotToCamera The current transform from the center of the robot to the camera mount
     *     position
     */
    public void setRobotToCameraTransform(Transform3d robotToCamera) {
        this.robotToCamera = robotToCamera;
    }

    /**
     * Updates the estimated position of the robot, assuming no camera calibration is required for the
     * selected strategy. Returns empty if:
     *
     * <ul>
     *   <li>The timestamp of the provided pipeline result is the same as in the previous call to
     *       {@code update()}.
     *   <li>No targets were found in the pipeline results.
     * </ul>
     *
     * Will report a warning if strategy is multi-tag-on-rio because camera calibration data is not
     * provided in this overload.
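     *
     * <p>For example (a sketch; {@code camera} is assumed to be a PhotonLib {@code PhotonCamera}),
     * feeding the same result twice only yields an estimate the first time:
     *
     * <pre>{@code
     * PhotonPipelineResult result = camera.getLatestResult();
     * Optional<EstimatedRobotPose> first = estimator.update(result); // present if targets were seen
     * Optional<EstimatedRobotPose> second = estimator.update(result); // empty: same timestamp
     * }</pre>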
     *
     * @param cameraResult The latest pipeline result from the camera
     * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate.
     */
    public Optional<EstimatedRobotPose> update(PhotonPipelineResult cameraResult) {
        return update(cameraResult, Optional.empty(), Optional.empty());
    }

    /**
     * Updates the estimated position of the robot. Returns empty if:
     *
     * <ul>
     *   <li>The timestamp of the provided pipeline result is the same as in the previous call to
     *       {@code update()}.
     *   <li>No targets were found in the pipeline results.
     *   <li>The strategy is CONSTRAINED_SOLVEPNP, but no constrainedPnpParams were provided (use the
     *       other function overload).
     * </ul>
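     *
     * <p>For example (a sketch; it assumes a PhotonLib {@code PhotonCamera} named {@code camera}
     * that has received calibration data from the coprocessor):
     *
     * <pre>{@code
     * estimator.update(result, camera.getCameraMatrix(), camera.getDistCoeffs());
     * }</pre>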
     *
     * @param cameraMatrix Camera calibration data for multi-tag-on-rio strategy - can be empty
     *     otherwise
     * @param distCoeffs Camera calibration data for multi-tag-on-rio strategy - can be empty
     *     otherwise
     * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate.
     */
    public Optional<EstimatedRobotPose> update(
            PhotonPipelineResult cameraResult,
            Optional<Matrix<N3, N3>> cameraMatrix,
            Optional<Matrix<N8, N1>> distCoeffs) {
        return update(cameraResult, cameraMatrix, distCoeffs, Optional.empty());
    }

    /**
     * Updates the estimated position of the robot. Returns empty if:
     *
     * <ul>
     *   <li>The timestamp of the provided pipeline result is the same as in the previous call to
     *       {@code update()}.
     *   <li>No targets were found in the pipeline results.
     *   <li>The strategy is CONSTRAINED_SOLVEPNP, but the provided constrainedPnpParams are empty.
     * </ul>
     *
     * @param cameraMatrix Camera calibration data for multi-tag-on-rio strategy - can be empty
     *     otherwise
     * @param distCoeffs Camera calibration data for multi-tag-on-rio strategy - can be empty
     *     otherwise
     * @param constrainedPnpParams Constrained SolvePNP params, if needed.
     * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate.
     */
    public Optional<EstimatedRobotPose> update(
            PhotonPipelineResult cameraResult,
            Optional<Matrix<N3, N3>> cameraMatrix,
            Optional<Matrix<N8, N1>> distCoeffs,
            Optional<ConstrainedSolvepnpParams> constrainedPnpParams) {
        // Time in the past -- give up, since the following if expects times > 0
        if (cameraResult.getTimestampSeconds() < 0) {
            return Optional.empty();
        }

        // If the pose cache timestamp was set, and the result is from the same timestamp, return
        // an empty result
        if (poseCacheTimestampSeconds > 0
                && Math.abs(poseCacheTimestampSeconds - cameraResult.getTimestampSeconds()) < 1e-6) {
            return Optional.empty();
        }

        // Remember the timestamp of the current result used
        poseCacheTimestampSeconds = cameraResult.getTimestampSeconds();

        // If no targets seen, trivial case -- return empty result
        if (!cameraResult.hasTargets()) {
            return Optional.empty();
        }

        return update(
                cameraResult, cameraMatrix, distCoeffs, constrainedPnpParams, this.primaryStrategy);
    }

    /**
     * Internal convenience method for using a fallback strategy for update(). This should only be
     * called after timestamp checks have been done by another update() overload.
     *
     * @param cameraResult The latest pipeline result from the camera
     * @param strategy The pose strategy to use. Can't be CONSTRAINED_SOLVEPNP.
     * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate.
     */
    private Optional<EstimatedRobotPose> update(
            PhotonPipelineResult cameraResult, PoseStrategy strategy) {
        return update(cameraResult, Optional.empty(), Optional.empty(), Optional.empty(), strategy);
    }

    private Optional<EstimatedRobotPose> update(
            PhotonPipelineResult cameraResult,
            Optional<Matrix<N3, N3>> cameraMatrix,
            Optional<Matrix<N8, N1>> distCoeffs,
            Optional<ConstrainedSolvepnpParams> constrainedPnpParams,
            PoseStrategy strategy) {
        Optional<EstimatedRobotPose> estimatedPose =
                switch (strategy) {
                    case LOWEST_AMBIGUITY -> lowestAmbiguityStrategy(cameraResult);
                    case CLOSEST_TO_CAMERA_HEIGHT -> closestToCameraHeightStrategy(cameraResult);
                    case CLOSEST_TO_REFERENCE_POSE ->
                            closestToReferencePoseStrategy(cameraResult, referencePose);
                    case CLOSEST_TO_LAST_POSE -> {
                        setReferencePose(lastPose);
                        yield closestToReferencePoseStrategy(cameraResult, referencePose);
                    }
                    case AVERAGE_BEST_TARGETS -> averageBestTargetsStrategy(cameraResult);
                    case MULTI_TAG_PNP_ON_RIO ->
                            multiTagOnRioStrategy(cameraResult, cameraMatrix, distCoeffs);
                    case MULTI_TAG_PNP_ON_COPROCESSOR -> multiTagOnCoprocStrategy(cameraResult);
                    case PNP_DISTANCE_TRIG_SOLVE -> pnpDistanceTrigSolveStrategy(cameraResult);
                    case CONSTRAINED_SOLVEPNP ->
                            constrainedPnpStrategy(cameraResult, cameraMatrix, distCoeffs, constrainedPnpParams);
                };

        if (estimatedPose.isPresent()) {
            lastPose = estimatedPose.get().estimatedPose;
        }

        return estimatedPose;
    }

    private Optional<EstimatedRobotPose> pnpDistanceTrigSolveStrategy(PhotonPipelineResult result) {
        PhotonTrackedTarget bestTarget = result.getBestTarget();

        if (bestTarget == null) return Optional.empty();

        var headingSampleOpt = headingBuffer.getSample(result.getTimestampSeconds());
        if (headingSampleOpt.isEmpty()) {
            return Optional.empty();
        }
        Rotation2d headingSample = headingSampleOpt.get();

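        // Build the camera-to-tag translation from the measured 3D distance to the tag and the
        // target's observed pitch/yaw, rotate it by the camera mount rotation, project it onto the
        // floor plane, and then rotate it into the field frame using the robot heading sample.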
        Translation2d camToTagTranslation =
                new Translation3d(
                                bestTarget.getBestCameraToTarget().getTranslation().getNorm(),
                                new Rotation3d(
                                        0,
                                        -Math.toRadians(bestTarget.getPitch()),
                                        -Math.toRadians(bestTarget.getYaw())))
                        .rotateBy(robotToCamera.getRotation())
                        .toTranslation2d()
                        .rotateBy(headingSample);

        var tagPoseOpt = fieldTags.getTagPose(bestTarget.getFiducialId());
        if (tagPoseOpt.isEmpty()) {
            return Optional.empty();
        }
        var tagPose2d = tagPoseOpt.get().toPose2d();

        Translation2d fieldToCameraTranslation =
                tagPose2d.getTranslation().plus(camToTagTranslation.unaryMinus());

        Translation2d camToRobotTranslation =
                robotToCamera.getTranslation().toTranslation2d().unaryMinus().rotateBy(headingSample);

        Pose2d robotPose =
                new Pose2d(fieldToCameraTranslation.plus(camToRobotTranslation), headingSample);

        return Optional.of(
                new EstimatedRobotPose(
                        new Pose3d(robotPose),
                        result.getTimestampSeconds(),
                        result.getTargets(),
                        PoseStrategy.PNP_DISTANCE_TRIG_SOLVE));
    }

    private Optional<EstimatedRobotPose> constrainedPnpStrategy(
            PhotonPipelineResult result,
            Optional<Matrix<N3, N3>> cameraMatrixOpt,
            Optional<Matrix<N8, N1>> distCoeffsOpt,
            Optional<ConstrainedSolvepnpParams> constrainedPnpParams) {
        boolean hasCalibData = cameraMatrixOpt.isPresent() && distCoeffsOpt.isPresent();
        // cannot run multitagPNP, use fallback strategy
        if (!hasCalibData) {
            return update(
                    result, cameraMatrixOpt, distCoeffsOpt, Optional.empty(), this.multiTagFallbackStrategy);
        }

        if (constrainedPnpParams.isEmpty()) {
            return Optional.empty();
        }

        // Need heading if heading fixed
        if (!constrainedPnpParams.get().headingFree
                && headingBuffer.getSample(result.getTimestampSeconds()).isEmpty()) {
            return update(
                    result, cameraMatrixOpt, distCoeffsOpt, Optional.empty(), this.multiTagFallbackStrategy);
        }

        Pose3d fieldToRobotSeed;

        // Attempt to use multi-tag to get a pose estimate seed
        if (result.getMultiTagResult().isPresent()) {
            fieldToRobotSeed =
                    Pose3d.kZero.plus(
                            result.getMultiTagResult().get().estimatedPose.best.plus(robotToCamera.inverse()));
        } else {
            // HACK - use the fallback strategy to get a seed pose
            // TODO - make sure nested update doesn't break state
            var nestedUpdate =
                    update(
                            result,
                            cameraMatrixOpt,
                            distCoeffsOpt,
                            Optional.empty(),
                            this.multiTagFallbackStrategy);
            if (nestedUpdate.isEmpty()) {
                // The best we can do is bail
                return Optional.empty();
            }
            fieldToRobotSeed = nestedUpdate.get().estimatedPose;
        }

        if (!constrainedPnpParams.get().headingFree) {
            // If heading fixed, force rotation component
            fieldToRobotSeed =
                    new Pose3d(
                            fieldToRobotSeed.getTranslation(),
                            new Rotation3d(headingBuffer.getSample(result.getTimestampSeconds()).get()));
        }

        var pnpResult =
                VisionEstimation.estimateRobotPoseConstrainedSolvepnp(
                        cameraMatrixOpt.get(),
                        distCoeffsOpt.get(),
                        result.getTargets(),
                        robotToCamera,
                        fieldToRobotSeed,
                        fieldTags,
                        tagModel,
                        constrainedPnpParams.get().headingFree,
                        headingBuffer.getSample(result.getTimestampSeconds()).get(),
                        constrainedPnpParams.get().headingScaleFactor);
        // try fallback strategy if solvePNP fails for some reason
        if (!pnpResult.isPresent())
            return update(
                    result, cameraMatrixOpt, distCoeffsOpt, Optional.empty(), this.multiTagFallbackStrategy);
        var best = Pose3d.kZero.plus(pnpResult.get().best); // field-to-robot

        return Optional.of(
                new EstimatedRobotPose(
                        best,
                        result.getTimestampSeconds(),
                        result.getTargets(),
                        PoseStrategy.CONSTRAINED_SOLVEPNP));
    }

    private Optional<EstimatedRobotPose> multiTagOnCoprocStrategy(PhotonPipelineResult result) {
        if (result.getMultiTagResult().isEmpty()) {
            return update(result, this.multiTagFallbackStrategy);
        }

        var best_tf = result.getMultiTagResult().get().estimatedPose.best;
        var best =
                Pose3d.kZero
                        .plus(best_tf) // field-to-camera
                        .relativeTo(fieldTags.getOrigin())
                        .plus(robotToCamera.inverse()); // field-to-robot
        return Optional.of(
                new EstimatedRobotPose(
                        best,
                        result.getTimestampSeconds(),
                        result.getTargets(),
                        PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR));
    }

    private Optional<EstimatedRobotPose> multiTagOnRioStrategy(
            PhotonPipelineResult result,
            Optional<Matrix<N3, N3>> cameraMatrixOpt,
            Optional<Matrix<N8, N1>> distCoeffsOpt) {
        if (cameraMatrixOpt.isEmpty() || distCoeffsOpt.isEmpty()) {
            DriverStation.reportWarning(
                    "No camera calibration data provided for multi-tag-on-rio",
                    Thread.currentThread().getStackTrace());
            return update(result, this.multiTagFallbackStrategy);
        }

        if (result.getTargets().size() < 2) {
            return update(result, this.multiTagFallbackStrategy);
        }

        var pnpResult =
                VisionEstimation.estimateCamPosePNP(
                        cameraMatrixOpt.get(), distCoeffsOpt.get(), result.getTargets(), fieldTags, tagModel);
        // try fallback strategy if solvePNP fails for some reason
        if (!pnpResult.isPresent())
            return update(
                    result, cameraMatrixOpt, distCoeffsOpt, Optional.empty(), this.multiTagFallbackStrategy);

        var best =
                Pose3d.kZero
                        .plus(pnpResult.get().best) // field-to-camera
                        .plus(robotToCamera.inverse()); // field-to-robot

        return Optional.of(
                new EstimatedRobotPose(
                        best,
                        result.getTimestampSeconds(),
                        result.getTargets(),
                        PoseStrategy.MULTI_TAG_PNP_ON_RIO));
    }

    /**
     * Return the estimated position of the robot with the lowest position ambiguity from a List of
     * pipeline results.
     *
     * @param result pipeline result
     * @return the estimated position of the robot in the FCS and the estimated timestamp of this
     *     estimation.
     */
    private Optional<EstimatedRobotPose> lowestAmbiguityStrategy(PhotonPipelineResult result) {
        PhotonTrackedTarget lowestAmbiguityTarget = null;

        double lowestAmbiguityScore = 10;

        for (PhotonTrackedTarget target : result.targets) {
            double targetPoseAmbiguity = target.getPoseAmbiguity();
            // Make sure the target is a Fiducial target.
            if (targetPoseAmbiguity != -1 && targetPoseAmbiguity < lowestAmbiguityScore) {
                lowestAmbiguityScore = targetPoseAmbiguity;
                lowestAmbiguityTarget = target;
            }
        }

        // Although there are confirmed to be targets, none of them may be fiducial
        // targets.
        if (lowestAmbiguityTarget == null) return Optional.empty();

        int targetFiducialId = lowestAmbiguityTarget.getFiducialId();

        Optional<Pose3d> targetPosition = fieldTags.getTagPose(targetFiducialId);

        if (targetPosition.isEmpty()) {
            reportFiducialPoseError(targetFiducialId);
            return Optional.empty();
        }

        return Optional.of(
                new EstimatedRobotPose(
                        targetPosition
                                .get()
                                .transformBy(lowestAmbiguityTarget.getBestCameraToTarget().inverse())
                                .transformBy(robotToCamera.inverse()),
                        result.getTimestampSeconds(),
                        result.getTargets(),
                        PoseStrategy.LOWEST_AMBIGUITY));
    }

    /**
     * Return the estimated position of the robot using the target with the lowest delta height
     * difference between the estimated and actual height of the camera.
     *
     * @param result pipeline result
     * @return the estimated position of the robot in the FCS and the estimated timestamp of this
     *     estimation.
     */
    private Optional<EstimatedRobotPose> closestToCameraHeightStrategy(PhotonPipelineResult result) {
        double smallestHeightDifference = 10e9;
        EstimatedRobotPose closestHeightTarget = null;

        for (PhotonTrackedTarget target : result.targets) {
            int targetFiducialId = target.getFiducialId();

            // Don't report errors for non-fiducial targets. This could also be resolved by
            // adding -1 to the initial HashSet.
            if (targetFiducialId == -1) continue;

            Optional<Pose3d> targetPosition = fieldTags.getTagPose(target.getFiducialId());

            if (targetPosition.isEmpty()) {
                reportFiducialPoseError(target.getFiducialId());
                continue;
            }

            double alternateTransformDelta =
                    Math.abs(
                            robotToCamera.getZ()
                                    - targetPosition
                                            .get()
                                            .transformBy(target.getAlternateCameraToTarget().inverse())
                                            .getZ());
            double bestTransformDelta =
                    Math.abs(
                            robotToCamera.getZ()
                                    - targetPosition
                                            .get()
                                            .transformBy(target.getBestCameraToTarget().inverse())
                                            .getZ());

            if (alternateTransformDelta < smallestHeightDifference) {
                smallestHeightDifference = alternateTransformDelta;
                closestHeightTarget =
                        new EstimatedRobotPose(
                                targetPosition
                                        .get()
                                        .transformBy(target.getAlternateCameraToTarget().inverse())
                                        .transformBy(robotToCamera.inverse()),
                                result.getTimestampSeconds(),
                                result.getTargets(),
                                PoseStrategy.CLOSEST_TO_CAMERA_HEIGHT);
            }

            if (bestTransformDelta < smallestHeightDifference) {
                smallestHeightDifference = bestTransformDelta;
                closestHeightTarget =
                        new EstimatedRobotPose(
                                targetPosition
                                        .get()
                                        .transformBy(target.getBestCameraToTarget().inverse())
                                        .transformBy(robotToCamera.inverse()),
                                result.getTimestampSeconds(),
                                result.getTargets(),
                                PoseStrategy.CLOSEST_TO_CAMERA_HEIGHT);
            }
        }

        // Need to null check here in case none of the provided targets are fiducial.
        return Optional.ofNullable(closestHeightTarget);
    }

    /**
     * Return the estimated position of the robot using the target with the lowest delta in the vector
     * magnitude between it and the reference pose.
     *
     * @param result pipeline result
     * @param referencePose reference pose to check vector magnitude difference against.
     * @return the estimated position of the robot in the FCS and the estimated timestamp of this
     *     estimation.
     */
    private Optional<EstimatedRobotPose> closestToReferencePoseStrategy(
            PhotonPipelineResult result, Pose3d referencePose) {
        if (referencePose == null) {
            DriverStation.reportError(
                    "[PhotonPoseEstimator] Tried to use reference pose strategy without setting the reference!",
                    false);
            return Optional.empty();
        }

        double smallestPoseDelta = 10e9;
        EstimatedRobotPose lowestDeltaPose = null;

        for (PhotonTrackedTarget target : result.targets) {
            int targetFiducialId = target.getFiducialId();

            // Don't report errors for non-fiducial targets. This could also be resolved by
            // adding -1 to the initial HashSet.
            if (targetFiducialId == -1) continue;

            Optional<Pose3d> targetPosition = fieldTags.getTagPose(target.getFiducialId());

            if (targetPosition.isEmpty()) {
                reportFiducialPoseError(targetFiducialId);
                continue;
            }

            Pose3d altTransformPosition =
                    targetPosition
                            .get()
                            .transformBy(target.getAlternateCameraToTarget().inverse())
                            .transformBy(robotToCamera.inverse());
            Pose3d bestTransformPosition =
                    targetPosition
                            .get()
                            .transformBy(target.getBestCameraToTarget().inverse())
                            .transformBy(robotToCamera.inverse());

            double altDifference = Math.abs(calculateDifference(referencePose, altTransformPosition));
            double bestDifference = Math.abs(calculateDifference(referencePose, bestTransformPosition));

            if (altDifference < smallestPoseDelta) {
                smallestPoseDelta = altDifference;
                lowestDeltaPose =
                        new EstimatedRobotPose(
                                altTransformPosition,
                                result.getTimestampSeconds(),
                                result.getTargets(),
                                PoseStrategy.CLOSEST_TO_REFERENCE_POSE);
            }
            if (bestDifference < smallestPoseDelta) {
                smallestPoseDelta = bestDifference;
                lowestDeltaPose =
                        new EstimatedRobotPose(
                                bestTransformPosition,
                                result.getTimestampSeconds(),
                                result.getTargets(),
                                PoseStrategy.CLOSEST_TO_REFERENCE_POSE);
            }
        }
        return Optional.ofNullable(lowestDeltaPose);
    }

    /**
     * Return the average of the best target poses using ambiguity as weight.
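     * Each used target contributes with weight (1 / ambiguity), normalized by the sum of those
     * weights over all used targets; a target with zero ambiguity short-circuits and is returned
     * directly.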
     *
     * @param result pipeline result
     * @return the estimated position of the robot in the FCS and the estimated timestamp of this
     *     estimation.
     */
    private Optional<EstimatedRobotPose> averageBestTargetsStrategy(PhotonPipelineResult result) {
        List<Pair<PhotonTrackedTarget, Pose3d>> estimatedRobotPoses = new ArrayList<>();
        double totalAmbiguity = 0;

        for (PhotonTrackedTarget target : result.targets) {
            int targetFiducialId = target.getFiducialId();

            // Don't report errors for non-fiducial targets. This could also be resolved by
            // adding -1 to the initial HashSet.
            if (targetFiducialId == -1) continue;

            Optional<Pose3d> targetPosition = fieldTags.getTagPose(target.getFiducialId());

            if (targetPosition.isEmpty()) {
                reportFiducialPoseError(targetFiducialId);
                continue;
            }

            double targetPoseAmbiguity = target.getPoseAmbiguity();

            // Pose ambiguity is 0, use that pose
            if (targetPoseAmbiguity == 0) {
                return Optional.of(
                        new EstimatedRobotPose(
                                targetPosition
                                        .get()
                                        .transformBy(target.getBestCameraToTarget().inverse())
                                        .transformBy(robotToCamera.inverse()),
                                result.getTimestampSeconds(),
                                result.getTargets(),
                                PoseStrategy.AVERAGE_BEST_TARGETS));
            }

            totalAmbiguity += 1.0 / target.getPoseAmbiguity();

            estimatedRobotPoses.add(
                    new Pair<>(
                            target,
                            targetPosition
                                    .get()
                                    .transformBy(target.getBestCameraToTarget().inverse())
                                    .transformBy(robotToCamera.inverse())));
        }

        // Take the average

        Translation3d transform = new Translation3d();
        Rotation3d rotation = new Rotation3d();

        if (estimatedRobotPoses.isEmpty()) return Optional.empty();

        for (Pair<PhotonTrackedTarget, Pose3d> pair : estimatedRobotPoses) {
            // Total ambiguity is confirmed to be non-zero; if any target had zero ambiguity, that
            // pose would already have been returned above.
            double weight = (1.0 / pair.getFirst().getPoseAmbiguity()) / totalAmbiguity;
            Pose3d estimatedPose = pair.getSecond();
            transform = transform.plus(estimatedPose.getTranslation().times(weight));
            rotation = rotation.plus(estimatedPose.getRotation().times(weight));
        }

        return Optional.of(
                new EstimatedRobotPose(
                        new Pose3d(transform, rotation),
                        result.getTimestampSeconds(),
                        result.getTargets(),
                        PoseStrategy.AVERAGE_BEST_TARGETS));
    }

    /**
     * Difference is defined as the distance between the translation components of the two poses.
     *
     * @return The absolute "difference" (>=0) between two Pose3ds.
     */
    private double calculateDifference(Pose3d x, Pose3d y) {
        return x.getTranslation().getDistance(y.getTranslation());
    }

    private void reportFiducialPoseError(int fiducialId) {
        if (!reportedErrors.contains(fiducialId)) {
            DriverStation.reportError(
                    "[PhotonPoseEstimator] Tried to get pose of unknown AprilTag: " + fiducialId, false);
            reportedErrors.add(fiducialId);
        }
    }
}