/*
 * MIT License
 *
 * Copyright (c) PhotonVision
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

package org.photonvision;

import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.cscore.OpenCvLoader;
import edu.wpi.first.hal.FRCNetComm.tResourceType;
import edu.wpi.first.hal.HAL;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Pair;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.geometry.Translation3d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import edu.wpi.first.math.numbers.N8;
import edu.wpi.first.wpilibj.DriverStation;
import java.util.*;
import org.photonvision.estimation.TargetModel;
import org.photonvision.estimation.VisionEstimation;
import org.photonvision.targeting.PhotonPipelineResult;
import org.photonvision.targeting.PhotonTrackedTarget;

/**
 * The PhotonPoseEstimator class filters or combines readings from all the AprilTags visible at a
 * given timestamp on the field to produce a single robot-in-field pose, using the strategy set
 * below. Example usage can be found in our apriltagExample project.
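 *
 * <p>A minimal usage sketch (hedged: the camera name "front", the identity {@code robotToCamera}
 * transform, and the {@code poseEstimator} drivetrain estimator are placeholders for your own
 * robot code):
 *
 * <pre>{@code
 * AprilTagFieldLayout fieldTags = AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField);
 * Transform3d robotToCamera = new Transform3d(); // replace with your measured mounting transform
 * PhotonCamera camera = new PhotonCamera("front");
 * PhotonPoseEstimator estimator = new PhotonPoseEstimator(fieldTags, robotToCamera);
 *
 * for (PhotonPipelineResult result : camera.getAllUnreadResults()) {
 *     estimator
 *             .estimateLowestAmbiguityPose(result)
 *             .ifPresent(
 *                     est ->
 *                             poseEstimator.addVisionMeasurement(
 *                                     est.estimatedPose.toPose2d(), est.timestampSeconds));
 * }
 * }</pre>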
 */
public class PhotonPoseEstimator {
    private static int InstanceCount = 1;

    /** Position estimation strategies that can be used by the {@link PhotonPoseEstimator} class. */
    public enum PoseStrategy {
        /** Choose the Pose with the lowest ambiguity. */
        LOWEST_AMBIGUITY,

        /** Choose the Pose which is closest to the camera height. */
        CLOSEST_TO_CAMERA_HEIGHT,

        /** Choose the Pose which is closest to a set Reference position. */
        CLOSEST_TO_REFERENCE_POSE,

        /** Choose the Pose which is closest to the last pose calculated. */
        CLOSEST_TO_LAST_POSE,

        /** Return the average of the best target poses using ambiguity as weight. */
        AVERAGE_BEST_TARGETS,

        /**
         * Use all visible tags to compute a single pose estimate on coprocessor. This option needs to
         * be enabled on the PhotonVision web UI as well.
         */
        MULTI_TAG_PNP_ON_COPROCESSOR,

        /**
         * Use all visible tags to compute a single pose estimate. This runs on the RoboRIO, and can
         * take a lot of time.
         */
        MULTI_TAG_PNP_ON_RIO,

        /**
         * Use distance data from best visible tag to compute a Pose. This runs on the RoboRIO in order
         * to access the robot's yaw heading, and MUST have addHeadingData called every frame so heading
         * data is up-to-date.
         *
         * <p>Yields a Pose2d in estimatedRobotPose (0 for z, roll, pitch)
         *
         * <p>https://www.chiefdelphi.com/t/frc-6328-mechanical-advantage-2025-build-thread/477314/98
         */
        PNP_DISTANCE_TRIG_SOLVE,

        /**
         * Solve a constrained version of the Perspective-n-Point problem with the robot's drivebase
         * flat on the floor. This computation takes place on the RoboRIO, and typically takes no more
         * than 2 ms. See {@link PhotonPoseEstimator.ConstrainedSolvepnpParams} and {@link
         * org.photonvision.jni.ConstrainedSolvepnpJni} for details on the tuning handles this strategy
         * exposes. This strategy requires addHeadingData to be called every frame so heading data is
         * up-to-date. If Multi-Tag PNP is enabled on the coprocessor, it will be used to provide an
         * initial seed to the optimization algorithm -- otherwise, the multi-tag fallback strategy
         * will be used as the seed.
         */
        CONSTRAINED_SOLVEPNP
    }

    /**
     * Tuning handles we have over the CONSTRAINED_SOLVEPNP {@link PhotonPoseEstimator.PoseStrategy}.
     * Internally, the cost function is a sum-squared of pixel reprojection error + (optionally)
     * heading error * heading scale factor.
     *
     * @param headingFree If true, heading is completely free to vary. If false, heading excursions
     *     from the provided heading measurement will be penalized.
     * @param headingScaleFactor If headingFree is false, this weights the cost of changing our robot
     *     heading estimate against the tag corner reprojection error cost.
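     *
     * <p>For example, {@code new ConstrainedSolvepnpParams(false, 1.0)} pins heading to the
     * measured value with a unit penalty weight (the 1.0 here is illustrative, not a tuned
     * recommendation).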
     */
    public static final record ConstrainedSolvepnpParams(
            boolean headingFree, double headingScaleFactor) {}

    private AprilTagFieldLayout fieldTags;
    private TargetModel tagModel = TargetModel.kAprilTag36h11;
    private PoseStrategy primaryStrategy;
    private PoseStrategy multiTagFallbackStrategy = PoseStrategy.LOWEST_AMBIGUITY;
    private Transform3d robotToCamera;

    private Pose3d lastPose;
    private Pose3d referencePose;
    protected double poseCacheTimestampSeconds = -1;
    private final Set<Integer> reportedErrors = new HashSet<>();

    private final TimeInterpolatableBuffer<Rotation2d> headingBuffer =
            TimeInterpolatableBuffer.createBuffer(1.0);

    /**
     * Create a new PhotonPoseEstimator.
     *
     * @param fieldTags A WPILib {@link AprilTagFieldLayout} linking AprilTag IDs to Pose3d objects
     *     with respect to the FIRST field using the <a href=
     *     "https://docs.wpilib.org/en/stable/docs/software/advanced-controls/geometry/coordinate-systems.html#field-coordinate-system">Field
     *     Coordinate System</a>. Note that setting the origin of this layout object will affect the
     *     results from this class.
     * @param robotToCamera Transform3d from the center of the robot to the camera mount position (ie,
     *     robot ➔ camera) in the <a href=
     *     "https://docs.wpilib.org/en/stable/docs/software/advanced-controls/geometry/coordinate-systems.html#robot-coordinate-system">Robot
     *     Coordinate System</a>.
     */
    public PhotonPoseEstimator(AprilTagFieldLayout fieldTags, Transform3d robotToCamera) {
        this.fieldTags = fieldTags;
        this.robotToCamera = robotToCamera;

        HAL.report(tResourceType.kResourceType_PhotonPoseEstimator, InstanceCount);
        InstanceCount++;
    }

    /**
     * Create a new PhotonPoseEstimator.
     *
     * @param fieldTags A WPILib {@link AprilTagFieldLayout} linking AprilTag IDs to Pose3d objects
     *     with respect to the FIRST field using the <a href=
     *     "https://docs.wpilib.org/en/stable/docs/software/advanced-controls/geometry/coordinate-systems.html#field-coordinate-system">Field
     *     Coordinate System</a>. Note that setting the origin of this layout object will affect the
     *     results from this class.
     * @param strategy The strategy it should use to determine the best pose.
     * @param robotToCamera Transform3d from the center of the robot to the camera mount position (ie,
     *     robot ➔ camera) in the <a href=
     *     "https://docs.wpilib.org/en/stable/docs/software/advanced-controls/geometry/coordinate-systems.html#robot-coordinate-system">Robot
     *     Coordinate System</a>.
     * @deprecated Use individual estimation methods with the two-argument constructor instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public PhotonPoseEstimator(
            AprilTagFieldLayout fieldTags, PoseStrategy strategy, Transform3d robotToCamera) {
        this.fieldTags = fieldTags;
        this.primaryStrategy = strategy;
        this.robotToCamera = robotToCamera;

        if (strategy == PoseStrategy.MULTI_TAG_PNP_ON_RIO
                || strategy == PoseStrategy.CONSTRAINED_SOLVEPNP) {
            OpenCvLoader.forceStaticLoad();
        }

        HAL.report(tResourceType.kResourceType_PhotonPoseEstimator, InstanceCount);
        InstanceCount++;
    }

    /** Invalidates the pose cache. */
    private void invalidatePoseCache() {
        poseCacheTimestampSeconds = -1;
    }

    private void checkUpdate(Object oldObj, Object newObj) {
        if (!Objects.equals(oldObj, newObj)) {
            invalidatePoseCache();
        }
    }

    /**
     * Get the AprilTagFieldLayout being used by the PositionEstimator.
     *
     * <p>Note: Setting the origin of this layout will affect the results from this class.
     *
     * @return the AprilTagFieldLayout
     */
    public AprilTagFieldLayout getFieldTags() {
        return fieldTags;
    }

    /**
     * Set the AprilTagFieldLayout being used by the PositionEstimator.
     *
     * <p>Note: Setting the origin of this layout will affect the results from this class.
     *
     * @param fieldTags the AprilTagFieldLayout
     */
    public void setFieldTags(AprilTagFieldLayout fieldTags) {
        checkUpdate(this.fieldTags, fieldTags);
        this.fieldTags = fieldTags;
    }

    /**
     * Get the TargetModel representing the tags being detected. This is used for on-rio multitag.
     *
     * <p>By default, this is {@link TargetModel#kAprilTag36h11}.
     */
    public TargetModel getTagModel() {
        return tagModel;
    }

    /**
     * Set the TargetModel representing the tags being detected. This is used for on-rio multitag.
     *
     * @param tagModel E.g. {@link TargetModel#kAprilTag16h5}.
     */
    public void setTagModel(TargetModel tagModel) {
        this.tagModel = tagModel;
    }

    /**
     * Get the Position Estimation Strategy being used by the Position Estimator.
     *
     * @return the strategy
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public PoseStrategy getPrimaryStrategy() {
        return primaryStrategy;
    }

    /**
     * Set the Position Estimation Strategy used by the Position Estimator.
     *
     * @param strategy the strategy to set
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public void setPrimaryStrategy(PoseStrategy strategy) {
        checkUpdate(this.primaryStrategy, strategy);

        if (strategy == PoseStrategy.MULTI_TAG_PNP_ON_RIO
                || strategy == PoseStrategy.CONSTRAINED_SOLVEPNP) {
            OpenCvLoader.forceStaticLoad();
        }
        this.primaryStrategy = strategy;
    }

    /**
     * Set the Position Estimation Strategy used in multi-tag mode when only one tag can be seen. Must
     * NOT be MULTI_TAG_PNP.
     *
     * @param strategy the strategy to set
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public void setMultiTagFallbackStrategy(PoseStrategy strategy) {
        checkUpdate(this.multiTagFallbackStrategy, strategy);
        if (strategy == PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR
                || strategy == PoseStrategy.MULTI_TAG_PNP_ON_RIO) {
            DriverStation.reportWarning(
                    "Fallback cannot be set to MULTI_TAG_PNP! Setting to lowest ambiguity", false);
            strategy = PoseStrategy.LOWEST_AMBIGUITY;
        }
        this.multiTagFallbackStrategy = strategy;
    }

    /**
     * Return the reference position that is being used by the estimator.
     *
     * @return the referencePose
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public Pose3d getReferencePose() {
        return referencePose;
    }

    /**
     * Update the stored reference pose for use when using the <b>CLOSEST_TO_REFERENCE_POSE</b>
     * strategy.
     *
     * @param referencePose the referencePose to set
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public void setReferencePose(Pose3d referencePose) {
        checkUpdate(this.referencePose, referencePose);
        this.referencePose = referencePose;
    }

    /**
     * Update the stored reference pose for use when using the <b>CLOSEST_TO_REFERENCE_POSE</b>
     * strategy.
     *
     * @param referencePose the referencePose to set
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public void setReferencePose(Pose2d referencePose) {
        setReferencePose(new Pose3d(referencePose));
    }

    /**
     * Update the stored last pose. Useful for setting the initial estimate when using the
     * <b>CLOSEST_TO_LAST_POSE</b> strategy.
     *
     * @param lastPose the lastPose to set
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public void setLastPose(Pose3d lastPose) {
        this.lastPose = lastPose;
    }

    /**
     * Update the stored last pose. Useful for setting the initial estimate when using the
     * <b>CLOSEST_TO_LAST_POSE</b> strategy.
     *
     * @param lastPose the lastPose to set
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public void setLastPose(Pose2d lastPose) {
        setLastPose(new Pose3d(lastPose));
    }

    /**
     * Add robot heading data to buffer. Must be called periodically for the
     * <b>PNP_DISTANCE_TRIG_SOLVE</b> strategy.
     *
     * @param timestampSeconds Timestamp of the robot heading data.
     * @param heading Field-relative robot heading at given timestamp. Standard WPILIB field
     *     coordinates.
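     *
     * <p>A hedged sketch (the {@code gyro} object is a placeholder for your own IMU wrapper): call
     * {@code addHeadingData(Timer.getFPGATimestamp(), gyro.getRotation2d())} from your periodic
     * loop so the buffer always holds a sample near each camera frame's timestamp.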
     */
    public void addHeadingData(double timestampSeconds, Rotation3d heading) {
        addHeadingData(timestampSeconds, heading.toRotation2d());
    }

    /**
     * Add robot heading data to buffer. Must be called periodically for the
     * <b>PNP_DISTANCE_TRIG_SOLVE</b> strategy.
     *
     * @param timestampSeconds Timestamp of the robot heading data.
     * @param heading Field-relative robot heading at given timestamp. Standard WPILIB field
     *     coordinates.
     */
    public void addHeadingData(double timestampSeconds, Rotation2d heading) {
        headingBuffer.addSample(timestampSeconds, heading);
    }

    /**
     * Clears all heading data in the buffer, and adds a new seed. Useful for preventing estimates
     * from utilizing heading data provided prior to a pose or rotation reset.
     *
     * @param timestampSeconds Timestamp of the robot heading data.
     * @param heading Field-relative robot heading at given timestamp. Standard WPILIB field
     *     coordinates.
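     *
     * <p>For example (hedged: the reset hook is illustrative, not part of this class), call
     * {@code resetHeadingData(Timer.getFPGATimestamp(), newHeading)} wherever you reset your
     * drivetrain pose or re-zero the gyro.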
     */
    public void resetHeadingData(double timestampSeconds, Rotation3d heading) {
        headingBuffer.clear();
        addHeadingData(timestampSeconds, heading);
    }

    /**
     * Clears all heading data in the buffer, and adds a new seed. Useful for preventing estimates
     * from utilizing heading data provided prior to a pose or rotation reset.
     *
     * @param timestampSeconds Timestamp of the robot heading data.
     * @param heading Field-relative robot heading at given timestamp. Standard WPILIB field
     *     coordinates.
     */
    public void resetHeadingData(double timestampSeconds, Rotation2d heading) {
        headingBuffer.clear();
        addHeadingData(timestampSeconds, heading);
    }

    /**
     * @return The current transform from the center of the robot to the camera mount position
     */
    public Transform3d getRobotToCameraTransform() {
        return robotToCamera;
    }

    /**
     * Useful for pan and tilt mechanisms and such.
     *
     * @param robotToCamera The current transform from the center of the robot to the camera mount
     *     position
     */
    public void setRobotToCameraTransform(Transform3d robotToCamera) {
        this.robotToCamera = robotToCamera;
    }

    /**
     * Updates the estimated position of the robot, assuming no camera calibration is required for the
     * selected strategy. Returns empty if:
     *
     * <ul>
     *   <li>The timestamp of the provided pipeline result is the same as in the previous call to
     *       {@code update()}.
     *   <li>No targets were found in the pipeline results.
     * </ul>
     *
     * Will report a warning if the strategy is multi-tag-on-rio, because camera calibration data is
     * not provided in this overload.
     *
     * @param cameraResult The latest pipeline result from the camera
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate.
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public Optional<EstimatedRobotPose> update(PhotonPipelineResult cameraResult) {
        return update(cameraResult, Optional.empty(), Optional.empty());
    }

    /**
     * Updates the estimated position of the robot. Returns empty if:
     *
     * <ul>
     *   <li>The timestamp of the provided pipeline result is the same as in the previous call to
     *       {@code update()}.
     *   <li>No targets were found in the pipeline results.
     *   <li>The strategy is CONSTRAINED_SOLVEPNP, but no constrainedPnpParams were provided (use the
     *       other function overload).
     * </ul>
     *
     * @param cameraMatrix Camera calibration data for multi-tag-on-rio strategy - can be empty
     *     otherwise
     * @param distCoeffs Camera calibration data for multi-tag-on-rio strategy - can be empty
     *     otherwise
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate.
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public Optional<EstimatedRobotPose> update(
            PhotonPipelineResult cameraResult,
            Optional<Matrix<N3, N3>> cameraMatrix,
            Optional<Matrix<N8, N1>> distCoeffs) {
        return update(cameraResult, cameraMatrix, distCoeffs, Optional.empty());
    }

    /**
     * Updates the estimated position of the robot. Returns empty if:
     *
     * <ul>
     *   <li>The timestamp of the provided pipeline result is the same as in the previous call to
     *       {@code update()}.
     *   <li>No targets were found in the pipeline results.
     *   <li>The strategy is CONSTRAINED_SOLVEPNP, but the provided constrainedPnpParams are empty.
     * </ul>
     *
     * @param cameraMatrix Camera calibration data for multi-tag-on-rio strategy - can be empty
     *     otherwise
     * @param distCoeffs Camera calibration data for multi-tag-on-rio strategy - can be empty
     *     otherwise
     * @param constrainedPnpParams Constrained SolvePNP params, if needed.
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate.
     * @deprecated Use individual estimation methods instead.
     */
    @Deprecated(forRemoval = true, since = "2026")
    public Optional<EstimatedRobotPose> update(
            PhotonPipelineResult cameraResult,
            Optional<Matrix<N3, N3>> cameraMatrix,
            Optional<Matrix<N8, N1>> distCoeffs,
            Optional<ConstrainedSolvepnpParams> constrainedPnpParams) {
        // Time in the past -- give up, since the following if expects times > 0
        if (cameraResult.getTimestampSeconds() < 0) {
            return Optional.empty();
        }

        // If the pose cache timestamp was set, and the result is from the same timestamp,
        // return an empty result
        if (poseCacheTimestampSeconds > 0
                && Math.abs(poseCacheTimestampSeconds - cameraResult.getTimestampSeconds()) < 1e-6) {
            return Optional.empty();
        }

        // Remember the timestamp of the current result used
        poseCacheTimestampSeconds = cameraResult.getTimestampSeconds();

        // If no targets seen, trivial case -- return empty result
        if (!cameraResult.hasTargets()) {
            return Optional.empty();
        }

        return update(
                cameraResult, cameraMatrix, distCoeffs, constrainedPnpParams, this.primaryStrategy);
    }

    /**
     * Internal convenience method for using a fallback strategy for update(). This should only be
     * called after timestamp checks have been done by another update() overload.
     *
     * @param cameraResult The latest pipeline result from the camera
     * @param strategy The pose strategy to use. Can't be CONSTRAINED_SOLVEPNP.
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate.
     */
    private Optional<EstimatedRobotPose> update(
            PhotonPipelineResult cameraResult, PoseStrategy strategy) {
        return update(cameraResult, Optional.empty(), Optional.empty(), Optional.empty(), strategy);
    }

    private Optional<EstimatedRobotPose> update(
            PhotonPipelineResult cameraResult,
            Optional<Matrix<N3, N3>> cameraMatrix,
            Optional<Matrix<N8, N1>> distCoeffs,
            Optional<ConstrainedSolvepnpParams> constrainedPnpParams,
            PoseStrategy strategy) {
        Optional<EstimatedRobotPose> estimatedPose =
                switch (strategy) {
                    case LOWEST_AMBIGUITY -> estimateLowestAmbiguityPose(cameraResult);
                    case CLOSEST_TO_CAMERA_HEIGHT -> estimateClosestToCameraHeightPose(cameraResult);
                    case CLOSEST_TO_REFERENCE_POSE ->
                            estimateClosestToReferencePose(cameraResult, referencePose);
                    case CLOSEST_TO_LAST_POSE -> {
                        setReferencePose(lastPose);
                        yield estimateClosestToReferencePose(cameraResult, referencePose);
                    }
                    case AVERAGE_BEST_TARGETS -> estimateAverageBestTargetsPose(cameraResult);
                    case MULTI_TAG_PNP_ON_RIO -> {
                        if (cameraMatrix.isEmpty() || distCoeffs.isEmpty()) {
                            DriverStation.reportWarning(
                                    "No camera calibration data provided for multi-tag-on-rio",
                                    Thread.currentThread().getStackTrace());
                            yield update(cameraResult, this.multiTagFallbackStrategy);
                        }
                        var res = estimateRioMultiTagPose(cameraResult, cameraMatrix.get(), distCoeffs.get());
                        if (res.isEmpty()) {
                            yield update(
                                    cameraResult,
                                    cameraMatrix,
                                    distCoeffs,
                                    constrainedPnpParams,
                                    this.multiTagFallbackStrategy);
                        }
                        yield res;
                    }
                    case MULTI_TAG_PNP_ON_COPROCESSOR -> {
                        if (cameraResult.getMultiTagResult().isEmpty()) {
                            yield update(cameraResult, this.multiTagFallbackStrategy);
                        }
                        yield estimateCoprocMultiTagPose(cameraResult);
                    }
                    case PNP_DISTANCE_TRIG_SOLVE -> estimatePnpDistanceTrigSolvePose(cameraResult);
                    case CONSTRAINED_SOLVEPNP -> {
                        boolean hasCalibData = cameraMatrix.isPresent() && distCoeffs.isPresent();
                        // Cannot run constrained solvePnP without calibration data; use fallback strategy
                        if (!hasCalibData) {
                            yield update(
                                    cameraResult,
                                    cameraMatrix,
                                    distCoeffs,
                                    Optional.empty(),
                                    this.multiTagFallbackStrategy);
                        }

                        if (constrainedPnpParams.isEmpty()) {
                            yield Optional.empty();
                        }

                        // Need heading if heading fixed
                        if (!constrainedPnpParams.get().headingFree
                                && headingBuffer.getSample(cameraResult.getTimestampSeconds()).isEmpty()) {
                            yield update(
                                    cameraResult,
                                    cameraMatrix,
                                    distCoeffs,
                                    Optional.empty(),
                                    this.multiTagFallbackStrategy);
                        }

                        Pose3d fieldToRobotSeed;
                        // Attempt to use multi-tag to get a pose estimate seed
                        if (cameraResult.getMultiTagResult().isPresent()) {
                            fieldToRobotSeed =
                                    Pose3d.kZero.plus(
                                            cameraResult
                                                    .getMultiTagResult()
                                                    .get()
                                                    .estimatedPose
                                                    .best
                                                    .plus(robotToCamera.inverse()));
                        } else {
                            // HACK - use fallback strategy to get a seed pose
                            // TODO - make sure nested update doesn't break state
                            var nestedUpdate =
                                    update(
                                            cameraResult,
                                            cameraMatrix,
                                            distCoeffs,
                                            Optional.empty(),
                                            this.multiTagFallbackStrategy);
                            if (nestedUpdate.isEmpty()) {
                                // Best we can do is bail
                                yield Optional.empty();
                            }
                            fieldToRobotSeed = nestedUpdate.get().estimatedPose;
                        }

                        if (!constrainedPnpParams.get().headingFree) {
                            // If heading fixed, force rotation component
                            fieldToRobotSeed =
                                    new Pose3d(
                                            fieldToRobotSeed.getTranslation(),
                                            new Rotation3d(
                                                    headingBuffer.getSample(cameraResult.getTimestampSeconds()).get()));
                        }

                        var pnpResult =
                                estimateConstrainedSolvepnpPose(
                                        cameraResult,
                                        cameraMatrix.get(),
                                        distCoeffs.get(),
                                        fieldToRobotSeed,
                                        constrainedPnpParams.get().headingFree,
                                        constrainedPnpParams.get().headingScaleFactor);
                        if (!pnpResult.isPresent()) {
                            yield update(
                                    cameraResult,
                                    cameraMatrix,
                                    distCoeffs,
                                    Optional.empty(),
                                    this.multiTagFallbackStrategy);
                        }
                        yield pnpResult;
                    }
                };

        if (estimatedPose.isPresent()) {
            lastPose = estimatedPose.get().estimatedPose;
        }

        return estimatedPose;
    }

    /**
     * @param cameraResult A pipeline result from the camera.
     * @return Whether or not pose estimation should be performed.
     */
    private boolean shouldEstimate(PhotonPipelineResult cameraResult) {
        // Time in the past -- give up, since the following if expects times > 0
        if (cameraResult.getTimestampSeconds() < 0) {
            return false;
        }

        // If no targets seen, trivial case -- can't do estimation
        return cameraResult.hasTargets();
    }

    /**
     * Return the estimated position of the robot by using distance data from best visible tag to
     * compute a Pose. This runs on the RoboRIO in order to access the robot's yaw heading, and MUST
     * have addHeadingData called every frame so heading data is up-to-date.
     *
     * <p>Yields a Pose2d in estimatedRobotPose (0 for z, roll, pitch)
     *
     * <p>https://www.chiefdelphi.com/t/frc-6328-mechanical-advantage-2025-build-thread/477314/98
     *
     * @param cameraResult A pipeline result from the camera.
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate, or an empty optional if there's no targets or heading data.
     */
    public Optional<EstimatedRobotPose> estimatePnpDistanceTrigSolvePose(
            PhotonPipelineResult cameraResult) {
        if (!shouldEstimate(cameraResult)) {
            return Optional.empty();
        }
        PhotonTrackedTarget bestTarget = cameraResult.getBestTarget();

        if (bestTarget == null) return Optional.empty();

        var headingSampleOpt = headingBuffer.getSample(cameraResult.getTimestampSeconds());
        if (headingSampleOpt.isEmpty()) {
            return Optional.empty();
        }
        Rotation2d headingSample = headingSampleOpt.get();

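        // Build the camera-to-tag translation: point the measured range along the observed
        // (negated) pitch/yaw, rotate by the camera's mounting rotation, project onto the floor
        // plane, then rotate into the field frame using the heading sample.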
        Translation2d camToTagTranslation =
                new Translation3d(
                                bestTarget.getBestCameraToTarget().getTranslation().getNorm(),
                                new Rotation3d(
                                        0,
                                        -Math.toRadians(bestTarget.getPitch()),
                                        -Math.toRadians(bestTarget.getYaw())))
                        .rotateBy(robotToCamera.getRotation())
                        .toTranslation2d()
                        .rotateBy(headingSample);

        var tagPoseOpt = fieldTags.getTagPose(bestTarget.getFiducialId());
        if (tagPoseOpt.isEmpty()) {
            return Optional.empty();
        }
        var tagPose2d = tagPoseOpt.get().toPose2d();

        Translation2d fieldToCameraTranslation =
                tagPose2d.getTranslation().plus(camToTagTranslation.unaryMinus());

        Translation2d camToRobotTranslation =
                robotToCamera.getTranslation().toTranslation2d().unaryMinus().rotateBy(headingSample);

        Pose2d robotPose =
                new Pose2d(fieldToCameraTranslation.plus(camToRobotTranslation), headingSample);

        return Optional.of(
                new EstimatedRobotPose(
                        new Pose3d(robotPose),
                        cameraResult.getTimestampSeconds(),
                        cameraResult.getTargets(),
                        PoseStrategy.PNP_DISTANCE_TRIG_SOLVE));
    }

    /**
     * Return the estimated position of the robot by solving a constrained version of the
     * Perspective-n-Point problem with the robot's drivebase flat on the floor. This computation
     * takes place on the RoboRIO, and typically takes no more than 2 ms. See {@link
     * org.photonvision.jni.ConstrainedSolvepnpJni} for the tuning handles this strategy exposes.
     * Internally, the cost function is a sum-squared of pixel reprojection error + (optionally)
     * heading error * heading scale factor. This strategy requires addHeadingData to be called
     * every frame so heading data is up-to-date.
     *
     * @param cameraResult A pipeline result from the camera.
     * @param cameraMatrix Camera intrinsics from camera calibration data.
     * @param distCoeffs Distortion coefficients from camera calibration data.
     * @param seedPose An initial guess at robot pose, refined via optimization. Better guesses will
     *     converge faster. Can come from any pose source, but some battle-tested sources are {@link
     *     #estimateCoprocMultiTagPose(PhotonPipelineResult)}, or {@link
     *     #estimateLowestAmbiguityPose(PhotonPipelineResult)} if MultiTag results aren't available.
     * @param headingFree If true, heading is completely free to vary. If false, heading excursions
     *     from the provided heading measurement will be penalized.
     * @param headingScaleFactor If headingFree is false, this weights the cost of changing our robot
     *     heading estimate against the tag corner reprojection error cost.
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate, or an empty optional if there's no targets or heading data, or if the
     *     solver fails to solve the problem.
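     *
     * <p>A hedged call sketch, assuming calibration data fetched via PhotonLib's
     * {@code camera.getCameraMatrix()} / {@code camera.getDistCoeffs()} (both return Optionals) and
     * an illustrative heading weight of 1.0:
     *
     * <pre>{@code
     * var camMat = camera.getCameraMatrix();
     * var dist = camera.getDistCoeffs();
     * var seed = estimator.estimateCoprocMultiTagPose(result);
     * if (camMat.isPresent() && dist.isPresent() && seed.isPresent()) {
     *     var est =
     *             estimator.estimateConstrainedSolvepnpPose(
     *                     result, camMat.get(), dist.get(), seed.get().estimatedPose, false, 1.0);
     * }
     * }</pre>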
     */
    public Optional<EstimatedRobotPose> estimateConstrainedSolvepnpPose(
            PhotonPipelineResult cameraResult,
            Matrix<N3, N3> cameraMatrix,
            Matrix<N8, N1> distCoeffs,
            Pose3d seedPose,
            boolean headingFree,
            double headingScaleFactor) {
        if (!shouldEstimate(cameraResult)) {
            return Optional.empty();
        }
        // Need heading if heading fixed
        if (!headingFree) {
            if (headingBuffer.getSample(cameraResult.getTimestampSeconds()).isEmpty()) {
                return Optional.empty();
            } else {
                // If heading fixed, force rotation component
                seedPose =
                        new Pose3d(
                                seedPose.getTranslation(),
                                new Rotation3d(headingBuffer.getSample(cameraResult.getTimestampSeconds()).get()));
            }
        }
        var pnpResult =
                VisionEstimation.estimateRobotPoseConstrainedSolvepnp(
                        cameraMatrix,
                        distCoeffs,
                        cameraResult.getTargets(),
                        robotToCamera,
                        seedPose,
                        fieldTags,
                        tagModel,
                        headingFree,
                        headingBuffer.getSample(cameraResult.getTimestampSeconds()).get(),
                        headingScaleFactor);
        if (!pnpResult.isPresent()) return Optional.empty();
        var best = Pose3d.kZero.plus(pnpResult.get().best); // field-to-robot

        return Optional.of(
                new EstimatedRobotPose(
                        best,
                        cameraResult.getTimestampSeconds(),
                        cameraResult.getTargets(),
                        PoseStrategy.CONSTRAINED_SOLVEPNP));
    }

    /**
     * Return the estimated position of the robot by using all visible tags to compute a single pose
     * estimate on coprocessor. This option needs to be enabled on the PhotonVision web UI as well.
     *
     * @param cameraResult A pipeline result from the camera.
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate, or an empty optional if there's no targets, no multi-tag results, or
     *     multi-tag is disabled in the web UI.
     */
    public Optional<EstimatedRobotPose> estimateCoprocMultiTagPose(
            PhotonPipelineResult cameraResult) {
        if (cameraResult.getMultiTagResult().isEmpty() || !shouldEstimate(cameraResult)) {
            return Optional.empty();
        }

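        // Chain: field origin -> camera (multi-tag PnP result), re-expressed relative to the
        // layout's configured origin, then camera -> robot via the inverse mounting transform.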
        var best_tf = cameraResult.getMultiTagResult().get().estimatedPose.best;
        var best =
                Pose3d.kZero
                        .plus(best_tf) // field-to-camera
                        .relativeTo(fieldTags.getOrigin())
                        .plus(robotToCamera.inverse()); // field-to-robot
        return Optional.of(
                new EstimatedRobotPose(
                        best,
                        cameraResult.getTimestampSeconds(),
                        cameraResult.getTargets(),
                        PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR));
    }

    /**
     * Return the estimated position of the robot by using all visible tags to compute a single pose
     * estimate on the RoboRIO. This can take significant time due to the RIO's limited computing
     * power.
     *
     * @param cameraResult A pipeline result from the camera.
     * @param cameraMatrix Camera intrinsics from camera calibration data.
     * @param distCoeffs Distortion coefficients from camera calibration data.
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate, or an empty optional if fewer than 2 targets are visible or SolvePnP
     *     fails.
     */
    public Optional<EstimatedRobotPose> estimateRioMultiTagPose(
            PhotonPipelineResult cameraResult, Matrix<N3, N3> cameraMatrix, Matrix<N8, N1> distCoeffs) {
        if (cameraResult.getTargets().size() < 2 || !shouldEstimate(cameraResult)) {
            return Optional.empty();
        }

        var pnpResult =
                VisionEstimation.estimateCamPosePNP(
                        cameraMatrix, distCoeffs, cameraResult.getTargets(), fieldTags, tagModel);
        if (!pnpResult.isPresent()) return Optional.empty();

        var best =
                Pose3d.kZero
                        .plus(pnpResult.get().best) // field-to-camera
                        .plus(robotToCamera.inverse()); // field-to-robot

        return Optional.of(
                new EstimatedRobotPose(
                        best,
                        cameraResult.getTimestampSeconds(),
                        cameraResult.getTargets(),
                        PoseStrategy.MULTI_TAG_PNP_ON_RIO));
    }

    /**
     * Return the estimated position of the robot with the lowest position ambiguity from a pipeline
     * result.
     *
     * @param cameraResult A pipeline result from the camera.
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate, or an empty optional if there's no targets.
     */
    public Optional<EstimatedRobotPose> estimateLowestAmbiguityPose(
            PhotonPipelineResult cameraResult) {
        if (!shouldEstimate(cameraResult)) {
            return Optional.empty();
        }
        PhotonTrackedTarget lowestAmbiguityTarget = null;

        double lowestAmbiguityScore = 10;

        for (PhotonTrackedTarget target : cameraResult.targets) {
            double targetPoseAmbiguity = target.getPoseAmbiguity();
            // Make sure the target is a Fiducial target.
            if (targetPoseAmbiguity != -1 && targetPoseAmbiguity < lowestAmbiguityScore) {
                lowestAmbiguityScore = targetPoseAmbiguity;
                lowestAmbiguityTarget = target;
            }
        }

        // Although targets are confirmed to exist, none of them may be fiducial targets.
        if (lowestAmbiguityTarget == null) return Optional.empty();

        int targetFiducialId = lowestAmbiguityTarget.getFiducialId();

        Optional<Pose3d> targetPosition = fieldTags.getTagPose(targetFiducialId);

        if (targetPosition.isEmpty()) {
            reportFiducialPoseError(targetFiducialId);
            return Optional.empty();
        }

        return Optional.of(
                new EstimatedRobotPose(
                        targetPosition
                                .get()
                                .transformBy(lowestAmbiguityTarget.getBestCameraToTarget().inverse())
                                .transformBy(robotToCamera.inverse()),
                        cameraResult.getTimestampSeconds(),
                        cameraResult.getTargets(),
                        PoseStrategy.LOWEST_AMBIGUITY));
    }

    /**
     * Return the estimated position of the robot using the target with the smallest difference
     * between the estimated and actual height of the camera.
     *
     * @param cameraResult A pipeline result from the camera.
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate, or an empty optional if there's no targets.
     */
    public Optional<EstimatedRobotPose> estimateClosestToCameraHeightPose(
            PhotonPipelineResult cameraResult) {
        if (!shouldEstimate(cameraResult)) {
            return Optional.empty();
        }
        double smallestHeightDifference = 10e9;
        EstimatedRobotPose closestHeightTarget = null;

        for (PhotonTrackedTarget target : cameraResult.targets) {
            int targetFiducialId = target.getFiducialId();

            // Don't report errors for non-fiducial targets. This could also be resolved by
            // adding -1 to the initial HashSet.
            if (targetFiducialId == -1) continue;

            Optional<Pose3d> targetPosition = fieldTags.getTagPose(target.getFiducialId());

            if (targetPosition.isEmpty()) {
                reportFiducialPoseError(target.getFiducialId());
                continue;
            }

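            // Evaluate both PnP solutions for this tag; keep whichever implied camera height is
            // closest to the camera's known mounting height.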
            double alternateTransformDelta =
                    Math.abs(
                            robotToCamera.getZ()
                                    - targetPosition
                                            .get()
                                            .transformBy(target.getAlternateCameraToTarget().inverse())
                                            .getZ());
            double bestTransformDelta =
                    Math.abs(
                            robotToCamera.getZ()
                                    - targetPosition
                                            .get()
                                            .transformBy(target.getBestCameraToTarget().inverse())
                                            .getZ());

            if (alternateTransformDelta < smallestHeightDifference) {
                smallestHeightDifference = alternateTransformDelta;
                closestHeightTarget =
                        new EstimatedRobotPose(
                                targetPosition
                                        .get()
                                        .transformBy(target.getAlternateCameraToTarget().inverse())
                                        .transformBy(robotToCamera.inverse()),
                                cameraResult.getTimestampSeconds(),
                                cameraResult.getTargets(),
                                PoseStrategy.CLOSEST_TO_CAMERA_HEIGHT);
            }

            if (bestTransformDelta < smallestHeightDifference) {
                smallestHeightDifference = bestTransformDelta;
                closestHeightTarget =
                        new EstimatedRobotPose(
                                targetPosition
                                        .get()
                                        .transformBy(target.getBestCameraToTarget().inverse())
                                        .transformBy(robotToCamera.inverse()),
                                cameraResult.getTimestampSeconds(),
                                cameraResult.getTargets(),
                                PoseStrategy.CLOSEST_TO_CAMERA_HEIGHT);
            }
        }

        // Need to null check here in case none of the provided targets are fiducial.
        return Optional.ofNullable(closestHeightTarget);
    }

    /**
     * Return the estimated position of the robot using the target whose estimated pose has the
     * smallest translation distance to the reference pose.
     *
     * @param cameraResult A pipeline result from the camera.
     * @param referencePose reference pose to check vector magnitude difference against.
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate, or an empty optional if there's no targets.
     */
    public Optional<EstimatedRobotPose> estimateClosestToReferencePose(
            PhotonPipelineResult cameraResult, Pose3d referencePose) {
        if (!shouldEstimate(cameraResult)) {
            return Optional.empty();
        }
        if (referencePose == null) {
            DriverStation.reportError(
                    "[PhotonPoseEstimator] Tried to use reference pose strategy without setting the reference!",
                    false);
            return Optional.empty();
        }

        double smallestPoseDelta = 10e9;
        EstimatedRobotPose lowestDeltaPose = null;

        for (PhotonTrackedTarget target : cameraResult.targets) {
            int targetFiducialId = target.getFiducialId();

            // Don't report errors for non-fiducial targets. This could also be resolved by
            // adding -1 to the initial HashSet.
            if (targetFiducialId == -1) continue;

            Optional<Pose3d> targetPosition = fieldTags.getTagPose(target.getFiducialId());

            if (targetPosition.isEmpty()) {
                reportFiducialPoseError(targetFiducialId);
                continue;
            }

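            // Compute candidate robot poses from both the best and alternate PnP solutions; the
            // comparisons below keep whichever lands closest to the reference.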
            Pose3d altTransformPosition =
                    targetPosition
                            .get()
                            .transformBy(target.getAlternateCameraToTarget().inverse())
                            .transformBy(robotToCamera.inverse());
            Pose3d bestTransformPosition =
                    targetPosition
                            .get()
                            .transformBy(target.getBestCameraToTarget().inverse())
                            .transformBy(robotToCamera.inverse());

            double altDifference = Math.abs(calculateDifference(referencePose, altTransformPosition));
            double bestDifference = Math.abs(calculateDifference(referencePose, bestTransformPosition));

            if (altDifference < smallestPoseDelta) {
                smallestPoseDelta = altDifference;
                lowestDeltaPose =
                        new EstimatedRobotPose(
                                altTransformPosition,
                                cameraResult.getTimestampSeconds(),
                                cameraResult.getTargets(),
                                PoseStrategy.CLOSEST_TO_REFERENCE_POSE);
            }
            if (bestDifference < smallestPoseDelta) {
                smallestPoseDelta = bestDifference;
                lowestDeltaPose =
                        new EstimatedRobotPose(
                                bestTransformPosition,
                                cameraResult.getTimestampSeconds(),
                                cameraResult.getTargets(),
                                PoseStrategy.CLOSEST_TO_REFERENCE_POSE);
            }
        }
        return Optional.ofNullable(lowestDeltaPose);
    }

    /**
     * Return the average of the best target poses using ambiguity as weight.
     *
     * @param cameraResult A pipeline result from the camera.
     * @return An {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to
     *     create the estimate, or an empty optional if there's no targets.
     */
    public Optional<EstimatedRobotPose> estimateAverageBestTargetsPose(
            PhotonPipelineResult cameraResult) {
        if (!shouldEstimate(cameraResult)) {
            return Optional.empty();
        }
        List<Pair<PhotonTrackedTarget, Pose3d>> estimatedRobotPoses = new ArrayList<>();
        double totalAmbiguity = 0;

        for (PhotonTrackedTarget target : cameraResult.targets) {
            int targetFiducialId = target.getFiducialId();

            // Don't report errors for non-fiducial targets. This could also be resolved by
            // adding -1 to the initial HashSet.
            if (targetFiducialId == -1) continue;

            Optional<Pose3d> targetPosition = fieldTags.getTagPose(target.getFiducialId());

            if (targetPosition.isEmpty()) {
                reportFiducialPoseError(targetFiducialId);
                continue;
            }

            double targetPoseAmbiguity = target.getPoseAmbiguity();

            // Pose ambiguity is 0, use that pose
            if (targetPoseAmbiguity == 0) {
                return Optional.of(
                        new EstimatedRobotPose(
                                targetPosition
                                        .get()
                                        .transformBy(target.getBestCameraToTarget().inverse())
                                        .transformBy(robotToCamera.inverse()),
                                cameraResult.getTimestampSeconds(),
                                cameraResult.getTargets(),
                                PoseStrategy.AVERAGE_BEST_TARGETS));
            }

            totalAmbiguity += 1.0 / target.getPoseAmbiguity();

            estimatedRobotPoses.add(
                    new Pair<>(
                            target,
                            targetPosition
                                    .get()
                                    .transformBy(target.getBestCameraToTarget().inverse())
                                    .transformBy(robotToCamera.inverse())));
        }

        // Take the average, weighting each pose by the inverse of its ambiguity

        Translation3d transform = new Translation3d();
        Rotation3d rotation = new Rotation3d();

        if (estimatedRobotPoses.isEmpty()) return Optional.empty();

        for (Pair<PhotonTrackedTarget, Pose3d> pair : estimatedRobotPoses) {
            // Total ambiguity is confirmed non-zero: if it were zero, that pose would already have
            // been returned above.
            double weight = (1.0 / pair.getFirst().getPoseAmbiguity()) / totalAmbiguity;
            Pose3d estimatedPose = pair.getSecond();
            transform = transform.plus(estimatedPose.getTranslation().times(weight));
            rotation = rotation.plus(estimatedPose.getRotation().times(weight));
        }

        return Optional.of(
                new EstimatedRobotPose(
                        new Pose3d(transform, rotation),
                        cameraResult.getTimestampSeconds(),
                        cameraResult.getTargets(),
                        PoseStrategy.AVERAGE_BEST_TARGETS));
    }

    /**
     * Difference is defined as the vector magnitude (translation distance) between the two poses.
     *
     * @return The absolute "difference" (>=0) between two Pose3ds.
     */
    private double calculateDifference(Pose3d x, Pose3d y) {
        return x.getTranslation().getDistance(y.getTranslation());
    }

    private void reportFiducialPoseError(int fiducialId) {
        if (!reportedErrors.contains(fiducialId)) {
            DriverStation.reportError(
                    "[PhotonPoseEstimator] Tried to get pose of unknown AprilTag: " + fiducialId, false);
            reportedErrors.add(fiducialId);
        }
    }
}