/*
 * MIT License
 *
 * Copyright (c) PhotonVision
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

package org.photonvision.simulation;

import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.apriltag.AprilTagFields;
import edu.wpi.first.cameraserver.CameraServer;
import edu.wpi.first.cscore.CvSource;
import edu.wpi.first.cscore.OpenCvLoader;
import edu.wpi.first.cscore.VideoSource.ConnectionStrategy;
import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Pair;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.util.PixelFormat;
import edu.wpi.first.util.WPIUtilJNI;
import edu.wpi.first.wpilibj.RobotController;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.RotatedRect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import org.photonvision.PhotonCamera;
import org.photonvision.PhotonTargetSortMode;
import org.photonvision.common.networktables.NTTopicSet;
import org.photonvision.estimation.CameraTargetRelation;
import org.photonvision.estimation.OpenCVHelp;
import org.photonvision.estimation.RotTrlTransform3d;
import org.photonvision.estimation.TargetModel;
import org.photonvision.estimation.VisionEstimation;
import org.photonvision.targeting.MultiTargetPNPResult;
import org.photonvision.targeting.PhotonPipelineResult;
import org.photonvision.targeting.PhotonTrackedTarget;
import org.photonvision.targeting.PnpResult;

/**
 * A handle for simulating {@link PhotonCamera} values. Processing simulated targets through this
 * class will change the associated PhotonCamera's results.
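 *
 * <p>A minimal usage sketch, assuming this camera is driven by a {@code VisionSystemSim} in
 * simulation (names like {@code robotPoseMeters} are placeholders):
 *
 * <pre>{@code
 * PhotonCamera camera = new PhotonCamera("front");
 * PhotonCameraSim cameraSim = new PhotonCameraSim(camera, SimCameraProperties.PERFECT_90DEG());
 *
 * VisionSystemSim visionSim = new VisionSystemSim("main");
 * visionSim.addAprilTags(AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField));
 * visionSim.addCamera(cameraSim, new Transform3d());
 *
 * // in simulationPeriodic():
 * visionSim.update(robotPoseMeters);
 * }</pre>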
 */
@SuppressWarnings("unused")
public class PhotonCameraSim implements AutoCloseable {
    private final PhotonCamera cam;

    @SuppressWarnings("doclint")
    protected NTTopicSet ts = new NTTopicSet();

    private long heartbeatCounter = 1;

    /** This simulated camera's {@link SimCameraProperties} */
    public final SimCameraProperties prop;

    private long nextNTEntryTime = WPIUtilJNI.now();

    private double maxSightRangeMeters = Double.MAX_VALUE;
    private static final double kDefaultMinAreaPx = 100;
    private double minTargetAreaPercent;
    private PhotonTargetSortMode sortMode = PhotonTargetSortMode.Largest;

    private final AprilTagFieldLayout tagLayout;

    // video stream simulation
    private final CvSource videoSimRaw;
    private final Mat videoSimFrameRaw = new Mat();
    private boolean videoSimRawEnabled = true;
    private boolean videoSimWireframeEnabled = false;
    private double videoSimWireframeResolution = 0.1;
    private final CvSource videoSimProcessed;
    private final Mat videoSimFrameProcessed = new Mat();
    private boolean videoSimProcEnabled = true;

    static {
        OpenCvLoader.forceStaticLoad();
    }

    @Override
    public void close() {
        videoSimRaw.close();
        videoSimFrameRaw.release();
        videoSimProcessed.close();
        videoSimFrameProcessed.release();
    }

    /**
     * Constructs a handle for simulating {@link PhotonCamera} values. Processing simulated targets
     * through this class will change the associated PhotonCamera's results.
     *
     * <p><b>This constructor's camera has a 90 deg FOV with no simulated lag!</b>
     *
     * <p>By default, the minimum target area is 100 pixels and there is no maximum sight range.
     *
     * @param camera The camera to be simulated
     */
    public PhotonCameraSim(PhotonCamera camera) {
        this(camera, SimCameraProperties.PERFECT_90DEG());
    }

    /**
     * Constructs a handle for simulating {@link PhotonCamera} values. Processing simulated targets
     * through this class will change the associated PhotonCamera's results.
     *
     * <p>By default, the minimum target area is 100 pixels and there is no maximum sight range.
     *
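     * <p>A configuration sketch with custom properties (these {@code SimCameraProperties} setters
     * are part of PhotonVision's simulation API; the exact values here are illustrative):
     *
     * <pre>{@code
     * var props = new SimCameraProperties();
     * props.setCalibration(960, 720, Rotation2d.fromDegrees(90)); // resolution and diagonal FOV
     * props.setCalibError(0.25, 0.08); // average and std dev of pixel error
     * props.setFPS(30);
     * props.setAvgLatencyMs(35);
     * props.setLatencyStdDevMs(5);
     * var cameraSim = new PhotonCameraSim(camera, props);
     * }</pre>
     *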
     * @param camera The camera to be simulated
     * @param prop Properties of this camera such as FOV and FPS
     */
    public PhotonCameraSim(PhotonCamera camera, SimCameraProperties prop) {
        this(camera, prop, AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField));
    }

    /**
     * Constructs a handle for simulating {@link PhotonCamera} values. Processing simulated targets
     * through this class will change the associated PhotonCamera's results.
     *
     * <p>By default, the minimum target area is 100 pixels and there is no maximum sight range.
     *
     * @param camera The camera to be simulated
     * @param prop Properties of this camera such as FOV and FPS
     * @param tagLayout The {@link AprilTagFieldLayout} used to solve for tag positions.
     */
    public PhotonCameraSim(
            PhotonCamera camera, SimCameraProperties prop, AprilTagFieldLayout tagLayout) {
        this.cam = camera;
        this.prop = prop;
        this.tagLayout = tagLayout;
        setMinTargetAreaPixels(kDefaultMinAreaPx);

        videoSimRaw =
                CameraServer.putVideo(camera.getName() + "-raw", prop.getResWidth(), prop.getResHeight());
        videoSimRaw.setPixelFormat(PixelFormat.kGray);
        videoSimProcessed =
                CameraServer.putVideo(
                        camera.getName() + "-processed", prop.getResWidth(), prop.getResHeight());

        ts.removeEntries();
        ts.subTable = camera.getCameraTable();
        ts.updateEntries();
    }

    /**
     * Constructs a handle for simulating {@link PhotonCamera} values. Processing simulated targets
     * through this class will change the associated PhotonCamera's results.
     *
     * @param camera The camera to be simulated
     * @param prop Properties of this camera such as FOV and FPS
     * @param minTargetAreaPercent The minimum percentage (0 - 100) a detected target must take up of
     *     the camera's image to be processed. Match this with your contour filtering settings in the
     *     PhotonVision GUI.
     * @param maxSightRangeMeters The maximum distance at which the target is visible to this camera.
     *     Note that the minimum target area check is separate from this.
     */
    public PhotonCameraSim(
            PhotonCamera camera,
            SimCameraProperties prop,
            double minTargetAreaPercent,
            double maxSightRangeMeters) {
        this(camera, prop, AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField));
        this.minTargetAreaPercent = minTargetAreaPercent;
        this.maxSightRangeMeters = maxSightRangeMeters;
    }

    /**
     * Constructs a handle for simulating {@link PhotonCamera} values. Processing simulated targets
     * through this class will change the associated PhotonCamera's results.
     *
     * @param camera The camera to be simulated
     * @param prop Properties of this camera such as FOV and FPS
     * @param minTargetAreaPercent The minimum percentage (0 - 100) a detected target must take up of
     *     the camera's image to be processed. Match this with your contour filtering settings in the
     *     PhotonVision GUI.
     * @param maxSightRangeMeters The maximum distance at which the target is visible to this camera.
     *     Note that the minimum target area check is separate from this.
     * @param tagLayout AprilTag field layout to use for multi-tag estimation
     */
    public PhotonCameraSim(
            PhotonCamera camera,
            SimCameraProperties prop,
            double minTargetAreaPercent,
            double maxSightRangeMeters,
            AprilTagFieldLayout tagLayout) {
        // delegate to the three-arg constructor so the given tag layout is actually used
        this(camera, prop, tagLayout);
        this.minTargetAreaPercent = minTargetAreaPercent;
        this.maxSightRangeMeters = maxSightRangeMeters;
    }

    /**
     * Returns the camera being simulated.
     *
     * @return The camera
     */
    public PhotonCamera getCamera() {
        return cam;
    }

    /**
     * Returns the minimum percentage (0 - 100) a detected target must take up of the camera's image
     * to be processed.
     *
     * @return The percentage
     */
    public double getMinTargetAreaPercent() {
        return minTargetAreaPercent;
    }

    /**
     * Returns the minimum number of pixels a detected target must take up in the camera's image to be
     * processed.
     *
     * @return The number of pixels
     */
    public double getMinTargetAreaPixels() {
        return minTargetAreaPercent / 100.0 * prop.getResArea();
    }

    /**
     * Returns the maximum distance at which the target is visible to this camera. Note that the
     * minimum target area check is separate from this.
     *
     * @return The distance in meters
     */
    public double getMaxSightRangeMeters() {
        return maxSightRangeMeters;
    }

    /**
     * Returns the order the targets are sorted in the pipeline result.
     *
     * @return The target sorting order
     */
    public PhotonTargetSortMode getTargetSortMode() {
        return sortMode;
    }

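    /**
     * Returns the {@link CvSource} used to simulate this camera's raw video stream.
     *
     * @return The raw video stream source
     */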
    public CvSource getVideoSimRaw() {
        return videoSimRaw;
    }

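    /**
     * Returns the {@link Mat} that the raw video stream simulation draws to.
     *
     * @return The raw video frame
     */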
    public Mat getVideoSimFrameRaw() {
        return videoSimFrameRaw;
    }

    /**
     * Determines if this target's pose should be visible to the camera without considering its
     * projected image points. Does not account for image area.
     *
     * @param camPose Camera's 3d pose
     * @param target Vision target containing pose and shape
     * @return If this vision target can be seen before image projection.
     */
    public boolean canSeeTargetPose(Pose3d camPose, VisionTargetSim target) {
        var rel = new CameraTargetRelation(camPose, target.getPose());

        return (
        // target translation is within the camera's horizontal and vertical FOV
        (Math.abs(rel.camToTargYaw.getDegrees()) < prop.getHorizFOV().getDegrees() / 2)
                && (Math.abs(rel.camToTargPitch.getDegrees()) < prop.getVertFOV().getDegrees() / 2)
                // camera is not viewing the back of a planar target, where it would be occluded
                && (!target.getModel().isPlanar
                        || Math.abs(rel.targToCamAngle.getDegrees()) < 90)
                // target is within the maximum sight range
                && (rel.camToTarg.getTranslation().getNorm() <= maxSightRangeMeters));
    }

    /**
     * Determines if all target points are inside the camera's image.
     *
     * @param points The target's 2d image points
     * @return True if all the target points are inside the camera's image, false otherwise.
     */
    public boolean canSeeCorners(Point[] points) {
        for (var point : points) {
            if (MathUtil.clamp(point.x, 0, prop.getResWidth()) != point.x
                    || MathUtil.clamp(point.y, 0, prop.getResHeight()) != point.y) {
                return false; // point is outside of resolution
            }
        }
        return true;
    }

    /**
     * Determine if this camera should process a new frame based on performance metrics and the time
     * since the last update. This returns an Optional which is either empty if no update should occur
     * or a Long timestamp in microseconds of when the frame should be received by NT. If a timestamp
     * is returned, the last frame update time becomes that timestamp.
     *
     * @return Optional long which is empty while blocked or the NT entry timestamp in microseconds if
     *     ready
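     *
     * <p>A minimal usage sketch for a periodic sim loop (this mirrors how a vision system
     * simulation would typically drive this camera; names here are illustrative):
     *
     * <pre>{@code
     * cameraSim.consumeNextEntryTime().ifPresent(timestampMicros -> {
     *     var result = cameraSim.process(latencyMillis, cameraPose, visibleTargets);
     *     cameraSim.submitProcessedFrame(result, timestampMicros);
     * });
     * }</pre>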
     */
    public Optional<Long> consumeNextEntryTime() {
        // check if this camera is ready for another frame update
        long now = WPIUtilJNI.now();
        long timestamp = -1;
        int iter = 0;
        // prepare next latest update
        while (now >= nextNTEntryTime) {
            timestamp = nextNTEntryTime;
            long frameTime = (long) (prop.estMsUntilNextFrame() * 1e3);
            nextNTEntryTime += frameTime;

            // if frame time is very small, avoid blocking
            if (iter++ > 50) {
                timestamp = now;
                nextNTEntryTime = now + frameTime;
                break;
            }
        }
        // return the timestamp of the latest update
        if (timestamp >= 0) return Optional.of(timestamp);
        // or this camera isn't ready to process yet
        else return Optional.empty();
    }

    /**
     * Sets the minimum percentage (0 - 100) a detected target must take up of the camera's image to
     * be processed.
     *
     * @param areaPercent The percentage
     */
    public void setMinTargetAreaPercent(double areaPercent) {
        this.minTargetAreaPercent = areaPercent;
    }

    /**
     * Sets the minimum number of pixels a detected target must take up in the camera's image to be
     * processed.
     *
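     * <p>For example, a 640x480 camera has 307200 pixels of image area, so
     * {@code setMinTargetAreaPixels(100)} corresponds to a minimum area of roughly 0.033 percent.
     *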
     * @param areaPx The number of pixels
     */
    public void setMinTargetAreaPixels(double areaPx) {
        this.minTargetAreaPercent = areaPx / prop.getResArea() * 100;
    }

    /**
     * Sets the maximum distance at which the target is visible to this camera. Note that the minimum
     * target area check is separate from this.
     *
     * @param rangeMeters The distance in meters
     */
    public void setMaxSightRange(double rangeMeters) {
        this.maxSightRangeMeters = rangeMeters;
    }

    /**
     * Defines the order the targets are sorted in the pipeline result.
     *
     * @param sortMode The target sorting order
     */
    public void setTargetSortMode(PhotonTargetSortMode sortMode) {
        if (sortMode != null) this.sortMode = sortMode;
    }

    /**
     * Sets whether the raw video stream simulation is enabled.
     *
     * <p>Note: This may increase loop times.
     *
     * @param enabled Whether or not to enable the raw video stream
     */
    public void enableRawStream(boolean enabled) {
        videoSimRawEnabled = enabled;
    }

    /**
     * Sets whether a wireframe of the field is drawn to the raw video stream.
     *
     * <p>Note: This will dramatically increase loop times.
     *
     * @param enabled Whether or not to enable the wireframe in the raw video stream
     */
    public void enableDrawWireframe(boolean enabled) {
        videoSimWireframeEnabled = enabled;
    }

    /**
     * Sets the resolution of the drawn wireframe if enabled. Drawn line segments will be subdivided
     * into smaller segments based on a threshold set by the resolution.
     *
     * @param resolution Resolution as a fraction (0 - 1) of the video frame's diagonal length in
     *     pixels
     */
    public void setWireframeResolution(double resolution) {
        videoSimWireframeResolution = resolution;
    }

    /**
     * Sets whether the processed video stream simulation is enabled.
     *
     * @param enabled Whether or not to enable the processed video stream
     */
    public void enableProcessedStream(boolean enabled) {
        videoSimProcEnabled = enabled;
    }

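    /**
     * Generates a simulated {@link PhotonPipelineResult} from the given targets as seen from the
     * given camera pose. Targets are projected into the image, filtered by visibility and minimum
     * area, given simulated pixel noise, and solved for single-tag and multi-tag PNP where
     * applicable. The raw and processed video streams are also updated if enabled.
     *
     * @param latencyMillis The simulated latency of this result in milliseconds
     * @param cameraPose The camera's 3d pose on the field
     * @param targets The vision targets which may be visible to this camera
     * @return The simulated pipeline result
     */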
    public PhotonPipelineResult process(
            double latencyMillis, Pose3d cameraPose, List<VisionTargetSim> targets) {
        // sort targets by distance from the camera, farthest first, so closer targets are
        // drawn last (on top) in the raw video stream
        targets = new ArrayList<>(targets);
        targets.sort(
                (t1, t2) -> {
                    double dist1 = t1.getPose().getTranslation().getDistance(cameraPose.getTranslation());
                    double dist2 = t2.getPose().getTranslation().getDistance(cameraPose.getTranslation());
                    if (dist1 == dist2) return 0;
                    return dist1 < dist2 ? 1 : -1;
                });
        // all targets visible before noise
        var visibleTgts = new ArrayList<Pair<VisionTargetSim, Point[]>>();
        // all targets actually detected by camera (after noise)
        var detectableTgts = new ArrayList<PhotonTrackedTarget>();
        // basis change from world coordinates to camera coordinates
        var camRt = RotTrlTransform3d.makeRelativeTo(cameraPose);

        // reset our frame
        VideoSimUtil.updateVideoProp(videoSimRaw, prop);
        VideoSimUtil.updateVideoProp(videoSimProcessed, prop);
        Size videoFrameSize = new Size(prop.getResWidth(), prop.getResHeight());
        Mat.zeros(videoFrameSize, CvType.CV_8UC1).assignTo(videoSimFrameRaw);

        for (var tgt : targets) {
            // cap the number of targets in one result
            if (detectableTgts.size() >= 50) {
                break;
            }

            // pose isn't visible, skip to next
            if (!canSeeTargetPose(cameraPose, tgt)) continue;

            // find target's 3d corner points
            var fieldCorners = tgt.getFieldVertices();
            if (tgt.getModel().isSpherical) { // target is spherical
                var model = tgt.getModel();
                // orient the model to the camera (like a sprite/decal) so it appears similar regardless of
                // view
                fieldCorners =
                        model.getFieldVertices(
                                TargetModel.getOrientedPose(
                                        tgt.getPose().getTranslation(), cameraPose.getTranslation()));
            }
            // project 3d target points into 2d image points
            var imagePoints =
                    OpenCVHelp.projectPoints(prop.getIntrinsics(), prop.getDistCoeffs(), camRt, fieldCorners);
            // spherical targets need a rotated rectangle of their midpoints for visualization
            if (tgt.getModel().isSpherical) {
                var center = OpenCVHelp.avgPoint(imagePoints);
                int l = 0, t, b, r = 0;
                // reference point (left side midpoint)
                for (int i = 1; i < 4; i++) {
                    if (imagePoints[i].x < imagePoints[l].x) l = i;
                }
                var lc = imagePoints[l];
                // determine top, right, bottom midpoints
                double[] angles = new double[4];
                t = (l + 1) % 4;
                b = (l + 1) % 4;
                for (int i = 0; i < 4; i++) {
                    if (i == l) continue;
                    var ic = imagePoints[i];
                    angles[i] = Math.atan2(lc.y - ic.y, ic.x - lc.x);
                    if (angles[i] >= angles[t]) t = i;
                    if (angles[i] <= angles[b]) b = i;
                }
                for (int i = 0; i < 4; i++) {
                    if (i != t && i != l && i != b) r = i;
                }
                // create RotatedRect from midpoints
                var rect =
                        new RotatedRect(
                                new Point(center.x, center.y),
                                new Size(imagePoints[r].x - lc.x, imagePoints[b].y - imagePoints[t].y),
                                Math.toDegrees(-angles[r]));
                // set target corners to rect corners
                Point[] points = new Point[4];
                rect.points(points);
                imagePoints = points;
            }
            // save visible targets for raw video stream simulation
            visibleTgts.add(new Pair<>(tgt, imagePoints));
            // estimate pixel noise
            var noisyTargetCorners = prop.estPixelNoise(imagePoints);
            // find the minimum area rectangle of target corners
            var minAreaRect = OpenCVHelp.getMinAreaRect(noisyTargetCorners);
            Point[] minAreaRectPts = new Point[4];
            minAreaRect.points(minAreaRectPts);
            // find the (naive) 2d yaw/pitch
            var centerPt = minAreaRect.center;
            var centerRot = prop.getPixelRot(centerPt);
            // find contour area
            double areaPercent = prop.getContourAreaPercent(noisyTargetCorners);

            // projected target can't be detected, skip to next
            if (!(canSeeCorners(noisyTargetCorners) && areaPercent >= minTargetAreaPercent)) continue;

            var pnpSim = new PnpResult();
            if (tgt.fiducialID >= 0 && tgt.getFieldVertices().size() == 4) { // single AprilTag solvePNP
                pnpSim =
                        OpenCVHelp.solvePNP_SQUARE(
                                        prop.getIntrinsics(),
                                        prop.getDistCoeffs(),
                                        tgt.getModel().vertices,
                                        noisyTargetCorners)
                                .get();
            }

            detectableTgts.add(
                    new PhotonTrackedTarget(
                            -Math.toDegrees(centerRot.getZ()),
                            -Math.toDegrees(centerRot.getY()),
                            areaPercent,
                            Math.toDegrees(centerRot.getX()),
                            tgt.fiducialID,
                            -1,
                            -1,
                            pnpSim.best,
                            pnpSim.alt,
                            pnpSim.ambiguity,
                            OpenCVHelp.pointsToCorners(minAreaRectPts),
                            OpenCVHelp.pointsToCorners(noisyTargetCorners)));
        }
        // render visible tags to raw video frame
        if (videoSimRawEnabled) {
            // draw field wireframe
            if (videoSimWireframeEnabled) {
                VideoSimUtil.drawFieldWireframe(
                        camRt,
                        prop,
                        videoSimWireframeResolution,
                        1.5,
                        new Scalar(80),
                        6,
                        1,
                        new Scalar(30),
                        videoSimFrameRaw);
            }

            // draw targets
            for (var pair : visibleTgts) {
                var tgt = pair.getFirst();
                var corn = pair.getSecond();

                if (tgt.fiducialID >= 0) { // apriltags
                    VideoSimUtil.warp36h11TagImage(tgt.fiducialID, corn, true, videoSimFrameRaw);
                } else if (!tgt.getModel().isSpherical) { // non-spherical targets
                    var contour = corn;
                    if (!tgt.getModel()
                            .isPlanar) { // visualization can't handle non-convex projections of 3d models
                        contour = OpenCVHelp.getConvexHull(contour);
                    }
                    VideoSimUtil.drawPoly(contour, -1, new Scalar(255), true, videoSimFrameRaw);
                } else { // spherical targets
                    VideoSimUtil.drawInscribedEllipse(corn, new Scalar(255), videoSimFrameRaw);
                }
            }
            videoSimRaw.putFrame(videoSimFrameRaw);
        } else videoSimRaw.setConnectionStrategy(ConnectionStrategy.kForceClose);
        // draw/annotate target detection outline on processed view
        if (videoSimProcEnabled) {
            Imgproc.cvtColor(videoSimFrameRaw, videoSimFrameProcessed, Imgproc.COLOR_GRAY2BGR);
            Imgproc.drawMarker( // crosshair
                    videoSimFrameProcessed,
                    new Point(prop.getResWidth() / 2.0, prop.getResHeight() / 2.0),
                    new Scalar(0, 255, 0),
                    Imgproc.MARKER_CROSS,
                    (int) VideoSimUtil.getScaledThickness(15, videoSimFrameProcessed),
                    (int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed),
                    Imgproc.LINE_AA);
            for (var tgt : detectableTgts) {
                if (tgt.getFiducialId() >= 0) { // apriltags
                    VideoSimUtil.drawTagDetection(
                            tgt.getFiducialId(),
                            OpenCVHelp.cornersToPoints(tgt.getDetectedCorners()),
                            videoSimFrameProcessed);
                } else { // other targets
                    // bounding rectangle
                    Imgproc.rectangle(
                            videoSimFrameProcessed,
                            OpenCVHelp.getBoundingRect(OpenCVHelp.cornersToPoints(tgt.getDetectedCorners())),
                            new Scalar(0, 0, 255),
                            (int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed),
                            Imgproc.LINE_AA);

                    VideoSimUtil.drawPoly(
                            OpenCVHelp.cornersToPoints(tgt.getMinAreaRectCorners()),
                            (int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed),
                            new Scalar(255, 30, 30),
                            true,
                            videoSimFrameProcessed);
                }
            }
            videoSimProcessed.putFrame(videoSimFrameProcessed);
        } else videoSimProcessed.setConnectionStrategy(ConnectionStrategy.kForceClose);

        // calculate multitag results
        Optional<MultiTargetPNPResult> multitagResult = Optional.empty();
        // TODO: Implement ATFL subscribing in backend
        // var tagLayout = cam.getAprilTagFieldLayout();
        var visibleLayoutTags = VisionEstimation.getVisibleLayoutTags(detectableTgts, tagLayout);
        if (visibleLayoutTags.size() > 1) {
            List<Short> usedIDs =
                    visibleLayoutTags.stream().map(t -> (short) t.ID).sorted().collect(Collectors.toList());
            var pnpResult =
                    VisionEstimation.estimateCamPosePNP(
                            prop.getIntrinsics(),
                            prop.getDistCoeffs(),
                            detectableTgts,
                            tagLayout,
                            TargetModel.kAprilTag36h11);

            if (pnpResult.isPresent()) {
                multitagResult = Optional.of(new MultiTargetPNPResult(pnpResult.get(), usedIDs));
            }
        }

        // sort target order
        if (sortMode != null) {
            detectableTgts.sort(sortMode.getComparator());
        }

        // put this simulated data to NT
        var now = RobotController.getFPGATime();
        var ret =
                new PhotonPipelineResult(
                        heartbeatCounter,
                        now - (long) (latencyMillis * 1000),
                        now,
                        // Pretend like we heard a pong recently
                        1000L + (long) ((Math.random() - 0.5) * 50),
                        detectableTgts,
                        multitagResult);
        return ret;
    }

    /**
     * Simulate one processed frame of vision data, putting one result to NT at the current timestamp.
     * Image capture time is assumed to be (current time - latency).
     *
     * @param result The pipeline result to submit
     */
    public void submitProcessedFrame(PhotonPipelineResult result) {
        submitProcessedFrame(result, WPIUtilJNI.now());
    }

    /**
     * Simulate one processed frame of vision data, putting one result to NT. Image capture timestamp
     * overrides {@link PhotonPipelineResult#getTimestampSeconds() getTimestampSeconds()} for more
     * precise latency simulation.
     *
     * @param result The pipeline result to submit
     * @param receiveTimestamp The (sim) timestamp when this result was read by NT in microseconds
     */
    public void submitProcessedFrame(PhotonPipelineResult result, long receiveTimestamp) {
        ts.latencyMillisEntry.set(result.metadata.getLatencyMillis(), receiveTimestamp);

        // Results are now dynamically sized, so let's guess 1024 bytes is big enough
        ts.resultPublisher.set(result, 1024);

        boolean hasTargets = result.hasTargets();
        ts.hasTargetEntry.set(hasTargets, receiveTimestamp);
        if (!hasTargets) {
            ts.targetPitchEntry.set(0.0, receiveTimestamp);
            ts.targetYawEntry.set(0.0, receiveTimestamp);
            ts.targetAreaEntry.set(0.0, receiveTimestamp);
            ts.targetPoseEntry.set(new Transform3d(), receiveTimestamp);
            ts.targetSkewEntry.set(0.0, receiveTimestamp);
        } else {
            var bestTarget = result.getBestTarget();

            ts.targetPitchEntry.set(bestTarget.getPitch(), receiveTimestamp);
            ts.targetYawEntry.set(bestTarget.getYaw(), receiveTimestamp);
            ts.targetAreaEntry.set(bestTarget.getArea(), receiveTimestamp);
            ts.targetSkewEntry.set(bestTarget.getSkew(), receiveTimestamp);

            var transform = bestTarget.getBestCameraToTarget();
            ts.targetPoseEntry.set(transform, receiveTimestamp);
        }

        ts.cameraIntrinsicsPublisher.set(prop.getIntrinsics().getData(), receiveTimestamp);
        ts.cameraDistortionPublisher.set(prop.getDistCoeffs().getData(), receiveTimestamp);

        ts.heartbeatPublisher.set(heartbeatCounter, receiveTimestamp);
        heartbeatCounter += 1;
    }
}