/*
 * MIT License
 *
 * Copyright (c) PhotonVision
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

package org.photonvision.simulation;

import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.apriltag.AprilTagFields;
import edu.wpi.first.cameraserver.CameraServer;
import edu.wpi.first.cscore.CvSource;
import edu.wpi.first.cscore.OpenCvLoader;
import edu.wpi.first.cscore.VideoSource.ConnectionStrategy;
import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Pair;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.util.PixelFormat;
import edu.wpi.first.util.WPIUtilJNI;
import edu.wpi.first.wpilibj.RobotController;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.RotatedRect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import org.photonvision.PhotonCamera;
import org.photonvision.PhotonTargetSortMode;
import org.photonvision.common.networktables.NTTopicSet;
import org.photonvision.estimation.CameraTargetRelation;
import org.photonvision.estimation.OpenCVHelp;
import org.photonvision.estimation.RotTrlTransform3d;
import org.photonvision.estimation.TargetModel;
import org.photonvision.estimation.VisionEstimation;
import org.photonvision.targeting.MultiTargetPNPResult;
import org.photonvision.targeting.PhotonPipelineResult;
import org.photonvision.targeting.PhotonTrackedTarget;
import org.photonvision.targeting.PnpResult;

/**
 * A handle for simulating {@link PhotonCamera} values. Processing simulated targets through this
 * class will change the associated PhotonCamera's results.
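 *
 * <p>A minimal usage sketch (the camera name and {@link VisionSystemSim} setup here are
 * illustrative placeholders, not the only way to use this class):
 *
 * <pre>{@code
 * // wraps the PhotonCamera named "front" with default (perfect) camera properties
 * var cameraSim = new PhotonCameraSim(new PhotonCamera("front"));
 * // register it with a vision system sim at the robot-to-camera transform
 * var visionSim = new VisionSystemSim("main");
 * visionSim.addCamera(cameraSim, new Transform3d());
 * }</pre>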
 */
@SuppressWarnings("unused")
public class PhotonCameraSim implements AutoCloseable {
    private final PhotonCamera cam;

    protected NTTopicSet ts = new NTTopicSet();
    private long heartbeatCounter = 1;

    /** This simulated camera's {@link SimCameraProperties} */
    public final SimCameraProperties prop;

    private long nextNTEntryTime = WPIUtilJNI.now();

    private double maxSightRangeMeters = Double.MAX_VALUE;
    private static final double kDefaultMinAreaPx = 100;
    private double minTargetAreaPercent;
    private PhotonTargetSortMode sortMode = PhotonTargetSortMode.Largest;

    private final AprilTagFieldLayout tagLayout;

    // video stream simulation
    private final CvSource videoSimRaw;
    private final Mat videoSimFrameRaw = new Mat();
    private boolean videoSimRawEnabled = true;
    private boolean videoSimWireframeEnabled = false;
    private double videoSimWireframeResolution = 0.1;
    private final CvSource videoSimProcessed;
    private final Mat videoSimFrameProcessed = new Mat();
    private boolean videoSimProcEnabled = true;

    static {
        OpenCvLoader.forceStaticLoad();
    }

    @Override
    public void close() {
        videoSimRaw.close();
        videoSimFrameRaw.release();
        videoSimProcessed.close();
        videoSimFrameProcessed.release();
    }

    /**
     * Constructs a handle for simulating {@link PhotonCamera} values. Processing simulated targets
     * through this class will change the associated PhotonCamera's results.
     *
     * <p><b>This constructor's camera has a 90 deg FOV with no simulated lag!</b>
     *
     * <p>By default, the minimum target area is 100 pixels and there is no maximum sight range.
     *
     * @param camera The camera to be simulated
     */
    public PhotonCameraSim(PhotonCamera camera) {
        this(camera, SimCameraProperties.PERFECT_90DEG());
    }

    /**
     * Constructs a handle for simulating {@link PhotonCamera} values. Processing simulated targets
     * through this class will change the associated PhotonCamera's results.
     *
     * <p>By default, the minimum target area is 100 pixels and there is no maximum sight range.
     *
     * @param camera The camera to be simulated
     * @param prop Properties of this camera such as FOV and FPS
     */
    public PhotonCameraSim(PhotonCamera camera, SimCameraProperties prop) {
        this(camera, prop, AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField));
    }

    /**
     * Constructs a handle for simulating {@link PhotonCamera} values. Processing simulated targets
     * through this class will change the associated PhotonCamera's results.
     *
     * <p>By default, the minimum target area is 100 pixels and there is no maximum sight range.
     *
     * @param camera The camera to be simulated
     * @param prop Properties of this camera such as FOV and FPS
     * @param tagLayout The {@link AprilTagFieldLayout} used to solve for tag positions.
     */
    public PhotonCameraSim(
            PhotonCamera camera, SimCameraProperties prop, AprilTagFieldLayout tagLayout) {
        this.cam = camera;
        this.prop = prop;
        this.tagLayout = tagLayout;
        setMinTargetAreaPixels(kDefaultMinAreaPx);

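        // Publish simulated raw (grayscale) and processed video streams to CameraServer,
        // mirroring the streams a real PhotonVision camera would serve.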
        videoSimRaw =
                CameraServer.putVideo(camera.getName() + "-raw", prop.getResWidth(), prop.getResHeight());
        videoSimRaw.setPixelFormat(PixelFormat.kGray);
        videoSimProcessed =
                CameraServer.putVideo(
                        camera.getName() + "-processed", prop.getResWidth(), prop.getResHeight());

        ts.removeEntries();
        ts.subTable = camera.getCameraTable();
        ts.updateEntries();
    }

    /**
     * Constructs a handle for simulating {@link PhotonCamera} values. Processing simulated targets
     * through this class will change the associated PhotonCamera's results.
     *
     * @param camera The camera to be simulated
     * @param prop Properties of this camera such as FOV and FPS
     * @param minTargetAreaPercent The minimum percentage (0 - 100) of the camera's image a detected
     *     target must take up to be processed. Match this with your contour filtering settings in
     *     the PhotonVision GUI.
     * @param maxSightRangeMeters The maximum distance at which the target can be seen by your
     *     camera. Note that the minimum target area of the image is separate from this.
     */
    public PhotonCameraSim(
            PhotonCamera camera,
            SimCameraProperties prop,
            double minTargetAreaPercent,
            double maxSightRangeMeters) {
        this(camera, prop, AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField));
        this.minTargetAreaPercent = minTargetAreaPercent;
        this.maxSightRangeMeters = maxSightRangeMeters;
    }

    /**
     * Constructs a handle for simulating {@link PhotonCamera} values. Processing simulated targets
     * through this class will change the associated PhotonCamera's results.
     *
     * @param camera The camera to be simulated
     * @param prop Properties of this camera such as FOV and FPS
     * @param minTargetAreaPercent The minimum percentage (0 - 100) of the camera's image a detected
     *     target must take up to be processed. Match this with your contour filtering settings in
     *     the PhotonVision GUI.
     * @param maxSightRangeMeters The maximum distance at which the target can be seen by your
     *     camera. Note that the minimum target area of the image is separate from this.
     * @param tagLayout AprilTag field layout to use for multi-tag estimation
     */
    public PhotonCameraSim(
            PhotonCamera camera,
            SimCameraProperties prop,
            double minTargetAreaPercent,
            double maxSightRangeMeters,
            AprilTagFieldLayout tagLayout) {
        this(camera, prop, tagLayout);
        this.minTargetAreaPercent = minTargetAreaPercent;
        this.maxSightRangeMeters = maxSightRangeMeters;
    }

    public PhotonCamera getCamera() {
        return cam;
    }

    public double getMinTargetAreaPercent() {
        return minTargetAreaPercent;
    }

    public double getMinTargetAreaPixels() {
        return minTargetAreaPercent / 100.0 * prop.getResArea();
    }

    public double getMaxSightRangeMeters() {
        return maxSightRangeMeters;
    }

    public PhotonTargetSortMode getTargetSortMode() {
        return sortMode;
    }

    public CvSource getVideoSimRaw() {
        return videoSimRaw;
    }

    public Mat getVideoSimFrameRaw() {
        return videoSimFrameRaw;
    }

    /**
     * Determines if this target's pose should be visible to the camera without considering its
     * projected image points. Does not account for image area.
     *
     * @param camPose Camera's 3d pose
     * @param target Vision target containing pose and shape
     * @return If this vision target can be seen before image projection.
     */
    public boolean canSeeTargetPose(Pose3d camPose, VisionTargetSim target) {
        var rel = new CameraTargetRelation(camPose, target.getPose());

        return (
        // target translation is within the camera's horizontal and vertical FOV
        (Math.abs(rel.camToTargYaw.getDegrees()) < prop.getHorizFOV().getDegrees() / 2)
                && (Math.abs(rel.camToTargPitch.getDegrees()) < prop.getVertFOV().getDegrees() / 2)
                // a planar target is occluded if the camera is behind it
                && (!target.getModel().isPlanar || Math.abs(rel.targToCamAngle.getDegrees()) < 90)
                // target is within the maximum sight range
                && (rel.camToTarg.getTranslation().getNorm() <= maxSightRangeMeters));
    }

    /**
     * Determines if all target points are inside the camera's image.
     *
     * @param points The target's 2d image points
     * @return True if all points are within the camera's resolution.
     */
    public boolean canSeeCorners(Point[] points) {
        for (var point : points) {
            if (MathUtil.clamp(point.x, 0, prop.getResWidth()) != point.x
                    || MathUtil.clamp(point.y, 0, prop.getResHeight()) != point.y) {
                return false; // point is outside of resolution
            }
        }
        return true;
    }

    /**
     * Determine if this camera should process a new frame based on performance metrics and the time
     * since the last update. This returns an Optional which is either empty if no update should
     * occur, or the timestamp in microseconds at which the frame should be received by NT. If a
     * timestamp is returned, the last frame update time becomes that timestamp.
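     *
     * <p>A sketch of a periodic update loop that consumes this, similar to what {@link
     * VisionSystemSim} does ({@code latencyMillis}, {@code cameraPose}, and {@code targets} are
     * placeholder values supplied by the caller):
     *
     * <pre>{@code
     * Optional<Long> timestamp = cameraSim.consumeNextEntryTime();
     * if (timestamp.isPresent()) {
     *     var result = cameraSim.process(latencyMillis, cameraPose, targets);
     *     cameraSim.submitProcessedFrame(result, timestamp.get());
     * }
     * }</pre>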
     *
     * @return Optional long which is empty while blocked or the NT entry timestamp in microseconds
     *     if ready
     */
    public Optional<Long> consumeNextEntryTime() {
        // check if this camera is ready for another frame update
        long now = WPIUtilJNI.now();
        long timestamp = -1;
        int iter = 0;
        // prepare next latest update
        while (now >= nextNTEntryTime) {
            timestamp = nextNTEntryTime;
            long frameTime = (long) (prop.estMsUntilNextFrame() * 1e3);
            nextNTEntryTime += frameTime;

            // if frame time is very small, avoid blocking
            if (iter++ > 50) {
                timestamp = now;
                nextNTEntryTime = now + frameTime;
                break;
            }
        }
        // return the timestamp of the latest update
        if (timestamp >= 0) return Optional.of(timestamp);
        // or this camera isn't ready to process yet
        else return Optional.empty();
    }

    /**
     * The minimum percentage (0 - 100) of the camera's image a detected target must take up to be
     * processed.
     */
    public void setMinTargetAreaPercent(double areaPercent) {
        this.minTargetAreaPercent = areaPercent;
    }

    /**
     * The minimum number of pixels a detected target must take up in the camera's image to be
     * processed.
     */
    public void setMinTargetAreaPixels(double areaPx) {
        this.minTargetAreaPercent = areaPx / prop.getResArea() * 100;
    }

    /**
     * The maximum distance at which the target can be seen by your camera. Note that the minimum
     * target area of the image is separate from this.
     */
    public void setMaxSightRange(double rangeMeters) {
        this.maxSightRangeMeters = rangeMeters;
    }

    /** Defines the order the targets are sorted in the pipeline result. */
    public void setTargetSortMode(PhotonTargetSortMode sortMode) {
        if (sortMode != null) this.sortMode = sortMode;
    }

    /**
     * Sets whether the raw video stream simulation is enabled.
     *
     * <p>Note: This may increase loop times.
     */
    public void enableRawStream(boolean enabled) {
        videoSimRawEnabled = enabled;
    }

    /**
     * Sets whether a wireframe of the field is drawn to the raw video stream.
     *
     * <p>Note: This will dramatically increase loop times.
     */
    public void enableDrawWireframe(boolean enabled) {
        videoSimWireframeEnabled = enabled;
    }

    /**
     * Sets the resolution of the drawn wireframe if enabled. Drawn line segments will be subdivided
     * into smaller segments based on a threshold set by the resolution.
     *
     * @param resolution Resolution as a fraction (0 - 1) of the video frame's diagonal length in
     *     pixels
     */
    public void setWireframeResolution(double resolution) {
        videoSimWireframeResolution = resolution;
    }

    /** Sets whether the processed video stream simulation is enabled. */
    public void enableProcessedStream(boolean enabled) {
        videoSimProcEnabled = enabled;
    }

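    /**
     * Simulates the camera processing one frame: the given targets are projected into the image,
     * pixel noise and visibility/area filtering are applied, the simulated video streams are
     * rendered, and a pipeline result is built (including a multi-tag estimate when more than one
     * layout tag is detected).
     *
     * @param latencyMillis Simulated processing latency of this frame in milliseconds
     * @param cameraPose The camera's 3d pose on the field
     * @param targets The vision targets on the field
     * @return The simulated pipeline result. Pass this to {@link
     *     #submitProcessedFrame(PhotonPipelineResult)} to publish it to NT.
     */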
    public PhotonPipelineResult process(
            double latencyMillis, Pose3d cameraPose, List<VisionTargetSim> targets) {
        // sort targets by distance to camera, farthest first, so closer targets are drawn on top
        targets = new ArrayList<>(targets);
        targets.sort(
                (t1, t2) -> {
                    double dist1 = t1.getPose().getTranslation().getDistance(cameraPose.getTranslation());
                    double dist2 = t2.getPose().getTranslation().getDistance(cameraPose.getTranslation());
                    if (dist1 == dist2) return 0;
                    return dist1 < dist2 ? 1 : -1;
                });
        // all targets visible before noise
        var visibleTgts = new ArrayList<Pair<VisionTargetSim, Point[]>>();
        // all targets actually detected by camera (after noise)
        var detectableTgts = new ArrayList<PhotonTrackedTarget>();
        // basis change from world coordinates to camera coordinates
        var camRt = RotTrlTransform3d.makeRelativeTo(cameraPose);

        // reset our frame
        VideoSimUtil.updateVideoProp(videoSimRaw, prop);
        VideoSimUtil.updateVideoProp(videoSimProcessed, prop);
        Size videoFrameSize = new Size(prop.getResWidth(), prop.getResHeight());
        Mat.zeros(videoFrameSize, CvType.CV_8UC1).assignTo(videoSimFrameRaw);

        for (var tgt : targets) {
            // pose isn't visible, skip to next
            if (!canSeeTargetPose(cameraPose, tgt)) continue;

            // find target's 3d corner points
            var fieldCorners = tgt.getFieldVertices();
            if (tgt.getModel().isSpherical) { // target is spherical
                var model = tgt.getModel();
                // orient the model to the camera (like a sprite/decal) so it appears similar
                // regardless of view
                fieldCorners =
                        model.getFieldVertices(
                                TargetModel.getOrientedPose(
                                        tgt.getPose().getTranslation(), cameraPose.getTranslation()));
            }
            // project 3d target points into 2d image points
            var imagePoints =
                    OpenCVHelp.projectPoints(prop.getIntrinsics(), prop.getDistCoeffs(), camRt, fieldCorners);
            // spherical targets need a rotated rectangle of their midpoints for visualization
            if (tgt.getModel().isSpherical) {
                var center = OpenCVHelp.avgPoint(imagePoints);
                int l = 0, t, b, r = 0;
                // reference point (left side midpoint)
                for (int i = 1; i < 4; i++) {
                    if (imagePoints[i].x < imagePoints[l].x) l = i;
                }
                var lc = imagePoints[l];
                // determine top, right, bottom midpoints
                double[] angles = new double[4];
                t = (l + 1) % 4;
                b = (l + 1) % 4;
                for (int i = 0; i < 4; i++) {
                    if (i == l) continue;
                    var ic = imagePoints[i];
                    angles[i] = Math.atan2(lc.y - ic.y, ic.x - lc.x);
                    if (angles[i] >= angles[t]) t = i;
                    if (angles[i] <= angles[b]) b = i;
                }
                for (int i = 0; i < 4; i++) {
                    if (i != t && i != l && i != b) r = i;
                }
                // create RotatedRect from midpoints
                var rect =
                        new RotatedRect(
                                new Point(center.x, center.y),
                                new Size(imagePoints[r].x - lc.x, imagePoints[b].y - imagePoints[t].y),
                                Math.toDegrees(-angles[r]));
                // set target corners to rect corners
                Point[] points = new Point[4];
                rect.points(points);
                imagePoints = points;
            }
            // save visible targets for raw video stream simulation
            visibleTgts.add(new Pair<>(tgt, imagePoints));
            // estimate pixel noise
            var noisyTargetCorners = prop.estPixelNoise(imagePoints);
            // find the minimum area rectangle of target corners
            var minAreaRect = OpenCVHelp.getMinAreaRect(noisyTargetCorners);
            Point[] minAreaRectPts = new Point[4];
            minAreaRect.points(minAreaRectPts);
            // find the (naive) 2d yaw/pitch
            var centerPt = minAreaRect.center;
            var centerRot = prop.getPixelRot(centerPt);
            // find contour area
            double areaPercent = prop.getContourAreaPercent(noisyTargetCorners);

            // projected target can't be detected, skip to next
            if (!(canSeeCorners(noisyTargetCorners) && areaPercent >= minTargetAreaPercent)) continue;

            var pnpSim = new PnpResult();
            if (tgt.fiducialID >= 0 && tgt.getFieldVertices().size() == 4) { // single AprilTag solvePNP
                pnpSim =
                        OpenCVHelp.solvePNP_SQUARE(
                                        prop.getIntrinsics(),
                                        prop.getDistCoeffs(),
                                        tgt.getModel().vertices,
                                        noisyTargetCorners)
                                .get();
            }

            detectableTgts.add(
                    new PhotonTrackedTarget(
                            -Math.toDegrees(centerRot.getZ()),
                            -Math.toDegrees(centerRot.getY()),
                            areaPercent,
                            Math.toDegrees(centerRot.getX()),
                            tgt.fiducialID,
                            -1,
                            -1,
                            pnpSim.best,
                            pnpSim.alt,
                            pnpSim.ambiguity,
                            OpenCVHelp.pointsToCorners(minAreaRectPts),
                            OpenCVHelp.pointsToCorners(noisyTargetCorners)));
        }
        // render visible tags to raw video frame
        if (videoSimRawEnabled) {
            // draw field wireframe
            if (videoSimWireframeEnabled) {
                VideoSimUtil.drawFieldWireframe(
                        camRt,
                        prop,
                        videoSimWireframeResolution,
                        1.5,
                        new Scalar(80),
                        6,
                        1,
                        new Scalar(30),
                        videoSimFrameRaw);
            }

            // draw targets
            for (var pair : visibleTgts) {
                var tgt = pair.getFirst();
                var corn = pair.getSecond();

                if (tgt.fiducialID >= 0) { // apriltags
                    VideoSimUtil.warp36h11TagImage(tgt.fiducialID, corn, true, videoSimFrameRaw);
                } else if (!tgt.getModel().isSpherical) { // non-spherical targets
                    var contour = corn;
                    // visualization can't handle non-convex projections of 3d models
                    if (!tgt.getModel().isPlanar) {
                        contour = OpenCVHelp.getConvexHull(contour);
                    }
                    VideoSimUtil.drawPoly(contour, -1, new Scalar(255), true, videoSimFrameRaw);
                } else { // spherical targets
                    VideoSimUtil.drawInscribedEllipse(corn, new Scalar(255), videoSimFrameRaw);
                }
            }
            videoSimRaw.putFrame(videoSimFrameRaw);
        } else videoSimRaw.setConnectionStrategy(ConnectionStrategy.kForceClose);
        // draw/annotate target detection outline on processed view
        if (videoSimProcEnabled) {
            Imgproc.cvtColor(videoSimFrameRaw, videoSimFrameProcessed, Imgproc.COLOR_GRAY2BGR);
            Imgproc.drawMarker( // crosshair
                    videoSimFrameProcessed,
                    new Point(prop.getResWidth() / 2.0, prop.getResHeight() / 2.0),
                    new Scalar(0, 255, 0),
                    Imgproc.MARKER_CROSS,
                    (int) VideoSimUtil.getScaledThickness(15, videoSimFrameProcessed),
                    (int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed),
                    Imgproc.LINE_AA);
            for (var tgt : detectableTgts) {
                if (tgt.getFiducialId() >= 0) { // apriltags
                    VideoSimUtil.drawTagDetection(
                            tgt.getFiducialId(),
                            OpenCVHelp.cornersToPoints(tgt.getDetectedCorners()),
                            videoSimFrameProcessed);
                } else { // other targets
                    // bounding rectangle
                    Imgproc.rectangle(
                            videoSimFrameProcessed,
                            OpenCVHelp.getBoundingRect(OpenCVHelp.cornersToPoints(tgt.getDetectedCorners())),
                            new Scalar(0, 0, 255),
                            (int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed),
                            Imgproc.LINE_AA);

                    VideoSimUtil.drawPoly(
                            OpenCVHelp.cornersToPoints(tgt.getMinAreaRectCorners()),
                            (int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed),
                            new Scalar(255, 30, 30),
                            true,
                            videoSimFrameProcessed);
                }
            }
            videoSimProcessed.putFrame(videoSimFrameProcessed);
        } else videoSimProcessed.setConnectionStrategy(ConnectionStrategy.kForceClose);

        // calculate multitag results
        Optional<MultiTargetPNPResult> multitagResult = Optional.empty();
        // TODO: Implement ATFL subscribing in backend
        // var tagLayout = cam.getAprilTagFieldLayout();
        var visibleLayoutTags = VisionEstimation.getVisibleLayoutTags(detectableTgts, tagLayout);
        if (visibleLayoutTags.size() > 1) {
            List<Short> usedIDs =
                    visibleLayoutTags.stream().map(t -> (short) t.ID).sorted().collect(Collectors.toList());
            var pnpResult =
                    VisionEstimation.estimateCamPosePNP(
                            prop.getIntrinsics(),
                            prop.getDistCoeffs(),
                            detectableTgts,
                            tagLayout,
                            TargetModel.kAprilTag36h11);

            if (pnpResult.isPresent()) {
                multitagResult = Optional.of(new MultiTargetPNPResult(pnpResult.get(), usedIDs));
            }
        }

        // sort target order
        if (sortMode != null) {
            detectableTgts.sort(sortMode.getComparator());
        }

        // put this simulated data to NT
        var now = RobotController.getFPGATime();
        return new PhotonPipelineResult(
                heartbeatCounter,
                now - (long) (latencyMillis * 1000),
                now,
                // Pretend like we heard a pong recently
                1000L + (long) ((Math.random() - 0.5) * 50),
                detectableTgts,
                multitagResult);
    }

    /**
     * Simulate one processed frame of vision data, putting one result to NT at the current
     * timestamp. Image capture time is assumed to be (current time - latency).
     *
     * @param result The pipeline result to submit
     */
    public void submitProcessedFrame(PhotonPipelineResult result) {
        submitProcessedFrame(result, WPIUtilJNI.now());
    }

    /**
     * Simulate one processed frame of vision data, putting one result to NT. Image capture timestamp
     * overrides {@link PhotonPipelineResult#getTimestampSeconds() getTimestampSeconds()} for more
     * precise latency simulation.
     *
     * @param result The pipeline result to submit
     * @param receiveTimestamp The (sim) timestamp when this result was read by NT in microseconds
     */
    public void submitProcessedFrame(PhotonPipelineResult result, long receiveTimestamp) {
        ts.latencyMillisEntry.set(result.metadata.getLatencyMillis(), receiveTimestamp);

        // Results are now dynamically sized, so let's guess 1024 bytes is big enough
        ts.resultPublisher.set(result, 1024);

        boolean hasTargets = result.hasTargets();
        ts.hasTargetEntry.set(hasTargets, receiveTimestamp);
        if (!hasTargets) {
            ts.targetPitchEntry.set(0.0, receiveTimestamp);
            ts.targetYawEntry.set(0.0, receiveTimestamp);
            ts.targetAreaEntry.set(0.0, receiveTimestamp);
            ts.targetPoseEntry.set(new Transform3d(), receiveTimestamp);
            ts.targetSkewEntry.set(0.0, receiveTimestamp);
        } else {
            var bestTarget = result.getBestTarget();

            ts.targetPitchEntry.set(bestTarget.getPitch(), receiveTimestamp);
            ts.targetYawEntry.set(bestTarget.getYaw(), receiveTimestamp);
            ts.targetAreaEntry.set(bestTarget.getArea(), receiveTimestamp);
            ts.targetSkewEntry.set(bestTarget.getSkew(), receiveTimestamp);

            var transform = bestTarget.getBestCameraToTarget();
            ts.targetPoseEntry.set(transform, receiveTimestamp);
        }

        ts.cameraIntrinsicsPublisher.set(prop.getIntrinsics().getData(), receiveTimestamp);
        ts.cameraDistortionPublisher.set(prop.getDistCoeffs().getData(), receiveTimestamp);

        ts.heartbeatPublisher.set(heartbeatCounter, receiveTimestamp);
        heartbeatCounter += 1;
    }
}