/*
 * MIT License
 *
 * Copyright (c) PhotonVision
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

package org.photonvision.simulation;

import edu.wpi.first.apriltag.AprilTag;
import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.wpilibj.Timer;
import edu.wpi.first.wpilibj.smartdashboard.Field2d;
import edu.wpi.first.wpilibj.smartdashboard.SmartDashboard;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.photonvision.PhotonCamera;
import org.photonvision.estimation.TargetModel;

/**
 * A simulated vision system involving one or more cameras and coprocessors mounted on a mobile
 * robot running PhotonVision, detecting targets placed on the field. {@link VisionTargetSim}s
 * added to this class will be detected by the {@link PhotonCameraSim}s added to this class. This
 * class should be updated periodically with the robot's current pose in order to publish the
 * simulated camera target info.
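 *
 * <p>A rough usage sketch; the drivetrain simulation, camera name, field layout, and mounting
 * transform below are illustrative placeholders, not required values:
 *
 * <pre>{@code
 * VisionSystemSim visionSim = new VisionSystemSim("main");
 * // add the AprilTags from a WPILib field layout as vision targets
 * visionSim.addAprilTags(AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField));
 * // wrap a PhotonCamera in a camera simulation and mount it on the robot
 * PhotonCameraSim cameraSim =
 *         new PhotonCameraSim(new PhotonCamera("front"), new SimCameraProperties());
 * visionSim.addCamera(
 *         cameraSim, new Transform3d(new Translation3d(0.3, 0, 0.2), new Rotation3d()));
 *
 * // in simulationPeriodic(), feed the current simulated robot pose
 * visionSim.update(drivetrainSim.getPose());
 * }</pre>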
 */
public class VisionSystemSim {
    private final Map<String, PhotonCameraSim> camSimMap = new HashMap<>();
    private static final double kBufferLengthSeconds = 1.5;
    // save robot-to-camera for each camera over time (Pose3d is easily interpolatable)
    private final Map<PhotonCameraSim, TimeInterpolatableBuffer<Pose3d>> camTrfMap = new HashMap<>();

    // interpolate drivetrain with twists
    private final TimeInterpolatableBuffer<Pose3d> robotPoseBuffer =
            TimeInterpolatableBuffer.createBuffer(kBufferLengthSeconds);

    private final Map<String, Set<VisionTargetSim>> targetSets = new HashMap<>();

    private final Field2d dbgField;

    private final Transform3d kEmptyTrf = new Transform3d();

    /**
     * Creates a simulated vision system involving one or more cameras and coprocessors mounted on
     * a mobile robot running PhotonVision, detecting targets placed on the field. {@link
     * VisionTargetSim}s added to this class will be detected by the {@link PhotonCameraSim}s added
     * to this class. This class should be updated periodically with the robot's current pose in
     * order to publish the simulated camera target info.
     *
     * @param visionSystemName The specific identifier for this vision system in NetworkTables.
     */
    public VisionSystemSim(String visionSystemName) {
        dbgField = new Field2d();
        String tableName = "VisionSystemSim-" + visionSystemName;
        SmartDashboard.putData(tableName + "/Sim Field", dbgField);
    }

    /** Get one of the simulated cameras. */
    public Optional<PhotonCameraSim> getCameraSim(String name) {
        return Optional.ofNullable(camSimMap.get(name));
    }

    /** Get all the simulated cameras. */
    public Collection<PhotonCameraSim> getCameraSims() {
        return camSimMap.values();
    }

    /**
     * Adds a simulated camera to this vision system with a specified robot-to-camera transformation.
     * The vision targets registered with this vision system simulation will be observed by the
     * simulated {@link PhotonCamera}.
     *
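     * <p>A minimal sketch; the calibration values and mounting transform are only examples:
     *
     * <pre>{@code
     * SimCameraProperties props = new SimCameraProperties();
     * props.setCalibration(640, 480, Rotation2d.fromDegrees(90));
     * PhotonCameraSim cameraSim = new PhotonCameraSim(new PhotonCamera("front"), props);
     * // camera 0.25 m forward of the robot origin and 0.5 m up, facing forward
     * visionSim.addCamera(
     *         cameraSim, new Transform3d(new Translation3d(0.25, 0, 0.5), new Rotation3d()));
     * }</pre>
     *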
     * @param cameraSim The camera simulation
     * @param robotToCamera The transform from the robot pose to the camera pose
     */
    public void addCamera(PhotonCameraSim cameraSim, Transform3d robotToCamera) {
        var existing = camSimMap.putIfAbsent(cameraSim.getCamera().getName(), cameraSim);
        if (existing == null) {
            camTrfMap.put(cameraSim, TimeInterpolatableBuffer.createBuffer(kBufferLengthSeconds));
            camTrfMap
                    .get(cameraSim)
                    .addSample(Timer.getFPGATimestamp(), new Pose3d().plus(robotToCamera));
        }
    }

    /** Remove all simulated cameras from this vision system. */
    public void clearCameras() {
        camSimMap.clear();
        camTrfMap.clear();
    }

    /**
     * Remove a simulated camera from this vision system.
     *
     * @return If the camera was present and removed
     */
    public boolean removeCamera(PhotonCameraSim cameraSim) {
        boolean success = camSimMap.remove(cameraSim.getCamera().getName()) != null;
        camTrfMap.remove(cameraSim);
        return success;
    }

    /**
     * Get a simulated camera's position relative to the robot. If the requested camera is invalid, an
     * empty optional is returned.
     *
     * @param cameraSim The specific camera to get the robot-to-camera transform of
     * @return The transform of this camera, or an empty optional if it is invalid
     */
    public Optional<Transform3d> getRobotToCamera(PhotonCameraSim cameraSim) {
        return getRobotToCamera(cameraSim, Timer.getFPGATimestamp());
    }

    /**
     * Get a simulated camera's position relative to the robot. If the requested camera is invalid, an
     * empty optional is returned.
     *
     * @param cameraSim The specific camera to get the robot-to-camera transform of
     * @param timeSeconds Timestamp in seconds of when the transform should be observed
     * @return The transform of this camera, or an empty optional if it is invalid
     */
    public Optional<Transform3d> getRobotToCamera(PhotonCameraSim cameraSim, double timeSeconds) {
        var trfBuffer = camTrfMap.get(cameraSim);
        if (trfBuffer == null) return Optional.empty();
        var sample = trfBuffer.getSample(timeSeconds);
        if (sample.isEmpty()) return Optional.empty();
        return Optional.of(new Transform3d(new Pose3d(), sample.orElse(new Pose3d())));
    }

    /**
     * Get a simulated camera's position on the field. If the requested camera is invalid, an empty
     * optional is returned.
     *
     * @param cameraSim The specific camera to get the field pose of
     * @return The pose of this camera, or an empty optional if it is invalid
     */
    public Optional<Pose3d> getCameraPose(PhotonCameraSim cameraSim) {
        return getCameraPose(cameraSim, Timer.getFPGATimestamp());
    }

    /**
     * Get a simulated camera's position on the field. If the requested camera is invalid, an empty
     * optional is returned.
     *
     * @param cameraSim The specific camera to get the field pose of
     * @param timeSeconds Timestamp in seconds of when the pose should be observed
     * @return The pose of this camera, or an empty optional if it is invalid
     */
    public Optional<Pose3d> getCameraPose(PhotonCameraSim cameraSim, double timeSeconds) {
        var robotToCamera = getRobotToCamera(cameraSim, timeSeconds);
        return robotToCamera.map(transform3d -> getRobotPose(timeSeconds).plus(transform3d));
    }

    /**
     * Adjust a camera's position relative to the robot. Use this if your camera is on a gimbal or
     * turret or some other mobile platform.
     *
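     * <p>For example, a camera riding on a turret might be re-aimed every loop; here {@code
     * turretYaw} is an illustrative Rotation2d taken from your own turret model:
     *
     * <pre>{@code
     * Transform3d robotToTurret =
     *         new Transform3d(
     *                 new Translation3d(0, 0, 0.5), new Rotation3d(0, 0, turretYaw.getRadians()));
     * Transform3d turretToCamera = new Transform3d(new Translation3d(0.1, 0, 0), new Rotation3d());
     * visionSim.adjustCamera(cameraSim, robotToTurret.plus(turretToCamera));
     * }</pre>
     *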
     * @param cameraSim The simulated camera to change the relative position of
     * @param robotToCamera New transform from the robot to the camera
     * @return If the cameraSim was valid and transform was adjusted
     */
    public boolean adjustCamera(PhotonCameraSim cameraSim, Transform3d robotToCamera) {
        var trfBuffer = camTrfMap.get(cameraSim);
        if (trfBuffer == null) return false;
        trfBuffer.addSample(Timer.getFPGATimestamp(), new Pose3d().plus(robotToCamera));
        return true;
    }

    /** Reset the previous transforms for all cameras to their current transform. */
    public void resetCameraTransforms() {
        for (var cam : camTrfMap.keySet()) resetCameraTransforms(cam);
    }

    /**
     * Reset the transform history for this camera to just the current transform.
     *
     * @return If the cameraSim was valid and transforms were reset
     */
    public boolean resetCameraTransforms(PhotonCameraSim cameraSim) {
        double now = Timer.getFPGATimestamp();
        var trfBuffer = camTrfMap.get(cameraSim);
        if (trfBuffer == null) return false;
        var lastTrf = new Transform3d(new Pose3d(), trfBuffer.getSample(now).orElse(new Pose3d()));
        trfBuffer.clear();
        adjustCamera(cameraSim, lastTrf);
        return true;
    }

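    /** Get all vision targets currently registered with this vision system, across every type. */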
    public Set<VisionTargetSim> getVisionTargets() {
        var all = new HashSet<VisionTargetSim>();
        for (var entry : targetSets.entrySet()) {
            all.addAll(entry.getValue());
        }
        return all;
    }

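    /**
     * Get the vision targets registered under the given type.
     *
     * @param type Type of target (e.g. "apriltag")
     * @return The registered targets of this type, or null if none have been added under this type
     */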
    public Set<VisionTargetSim> getVisionTargets(String type) {
        return targetSets.get(type);
    }

    /**
     * Adds targets on the field which your vision system is designed to detect. The {@link
     * PhotonCamera}s simulated from this system will report the location of the camera relative to
     * the subset of these targets which are visible from the given camera position.
     *
     * <p>By default these are added under the type "targets".
     *
     * @param targets Targets to add to the simulated field
     */
    public void addVisionTargets(VisionTargetSim... targets) {
        addVisionTargets("targets", targets);
    }

    /**
     * Adds targets on the field which your vision system is designed to detect. The {@link
     * PhotonCamera}s simulated from this system will report the location of the camera relative to
     * the subset of these targets which are visible from the given camera position.
     *
     * <p>The AprilTags from this layout will be added as vision targets under the type "apriltag".
     * The poses added preserve the tag layout's current alliance origin. If the tag layout's alliance
     * origin is changed, these added tags will have to be cleared and re-added.
     *
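     * <p>For example, one way to obtain a layout is from the stock WPILib field layouts:
     *
     * <pre>{@code
     * visionSim.addAprilTags(AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField));
     * }</pre>
     *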
     * @param tagLayout The field tag layout to get Apriltag poses and IDs from
     */
    public void addAprilTags(AprilTagFieldLayout tagLayout) {
        for (AprilTag tag : tagLayout.getTags()) {
            addVisionTargets(
                    "apriltag",
                    new VisionTargetSim(
                            tagLayout.getTagPose(tag.ID).get(), // preserve alliance rotation
                            TargetModel.kAprilTag36h11,
                            tag.ID));
        }
    }

    /**
     * Adds targets on the field which your vision system is designed to detect. The {@link
     * PhotonCamera}s simulated from this system will report the location of the camera relative to
     * the subset of these targets which are visible from the given camera position.
     *
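     * <p>A small sketch; the pose, dimensions, and type name are only examples:
     *
     * <pre>{@code
     * TargetModel stripModel = new TargetModel(0.36, 0.08); // a 0.36 x 0.08 meter planar target
     * Pose3d stripPose = new Pose3d(8, 4, 1, new Rotation3d());
     * visionSim.addVisionTargets("stripes", new VisionTargetSim(stripPose, stripModel));
     * }</pre>
     *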
     * @param type Type of target (e.g. "cargo").
     * @param targets Targets to add to the simulated field
     */
    public void addVisionTargets(String type, VisionTargetSim... targets) {
        targetSets.computeIfAbsent(type, k -> new HashSet<>());
        for (var tgt : targets) {
            targetSets.get(type).add(tgt);
        }
    }

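    /** Remove all registered vision targets, of every type, from this vision system. */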
    public void clearVisionTargets() {
        targetSets.clear();
    }

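    /** Remove all AprilTag vision targets (those registered under the type "apriltag"). */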
    public void clearAprilTags() {
        removeVisionTargets("apriltag");
    }

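    /**
     * Remove all vision targets registered under the given type.
     *
     * @param type Type of target (e.g. "apriltag")
     * @return The targets that were registered under this type, or null if there were none
     */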
    public Set<VisionTargetSim> removeVisionTargets(String type) {
        return targetSets.remove(type);
    }

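    /**
     * Remove specific vision targets from this vision system, regardless of their type.
     *
     * @param targets Targets to remove
     * @return The subset of the given targets which were found and removed
     */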
    public Set<VisionTargetSim> removeVisionTargets(VisionTargetSim... targets) {
        var removeList = List.of(targets);
        var removedSet = new HashSet<VisionTargetSim>();
        for (var entry : targetSets.entrySet()) {
            entry
                    .getValue()
                    .removeIf(
                            t -> {
                                if (removeList.contains(t)) {
                                    removedSet.add(t);
                                    return true;
                                } else return false;
                            });
        }
        return removedSet;
    }

    /** Get the latest robot pose in meters saved by the vision system. */
    public Pose3d getRobotPose() {
        return getRobotPose(Timer.getFPGATimestamp());
    }

    /**
     * Get the robot pose in meters saved by the vision system at this timestamp.
     *
     * @param timestamp Timestamp of the desired robot pose
     */
    public Pose3d getRobotPose(double timestamp) {
        return robotPoseBuffer.getSample(timestamp).orElse(new Pose3d());
    }

    /** Clears all previous robot poses and sets robotPose at current time. */
    public void resetRobotPose(Pose2d robotPose) {
        resetRobotPose(new Pose3d(robotPose));
    }

    /** Clears all previous robot poses and sets robotPose at current time. */
    public void resetRobotPose(Pose3d robotPose) {
        robotPoseBuffer.clear();
        robotPoseBuffer.addSample(Timer.getFPGATimestamp(), robotPose);
    }

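    /**
     * Get the debug {@link Field2d} this vision system publishes to SmartDashboard, which shows
     * the simulated robot pose, camera poses, and vision targets.
     */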
    public Field2d getDebugField() {
        return dbgField;
    }

    /**
     * Periodic update. Ensure this is called repeatedly; camera performance is used to automatically
     * determine if a new frame should be submitted.
     *
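     * <p>Typically called from {@code simulationPeriodic()}; here {@code drivetrainSim} is an
     * illustrative drivetrain simulation:
     *
     * <pre>{@code
     * visionSim.update(drivetrainSim.getPose());
     * }</pre>
     *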
     * @param robotPoseMeters The simulated robot pose in meters
     */
    public void update(Pose2d robotPoseMeters) {
        update(new Pose3d(robotPoseMeters));
    }

    /**
     * Periodic update. Ensure this is called repeatedly; camera performance is used to automatically
     * determine if a new frame should be submitted.
     *
     * @param robotPoseMeters The simulated robot pose in meters
     */
    public void update(Pose3d robotPoseMeters) {
        var targetTypes = targetSets.entrySet();
        // update vision targets on field
        targetTypes.forEach(
                entry ->
                        dbgField
                                .getObject(entry.getKey())
                                .setPoses(entry.getValue().stream().map(t -> t.getPose().toPose2d()).toList()));

        if (robotPoseMeters == null) return;

        // save "real" robot poses over time
        double now = Timer.getFPGATimestamp();
        robotPoseBuffer.addSample(now, robotPoseMeters);
        dbgField.setRobotPose(robotPoseMeters.toPose2d());

        var allTargets = new ArrayList<VisionTargetSim>();
        targetTypes.forEach((entry) -> allTargets.addAll(entry.getValue()));
        var visTgtPoses2d = new ArrayList<Pose2d>();
        var cameraPoses2d = new ArrayList<Pose2d>();
        boolean processed = false;
        // process each camera
        for (var camSim : camSimMap.values()) {
            // check if this camera is ready to process and get latency
            var optTimestamp = camSim.consumeNextEntryTime();
            if (optTimestamp.isEmpty()) continue;
            else processed = true;
            // when this result "was" read by NT
            long timestampNT = optTimestamp.get();
            // this result's processing latency in milliseconds
            double latencyMillis = camSim.prop.estLatencyMs();
            // the image capture timestamp in seconds of this result
            double timestampCapture = timestampNT / 1e6 - latencyMillis / 1e3;

            // use camera pose from the image capture timestamp
            Pose3d lateRobotPose = getRobotPose(timestampCapture);
            Pose3d lateCameraPose = lateRobotPose.plus(getRobotToCamera(camSim, timestampCapture).get());
            cameraPoses2d.add(lateCameraPose.toPose2d());

            // process a PhotonPipelineResult with visible targets
            var camResult = camSim.process(latencyMillis, lateCameraPose, allTargets);
            // publish this info to NT at estimated timestamp of receive
            camSim.submitProcessedFrame(camResult, timestampNT);
            // display debug results
            for (var target : camResult.getTargets()) {
                var trf = target.getBestCameraToTarget();
                if (trf.equals(kEmptyTrf)) continue;
                visTgtPoses2d.add(lateCameraPose.transformBy(trf).toPose2d());
            }
        }
        if (processed) dbgField.getObject("visibleTargetPoses").setPoses(visTgtPoses2d);
        if (!cameraPoses2d.isEmpty()) dbgField.getObject("cameras").setPoses(cameraPoses2d);
    }
}