/*
 * MIT License
 *
 * Copyright (c) PhotonVision
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

package org.photonvision.simulation;

import edu.wpi.first.apriltag.AprilTag;
import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.wpilibj.Timer;
import edu.wpi.first.wpilibj.smartdashboard.Field2d;
import edu.wpi.first.wpilibj.smartdashboard.SmartDashboard;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.photonvision.PhotonCamera;
import org.photonvision.estimation.TargetModel;

/**
 * A simulated vision system involving a camera(s) and coprocessor(s) mounted on a mobile robot
 * running PhotonVision, detecting targets placed on the field. {@link VisionTargetSim}s added to
 * this class will be detected by the {@link PhotonCameraSim}s added to this class. This class
 * should be updated periodically with the robot's current pose in order to publish the simulated
 * camera target info.
 */
public class VisionSystemSim {
  private final Map<String, PhotonCameraSim> camSimMap = new HashMap<>();
  private static final double kBufferLengthSeconds = 1.5;
  // save robot-to-camera for each camera over time (Pose3d is easily interpolatable)
  private final Map<PhotonCameraSim, TimeInterpolatableBuffer<Pose3d>> camTrfMap = new HashMap<>();

  // interpolate drivetrain with twists
  private final TimeInterpolatableBuffer<Pose3d> robotPoseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferLengthSeconds);

  private final Map<String, Set<VisionTargetSim>> targetSets = new HashMap<>();

  private final Field2d dbgField;

  private final Transform3d kEmptyTrf = new Transform3d();

  /**
   * A simulated vision system involving a camera(s) and coprocessor(s) mounted on a mobile robot
   * running PhotonVision, detecting targets placed on the field. {@link VisionTargetSim}s added to
   * this class will be detected by the {@link PhotonCameraSim}s added to this class. This class
   * should be updated periodically with the robot's current pose in order to publish the simulated
   * camera target info.
   *
   * @param visionSystemName The specific identifier for this vision system in NetworkTables.
   */
  public VisionSystemSim(String visionSystemName) {
    dbgField = new Field2d();
    String tableName = "VisionSystemSim-" + visionSystemName;
    SmartDashboard.putData(tableName + "/Sim Field", dbgField);
  }
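
  /*
   * Usage sketch (illustrative only, not part of this class): a typical simulation setup
   * constructs one VisionSystemSim, registers cameras and targets, and then feeds it the
   * simulated robot pose every loop so the registered cameras can publish results. The
   * "drivetrainSim" name below is an assumption for the example; substitute whatever supplies
   * your simulated robot pose.
   *
   *   VisionSystemSim visionSim = new VisionSystemSim("main");
   *
   *   // In simulationPeriodic():
   *   visionSim.update(drivetrainSim.getPose());
   */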

  /** Get one of the simulated cameras. */
  public Optional<PhotonCameraSim> getCameraSim(String name) {
    return Optional.ofNullable(camSimMap.get(name));
  }

  /** Get all the simulated cameras. */
  public Collection<PhotonCameraSim> getCameraSims() {
    return camSimMap.values();
  }

  /**
   * Adds a simulated camera to this vision system with a specified robot-to-camera transformation.
   * The vision targets registered with this vision system simulation will be observed by the
   * simulated {@link PhotonCamera}.
   *
   * @param cameraSim The camera simulation
   * @param robotToCamera The transform from the robot pose to the camera pose
   */
  public void addCamera(PhotonCameraSim cameraSim, Transform3d robotToCamera) {
    var existing = camSimMap.putIfAbsent(cameraSim.getCamera().getName(), cameraSim);
    if (existing == null) {
      camTrfMap.put(cameraSim, TimeInterpolatableBuffer.createBuffer(kBufferLengthSeconds));
      camTrfMap
          .get(cameraSim)
          .addSample(Timer.getFPGATimestamp(), new Pose3d().plus(robotToCamera));
    }
  }
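
  /*
   * Example (illustrative): registering a camera with its mounting transform. The translation and
   * rotation values are assumptions for the sketch; here the camera sits 0.3 m forward of robot
   * center, 0.2 m up, and is pitched upward by 15 degrees (negative pitch about +Y in WPILib's
   * convention).
   *
   *   PhotonCamera camera = new PhotonCamera("frontCam");
   *   PhotonCameraSim cameraSim = new PhotonCameraSim(camera);
   *   Transform3d robotToCamera =
   *       new Transform3d(
   *           new Translation3d(0.3, 0.0, 0.2),
   *           new Rotation3d(0.0, Math.toRadians(-15), 0.0));
   *   visionSim.addCamera(cameraSim, robotToCamera);
   */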

  /** Remove all simulated cameras from this vision system. */
  public void clearCameras() {
    camSimMap.clear();
    camTrfMap.clear();
  }

  /**
   * Remove a simulated camera from this vision system.
   *
   * @param cameraSim The camera to remove
   * @return If the camera was present and removed
   */
  public boolean removeCamera(PhotonCameraSim cameraSim) {
    @SuppressWarnings("resource")
    boolean success = camSimMap.remove(cameraSim.getCamera().getName()) != null;
    camTrfMap.remove(cameraSim);
    return success;
  }

  /**
   * Get a simulated camera's position relative to the robot. If the requested camera is invalid,
   * an empty optional is returned.
   *
   * @param cameraSim The specific camera to get the robot-to-camera transform of
   * @return The transform of this camera, or an empty optional if it is invalid
   */
  public Optional<Transform3d> getRobotToCamera(PhotonCameraSim cameraSim) {
    return getRobotToCamera(cameraSim, Timer.getFPGATimestamp());
  }

  /**
   * Get a simulated camera's position relative to the robot. If the requested camera is invalid,
   * an empty optional is returned.
   *
   * @param cameraSim The specific camera to get the robot-to-camera transform of
   * @param timeSeconds Timestamp in seconds of when the transform should be observed
   * @return The transform of this camera, or an empty optional if it is invalid
   */
  public Optional<Transform3d> getRobotToCamera(PhotonCameraSim cameraSim, double timeSeconds) {
    var trfBuffer = camTrfMap.get(cameraSim);
    if (trfBuffer == null) return Optional.empty();
    var sample = trfBuffer.getSample(timeSeconds);
    if (sample.isEmpty()) return Optional.empty();
    return Optional.of(new Transform3d(new Pose3d(), sample.orElse(new Pose3d())));
  }

  /**
   * Get a simulated camera's position on the field. If the requested camera is invalid, an empty
   * optional is returned.
   *
   * @param cameraSim The specific camera to get the field pose of
   * @return The pose of this camera, or an empty optional if it is invalid
   */
  public Optional<Pose3d> getCameraPose(PhotonCameraSim cameraSim) {
    return getCameraPose(cameraSim, Timer.getFPGATimestamp());
  }

  /**
   * Get a simulated camera's position on the field. If the requested camera is invalid, an empty
   * optional is returned.
   *
   * @param cameraSim The specific camera to get the field pose of
   * @param timeSeconds Timestamp in seconds of when the pose should be observed
   * @return The pose of this camera, or an empty optional if it is invalid
   */
  public Optional<Pose3d> getCameraPose(PhotonCameraSim cameraSim, double timeSeconds) {
    var robotToCamera = getRobotToCamera(cameraSim, timeSeconds);
    return robotToCamera.map(transform3d -> getRobotPose(timeSeconds).plus(transform3d));
  }

  /**
   * Adjust a camera's position relative to the robot. Use this if your camera is on a gimbal or
   * turret or some other mobile platform.
   *
   * @param cameraSim The simulated camera to change the relative position of
   * @param robotToCamera New transform from the robot to the camera
   * @return If the cameraSim was valid and transform was adjusted
   */
  public boolean adjustCamera(PhotonCameraSim cameraSim, Transform3d robotToCamera) {
    var trfBuffer = camTrfMap.get(cameraSim);
    if (trfBuffer == null) return false;
    trfBuffer.addSample(Timer.getFPGATimestamp(), new Pose3d().plus(robotToCamera));
    return true;
  }
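
  /*
   * Example (illustrative): keeping a turret-mounted camera's transform up to date. The
   * "turretToCamera" transform, "turretCamSim", and "turretAngleRads" names are assumptions for
   * the sketch; call adjustCamera whenever the turret moves so the buffered robot-to-camera
   * history stays accurate for late (latency-compensated) frames.
   *
   *   Transform3d turretToCamera = ...; // fixed mounting of the camera on the turret
   *   Transform3d robotToTurret =
   *       new Transform3d(new Translation3d(), new Rotation3d(0.0, 0.0, turretAngleRads));
   *   visionSim.adjustCamera(turretCamSim, robotToTurret.plus(turretToCamera));
   */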

  /** Reset the previous transforms for all cameras to their current transform. */
  public void resetCameraTransforms() {
    for (var cam : camTrfMap.keySet()) resetCameraTransforms(cam);
  }

  /**
   * Reset the transform history for this camera to just the current transform.
   *
   * @param cameraSim The camera to reset
   * @return If the cameraSim was valid and transforms were reset
   */
  public boolean resetCameraTransforms(PhotonCameraSim cameraSim) {
    double now = Timer.getFPGATimestamp();
    var trfBuffer = camTrfMap.get(cameraSim);
    if (trfBuffer == null) return false;
    var lastTrf = new Transform3d(new Pose3d(), trfBuffer.getSample(now).orElse(new Pose3d()));
    trfBuffer.clear();
    adjustCamera(cameraSim, lastTrf);
    return true;
  }

  /**
   * Returns all the vision targets on the field.
   *
   * @return The vision targets
   */
  public Set<VisionTargetSim> getVisionTargets() {
    var all = new HashSet<VisionTargetSim>();
    for (var entry : targetSets.entrySet()) {
      all.addAll(entry.getValue());
    }
    return all;
  }

  /**
   * Returns all the vision targets of the specified type on the field.
   *
   * @param type The type of vision targets to return
   * @return The vision targets
   */
  public Set<VisionTargetSim> getVisionTargets(String type) {
    return targetSets.get(type);
  }

  /**
   * Adds targets on the field which your vision system is designed to detect. The {@link
   * PhotonCamera}s simulated from this system will report the location of the camera relative to
   * the subset of these targets which are visible from the given camera position.
   *
   * <p>By default these are added under the type "targets".
   *
   * @param targets Targets to add to the simulated field
   */
  public void addVisionTargets(VisionTargetSim... targets) {
    addVisionTargets("targets", targets);
  }

  /**
   * Adds targets on the field which your vision system is designed to detect. The {@link
   * PhotonCamera}s simulated from this system will report the location of the camera relative to
   * the subset of these targets which are visible from the given camera position.
   *
   * <p>The AprilTags from this layout will be added as vision targets under the type "apriltag".
   * The poses added preserve the tag layout's current alliance origin. If the tag layout's
   * alliance origin is changed, these added tags will have to be cleared and re-added.
   *
   * @param tagLayout The field tag layout to get Apriltag poses and IDs from
   */
  public void addAprilTags(AprilTagFieldLayout tagLayout) {
    for (AprilTag tag : tagLayout.getTags()) {
      addVisionTargets(
          "apriltag",
          new VisionTargetSim(
              tagLayout.getTagPose(tag.ID).get(), // preserve alliance rotation
              TargetModel.kAprilTag36h11,
              tag.ID));
    }
  }
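
  /*
   * Example (illustrative): adding the official AprilTag layout as vision targets. How the layout
   * is loaded here is an assumption; any AprilTagFieldLayout works. Note the tags are added with
   * the layout's current alliance origin, so clear and re-add them if that origin changes.
   *
   *   AprilTagFieldLayout tagLayout = AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField);
   *   visionSim.addAprilTags(tagLayout);
   */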

  /**
   * Adds targets on the field which your vision system is designed to detect. The {@link
   * PhotonCamera}s simulated from this system will report the location of the camera relative to
   * the subset of these targets which are visible from the given camera position.
   *
   * @param type Type of target (e.g. "cargo").
   * @param targets Targets to add to the simulated field
   */
  public void addVisionTargets(String type, VisionTargetSim... targets) {
    targetSets.computeIfAbsent(type, k -> new HashSet<>());
    for (var tgt : targets) {
      targetSets.get(type).add(tgt);
    }
  }

  /** Removes every {@link VisionTargetSim} from the simulated field. */
  public void clearVisionTargets() {
    targetSets.clear();
  }

  /** Removes all simulated AprilTag targets from the simulated field. */
  public void clearAprilTags() {
    removeVisionTargets("apriltag");
  }

  /**
   * Removes every {@link VisionTargetSim} of the specified type from the simulated field.
   *
   * @param type Type of target (e.g. "cargo"). Same as the type passed into {@link
   *     #addVisionTargets(String, VisionTargetSim...)}
   * @return The removed targets, or null if no targets of the specified type exist
   */
  public Set<VisionTargetSim> removeVisionTargets(String type) {
    return targetSets.remove(type);
  }

  /**
   * Removes the specified {@link VisionTargetSim}s from the simulated field.
   *
   * @param targets The targets to remove
   * @return The targets that were actually removed
   */
  public Set<VisionTargetSim> removeVisionTargets(VisionTargetSim... targets) {
    var removeList = List.of(targets);
    var removedSet = new HashSet<VisionTargetSim>();
    for (var entry : targetSets.entrySet()) {
      entry
          .getValue()
          .removeIf(
              t -> {
                if (removeList.contains(t)) {
                  removedSet.add(t);
                  return true;
                } else return false;
              });
    }
    return removedSet;
  }

  /**
   * Get the latest robot pose in meters saved by the vision system.
   *
   * @return The latest robot pose
   */
  public Pose3d getRobotPose() {
    return getRobotPose(Timer.getFPGATimestamp());
  }

  /**
   * Get the robot pose in meters saved by the vision system at this timestamp.
   *
   * @param timestamp Timestamp of the desired robot pose
   * @return The robot pose
   */
  public Pose3d getRobotPose(double timestamp) {
    return robotPoseBuffer.getSample(timestamp).orElse(new Pose3d());
  }

  /**
   * Clears all previous robot poses and sets robotPose at current time.
   *
   * @param robotPose The robot pose
   */
  public void resetRobotPose(Pose2d robotPose) {
    resetRobotPose(new Pose3d(robotPose));
  }

  /**
   * Clears all previous robot poses and sets robotPose at current time.
   *
   * @param robotPose The robot pose
   */
  public void resetRobotPose(Pose3d robotPose) {
    robotPoseBuffer.clear();
    robotPoseBuffer.addSample(Timer.getFPGATimestamp(), robotPose);
  }

  public Field2d getDebugField() {
    return dbgField;
  }

  /**
   * Periodic update. Ensure this is called repeatedly-- camera performance is used to
   * automatically determine if a new frame should be submitted.
   *
   * @param robotPoseMeters The simulated robot pose in meters
   */
  public void update(Pose2d robotPoseMeters) {
    update(new Pose3d(robotPoseMeters));
  }

  /**
   * Periodic update. Ensure this is called repeatedly-- camera performance is used to
   * automatically determine if a new frame should be submitted.
   *
   * @param robotPoseMeters The simulated robot pose in meters
   */
  public void update(Pose3d robotPoseMeters) {
    var targetTypes = targetSets.entrySet();
    // update vision targets on field
    targetTypes.forEach(
        entry ->
            dbgField
                .getObject(entry.getKey())
                .setPoses(entry.getValue().stream().map(t -> t.getPose().toPose2d()).toList()));

    if (robotPoseMeters == null) return;

    // save "real" robot poses over time
    double now = Timer.getFPGATimestamp();
    robotPoseBuffer.addSample(now, robotPoseMeters);
    dbgField.setRobotPose(robotPoseMeters.toPose2d());

    var allTargets = new ArrayList<VisionTargetSim>();
    targetTypes.forEach((entry) -> allTargets.addAll(entry.getValue()));
    var visTgtPoses2d = new ArrayList<Pose2d>();
    var cameraPoses2d = new ArrayList<Pose2d>();
    boolean processed = false;
    // process each camera
    for (var camSim : camSimMap.values()) {
      // check if this camera is ready to process and get latency
      var optTimestamp = camSim.consumeNextEntryTime();
      if (optTimestamp.isEmpty()) continue;
      else processed = true;
      // when this result "was" read by NT
      long timestampNT = optTimestamp.get();
      // this result's processing latency in milliseconds
      double latencyMillis = camSim.prop.estLatencyMs();
      // the image capture timestamp in seconds of this result
      double timestampCapture = timestampNT / 1e6 - latencyMillis / 1e3;

      // use camera pose from the image capture timestamp
      Pose3d lateRobotPose = getRobotPose(timestampCapture);
      Pose3d lateCameraPose = lateRobotPose.plus(getRobotToCamera(camSim, timestampCapture).get());
      cameraPoses2d.add(lateCameraPose.toPose2d());

      // process a PhotonPipelineResult with visible targets
      var camResult = camSim.process(latencyMillis, lateCameraPose, allTargets);
      // publish this info to NT at estimated timestamp of receive
      camSim.submitProcessedFrame(camResult, timestampNT);
      // display debug results
      for (var target : camResult.getTargets()) {
        var trf = target.getBestCameraToTarget();
        if (trf.equals(kEmptyTrf)) continue;
        visTgtPoses2d.add(lateCameraPose.transformBy(trf).toPose2d());
      }
    }
    if (processed) dbgField.getObject("visibleTargetPoses").setPoses(visTgtPoses2d);
    if (!cameraPoses2d.isEmpty()) dbgField.getObject("cameras").setPoses(cameraPoses2d);
  }
}