001/* 002 * MIT License 003 * 004 * Copyright (c) PhotonVision 005 * 006 * Permission is hereby granted, free of charge, to any person obtaining a copy 007 * of this software and associated documentation files (the "Software"), to deal 008 * in the Software without restriction, including without limitation the rights 009 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 010 * copies of the Software, and to permit persons to whom the Software is 011 * furnished to do so, subject to the following conditions: 012 * 013 * The above copyright notice and this permission notice shall be included in all 014 * copies or substantial portions of the Software. 015 * 016 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 017 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 018 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 019 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 020 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 021 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 022 * SOFTWARE. 023 */ 024 025package org.photonvision.simulation; 026 027import edu.wpi.first.apriltag.AprilTag; 028import edu.wpi.first.cscore.CvSource; 029import edu.wpi.first.cscore.OpenCvLoader; 030import edu.wpi.first.math.geometry.Pose3d; 031import edu.wpi.first.math.geometry.Translation3d; 032import edu.wpi.first.math.util.Units; 033import edu.wpi.first.util.RawFrame; 034import java.util.ArrayList; 035import java.util.Arrays; 036import java.util.HashMap; 037import java.util.List; 038import java.util.Map; 039import org.opencv.core.Core; 040import org.opencv.core.CvType; 041import org.opencv.core.Mat; 042import org.opencv.core.MatOfPoint; 043import org.opencv.core.MatOfPoint2f; 044import org.opencv.core.Point; 045import org.opencv.core.Rect; 046import org.opencv.core.Scalar; 047import org.opencv.core.Size; 048import org.opencv.imgproc.Imgproc; 049import org.photonvision.estimation.OpenCVHelp; 050import org.photonvision.estimation.RotTrlTransform3d; 051 052public class VideoSimUtil { 053 public static final int kNumTags36h11 = 30; 054 055 // All 36h11 tag images 056 private static final Map<Integer, Mat> kTag36h11Images = new HashMap<>(); 057 // Points corresponding to marker(black square) corners of 10x10 36h11 tag images 058 public static final Point[] kTag36h11MarkerPts; 059 060 // field dimensions for wireframe 061 private static double fieldLength = 16.54175; 062 private static double fieldWidth = 8.0137; 063 064 static { 065 OpenCvLoader.forceStaticLoad(); 066 067 // create Mats of 10x10 apriltag images 068 for (int i = 0; i < VideoSimUtil.kNumTags36h11; i++) { 069 Mat tagImage = VideoSimUtil.get36h11TagImage(i); 070 kTag36h11Images.put(i, tagImage); 071 } 072 073 kTag36h11MarkerPts = get36h11MarkerPts(); 074 } 075 076 /** Updates the properties of this CvSource video stream with the given camera properties. */ 077 public static void updateVideoProp(CvSource video, SimCameraProperties prop) { 078 video.setResolution(prop.getResWidth(), prop.getResHeight()); 079 video.setFPS((int) prop.getFPS()); 080 } 081 082 /** 083 * Gets the points representing the corners of this image. Because image pixels are accessed 084 * through a Mat, the point (0,0) actually represents the center of the top-left pixel and not the 085 * actual top-left corner. 086 * 087 * <p>Order of corners returned is: [BL, BR, TR, TL] 088 * 089 * @param size Size of image 090 */ 091 public static Point[] getImageCorners(Size size) { 092 return new Point[] { 093 new Point(-0.5, size.height - 0.5), 094 new Point(size.width - 0.5, size.height - 0.5), 095 new Point(size.width - 0.5, -0.5), 096 new Point(-0.5, -0.5) 097 }; 098 } 099 100 /** 101 * Gets the 10x10 (grayscale) image of a specific 36h11 AprilTag. 102 * 103 * <p>WARNING: This creates a {@link RawFrame} instance but does not close it, which would result 104 * in a resource leak if the {@link Mat} is garbage-collected. Unfortunately, closing the {@code 105 * RawFrame} inside this function would delete the underlying data that backs the {@code 106 * ByteBuffer} that is passed to the {@code Mat} constructor (see comments on <a 107 * href="https://github.com/PhotonVision/photonvision/pull/2023">PR 2023</a> for details). 108 * Luckily, this method is private and is (as of Aug 2025) only used to populate the {@link 109 * #kTag36h11Images} static map at static-initialization time. 110 * 111 * @param id The fiducial id of the desired tag 112 */ 113 private static Mat get36h11TagImage(int id) { 114 RawFrame frame = AprilTag.generate36h11AprilTagImage(id); 115 return new Mat( 116 frame.getHeight(), frame.getWidth(), CvType.CV_8UC1, frame.getData(), frame.getStride()); 117 } 118 119 /** Gets the points representing the marker(black square) corners. */ 120 public static Point[] get36h11MarkerPts() { 121 return get36h11MarkerPts(1); 122 } 123 124 /** 125 * Gets the points representing the marker(black square) corners. 126 * 127 * @param scale The scale of the tag image (10*scale x 10*scale image) 128 */ 129 public static Point[] get36h11MarkerPts(int scale) { 130 var roi36h11 = new Rect(new Point(1, 1), new Size(8, 8)); 131 roi36h11.x *= scale; 132 roi36h11.y *= scale; 133 roi36h11.width *= scale; 134 roi36h11.height *= scale; 135 var pts = getImageCorners(roi36h11.size()); 136 for (int i = 0; i < pts.length; i++) { 137 var pt = pts[i]; 138 pts[i] = new Point(roi36h11.tl().x + pt.x, roi36h11.tl().y + pt.y); 139 } 140 return pts; 141 } 142 143 /** 144 * Warps the image of a specific 36h11 AprilTag onto the destination image at the given points. 145 * 146 * @param tagId The id of the specific tag to warp onto the destination image 147 * @param dstPoints Points(4) in destination image where the tag marker(black square) corners 148 * should be warped onto. 149 * @param antialiasing If antialiasing should be performed by automatically 150 * supersampling/interpolating the warped image. This should be used if better stream quality 151 * is desired or target detection is being done on the stream, but can hurt performance. 152 * @param destination The destination image to place the warped tag image onto. 153 */ 154 public static void warp36h11TagImage( 155 int tagId, Point[] dstPoints, boolean antialiasing, Mat destination) { 156 Mat tagImage = kTag36h11Images.get(tagId); 157 if (tagImage == null || tagImage.empty()) return; 158 var tagPoints = new MatOfPoint2f(kTag36h11MarkerPts); 159 // points of tag image corners 160 var tagImageCorners = new MatOfPoint2f(getImageCorners(tagImage.size())); 161 var dstPointMat = new MatOfPoint2f(dstPoints); 162 // the rectangle describing the rectangle-of-interest(ROI) 163 var boundingRect = Imgproc.boundingRect(dstPointMat); 164 // find the perspective transform from the tag image to the warped destination points 165 Mat perspecTrf = Imgproc.getPerspectiveTransform(tagPoints, dstPointMat); 166 // check extreme image corners after transform to check if we need to expand bounding rect 167 var extremeCorners = new MatOfPoint2f(); 168 Core.perspectiveTransform(tagImageCorners, extremeCorners, perspecTrf); 169 // dilate ROI to fit full tag 170 boundingRect = Imgproc.boundingRect(extremeCorners); 171 172 // adjust interpolation strategy based on size of warped tag compared to tag image 173 var warpedContourArea = Imgproc.contourArea(extremeCorners); 174 double warpedTagUpscale = Math.sqrt(warpedContourArea) / Math.sqrt(tagImage.size().area()); 175 int warpStrategy = Imgproc.INTER_NEAREST; 176 // automatically determine the best supersampling of warped image and scale of tag image 177 /* 178 (warpPerspective does not properly resample, so this is used to avoid aliasing in the 179 warped image. Supersampling is used when the warped tag is small, but is very slow 180 when the warped tag is large-- scaling the tag image up and using linear interpolation 181 instead can be performant while still effectively antialiasing. Some combination of these 182 two can be used in between those extremes.) 183 184 TODO: Simplify magic numbers to one or two variables, or use a more proper approach? 185 */ 186 int supersampling = 6; 187 supersampling = (int) Math.ceil(supersampling / warpedTagUpscale); 188 supersampling = Math.max(Math.min(supersampling, 10), 1); 189 190 Mat scaledTagImage = new Mat(); 191 if (warpedTagUpscale > 2.0) { 192 warpStrategy = Imgproc.INTER_LINEAR; 193 int scaleFactor = (int) (warpedTagUpscale / 3.0) + 2; 194 scaleFactor = Math.max(Math.min(scaleFactor, 40), 1); 195 scaleFactor *= supersampling; 196 Imgproc.resize( 197 tagImage, scaledTagImage, new Size(), scaleFactor, scaleFactor, Imgproc.INTER_NEAREST); 198 tagPoints.fromArray(get36h11MarkerPts(scaleFactor)); 199 } else tagImage.assignTo(scaledTagImage); 200 201 // constrain the bounding rect inside of the destination image 202 boundingRect.x -= 1; 203 boundingRect.y -= 1; 204 boundingRect.width += 2; 205 boundingRect.height += 2; 206 if (boundingRect.x < 0) { 207 boundingRect.width += boundingRect.x; 208 boundingRect.x = 0; 209 } 210 if (boundingRect.y < 0) { 211 boundingRect.height += boundingRect.y; 212 boundingRect.y = 0; 213 } 214 boundingRect.width = Math.min(destination.width() - boundingRect.x, boundingRect.width); 215 boundingRect.height = Math.min(destination.height() - boundingRect.y, boundingRect.height); 216 if (boundingRect.width <= 0 || boundingRect.height <= 0) return; 217 218 // upscale if supersampling 219 Mat scaledDstPts = new Mat(); 220 if (supersampling > 1) { 221 Core.multiply(dstPointMat, new Scalar(supersampling, supersampling), scaledDstPts); 222 boundingRect.x *= supersampling; 223 boundingRect.y *= supersampling; 224 boundingRect.width *= supersampling; 225 boundingRect.height *= supersampling; 226 } else dstPointMat.assignTo(scaledDstPts); 227 228 // update transform relative to expanded, scaled bounding rect 229 Core.subtract(scaledDstPts, new Scalar(boundingRect.tl().x, boundingRect.tl().y), scaledDstPts); 230 perspecTrf = Imgproc.getPerspectiveTransform(tagPoints, scaledDstPts); 231 232 // warp (scaled) tag image onto (scaled) ROI image representing the portion of 233 // the destination image encapsulated by boundingRect 234 Mat tempROI = new Mat(); 235 Imgproc.warpPerspective(scaledTagImage, tempROI, perspecTrf, boundingRect.size(), warpStrategy); 236 237 // downscale ROI with interpolation if supersampling 238 if (supersampling > 1) { 239 boundingRect.x /= supersampling; 240 boundingRect.y /= supersampling; 241 boundingRect.width /= supersampling; 242 boundingRect.height /= supersampling; 243 Imgproc.resize(tempROI, tempROI, boundingRect.size(), 0, 0, Imgproc.INTER_AREA); 244 } 245 246 // we want to copy ONLY the transformed tag to the result image, not the entire bounding rect 247 // using a mask only copies the source pixels which have an associated non-zero value in the 248 // mask 249 Mat tempMask = Mat.zeros(tempROI.size(), CvType.CV_8UC1); 250 Core.subtract( 251 extremeCorners, new Scalar(boundingRect.tl().x, boundingRect.tl().y), extremeCorners); 252 Point tempCenter = new Point(); 253 tempCenter.x = 254 Arrays.stream(extremeCorners.toArray()).mapToDouble(p -> p.x).average().getAsDouble(); 255 tempCenter.y = 256 Arrays.stream(extremeCorners.toArray()).mapToDouble(p -> p.y).average().getAsDouble(); 257 // dilate tag corners 258 Arrays.stream(extremeCorners.toArray()) 259 .forEach( 260 p -> { 261 double xdiff = p.x - tempCenter.x; 262 double ydiff = p.y - tempCenter.y; 263 xdiff += 1 * Math.signum(xdiff); 264 ydiff += 1 * Math.signum(ydiff); 265 new Point(tempCenter.x + xdiff, tempCenter.y + ydiff); 266 }); 267 // (make inside of tag completely white in mask) 268 Imgproc.fillConvexPoly(tempMask, new MatOfPoint(extremeCorners.toArray()), new Scalar(255)); 269 270 // copy transformed tag onto result image 271 tempROI.copyTo(destination.submat(boundingRect), tempMask); 272 } 273 274 /** 275 * Given a line thickness in a 640x480 image, try to scale to the given destination image 276 * resolution. 277 * 278 * @param thickness480p A hypothetical line thickness in a 640x480 image 279 * @param destinationImg The destination image to scale to 280 * @return Scaled thickness which cannot be less than 1 281 */ 282 public static double getScaledThickness(double thickness480p, Mat destinationImg) { 283 double scaleX = destinationImg.width() / 640.0; 284 double scaleY = destinationImg.height() / 480.0; 285 double minScale = Math.min(scaleX, scaleY); 286 return Math.max(thickness480p * minScale, 1.0); 287 } 288 289 /** 290 * Draw a filled ellipse in the destination image. 291 * 292 * @param dstPoints The points in the destination image representing the rectangle in which the 293 * ellipse is inscribed. 294 * @param color The color of the ellipse. This is a scalar with BGR values (0-255) 295 * @param destination The destination image to draw onto. The image should be in the BGR color 296 * space. 297 */ 298 public static void drawInscribedEllipse(Point[] dstPoints, Scalar color, Mat destination) { 299 // create RotatedRect from points 300 var rect = OpenCVHelp.getMinAreaRect(dstPoints); 301 // inscribe ellipse inside rectangle 302 Imgproc.ellipse(destination, rect, color, -1, Imgproc.LINE_AA); 303 } 304 305 /** 306 * Draw a polygon outline or filled polygon to the destination image with the given points. 307 * 308 * @param dstPoints The points in the destination image representing the polygon. 309 * @param thickness The thickness of the outline in pixels. If this is not positive, a filled 310 * polygon is drawn instead. 311 * @param color The color drawn. This should match the color space of the destination image. 312 * @param isClosed If the last point should connect to the first point in the polygon outline. 313 * @param destination The destination image to draw onto. 314 */ 315 public static void drawPoly( 316 Point[] dstPoints, int thickness, Scalar color, boolean isClosed, Mat destination) { 317 var dstPointsd = new MatOfPoint(dstPoints); 318 if (thickness > 0) { 319 Imgproc.polylines( 320 destination, List.of(dstPointsd), isClosed, color, thickness, Imgproc.LINE_AA); 321 } else { 322 Imgproc.fillPoly(destination, List.of(dstPointsd), color, Imgproc.LINE_AA); 323 } 324 } 325 326 /** 327 * Draws a contour around the given points and text of the id onto the destination image. 328 * 329 * @param id Fiducial ID number to draw 330 * @param dstPoints Points representing the four corners of the tag marker(black square) in the 331 * destination image. 332 * @param destination The destination image to draw onto. The image should be in the BGR color 333 * space. 334 */ 335 public static void drawTagDetection(int id, Point[] dstPoints, Mat destination) { 336 double thickness = getScaledThickness(1, destination); 337 drawPoly(dstPoints, (int) thickness, new Scalar(0, 0, 255), true, destination); 338 var rect = Imgproc.boundingRect(new MatOfPoint(dstPoints)); 339 var textPt = new Point(rect.x + rect.width, rect.y); 340 textPt.x += thickness; 341 textPt.y += thickness; 342 Imgproc.putText( 343 destination, 344 String.valueOf(id), 345 textPt, 346 Imgproc.FONT_HERSHEY_PLAIN, 347 1.5 * thickness, 348 new Scalar(0, 200, 0), 349 (int) thickness, 350 Imgproc.LINE_AA); 351 } 352 353 /** 354 * Set the field dimensions that are used for drawing the field wireframe. 355 * 356 * @param fieldLengthMeters field length in meters (x direction) 357 * @param fieldWidthMeters field width in meters (y direction) 358 */ 359 public static void setFieldDimensionsMeters(double fieldLengthMeters, double fieldWidthMeters) { 360 fieldLength = fieldLengthMeters; 361 fieldWidth = fieldWidthMeters; 362 } 363 364 /** 365 * The translations used to draw the field side walls and driver station walls. It is a List of 366 * Lists because the translations are not all connected. 367 */ 368 private static List<List<Translation3d>> getFieldWallLines() { 369 var list = new ArrayList<List<Translation3d>>(); 370 371 final double sideHt = Units.inchesToMeters(19.5); 372 final double driveHt = Units.inchesToMeters(35); 373 final double topHt = Units.inchesToMeters(78); 374 375 // field floor 376 list.add( 377 List.of( 378 new Translation3d(0, 0, 0), 379 new Translation3d(fieldLength, 0, 0), 380 new Translation3d(fieldLength, fieldWidth, 0), 381 new Translation3d(0, fieldWidth, 0), 382 new Translation3d(0, 0, 0))); 383 // right side wall 384 list.add( 385 List.of( 386 new Translation3d(0, 0, 0), 387 new Translation3d(0, 0, sideHt), 388 new Translation3d(fieldLength, 0, sideHt), 389 new Translation3d(fieldLength, 0, 0))); 390 // red driverstation 391 list.add( 392 List.of( 393 new Translation3d(fieldLength, 0, sideHt), 394 new Translation3d(fieldLength, 0, topHt), 395 new Translation3d(fieldLength, fieldWidth, topHt), 396 new Translation3d(fieldLength, fieldWidth, sideHt))); 397 list.add( 398 List.of( 399 new Translation3d(fieldLength, 0, driveHt), 400 new Translation3d(fieldLength, fieldWidth, driveHt))); 401 // left side wall 402 list.add( 403 List.of( 404 new Translation3d(0, fieldWidth, 0), 405 new Translation3d(0, fieldWidth, sideHt), 406 new Translation3d(fieldLength, fieldWidth, sideHt), 407 new Translation3d(fieldLength, fieldWidth, 0))); 408 // blue driverstation 409 list.add( 410 List.of( 411 new Translation3d(0, 0, sideHt), 412 new Translation3d(0, 0, topHt), 413 new Translation3d(0, fieldWidth, topHt), 414 new Translation3d(0, fieldWidth, sideHt))); 415 list.add(List.of(new Translation3d(0, 0, driveHt), new Translation3d(0, fieldWidth, driveHt))); 416 417 return list; 418 } 419 420 /** 421 * The translations used to draw the field floor subdivisions (not the floor outline). It is a 422 * List of Lists because the translations are not all connected. 423 * 424 * @param subdivisions How many "subdivisions" along the width/length of the floor. E.g. 3 425 * subdivisions would mean 2 lines along the length and 2 lines along the width creating a 3x3 426 * "grid". 427 */ 428 private static List<List<Translation3d>> getFieldFloorLines(int subdivisions) { 429 var list = new ArrayList<List<Translation3d>>(); 430 final double subLength = fieldLength / subdivisions; 431 final double subWidth = fieldWidth / subdivisions; 432 433 // field floor subdivisions 434 for (int i = 0; i < subdivisions; i++) { 435 list.add( 436 List.of( 437 new Translation3d(0, subWidth * (i + 1), 0), 438 new Translation3d(fieldLength, subWidth * (i + 1), 0))); 439 list.add( 440 List.of( 441 new Translation3d(subLength * (i + 1), 0, 0), 442 new Translation3d(subLength * (i + 1), fieldWidth, 0))); 443 } 444 445 return list; 446 } 447 448 /** 449 * Convert 3D lines represented by the given series of translations into a polygon(s) in the 450 * camera's image. 451 * 452 * @param camRt The change in basis from world coordinates to camera coordinates. See {@link 453 * RotTrlTransform3d#makeRelativeTo(Pose3d)}. 454 * @param prop The simulated camera's properties. 455 * @param trls A sequential series of translations defining the polygon to be drawn. 456 * @param resolution Resolution as a fraction(0 - 1) of the video frame's diagonal length in 457 * pixels. Line segments will be subdivided if they exceed this resolution. 458 * @param isClosed If the final translation should also draw a line to the first translation. 459 * @param destination The destination image that is being drawn to. 460 * @return A list of polygons(which are an array of points) 461 */ 462 public static List<Point[]> polyFrom3dLines( 463 RotTrlTransform3d camRt, 464 SimCameraProperties prop, 465 List<Translation3d> trls, 466 double resolution, 467 boolean isClosed, 468 Mat destination) { 469 resolution = Math.hypot(destination.size().height, destination.size().width) * resolution; 470 List<Translation3d> pts = new ArrayList<>(trls); 471 if (isClosed) pts.add(pts.get(0)); 472 List<Point[]> polyPointList = new ArrayList<>(); 473 474 for (int i = 0; i < pts.size() - 1; i++) { 475 var pta = pts.get(i); 476 var ptb = pts.get(i + 1); 477 478 // check if line is inside camera fulcrum 479 var inter = prop.getVisibleLine(camRt, pta, ptb); 480 if (inter.getSecond() == null) continue; 481 482 // cull line to the inside of the camera fulcrum 483 double inter1 = inter.getFirst(); 484 double inter2 = inter.getSecond(); 485 var baseDelta = ptb.minus(pta); 486 var old_pta = pta; 487 if (inter1 > 0) pta = old_pta.plus(baseDelta.times(inter1)); 488 if (inter2 < 1) ptb = old_pta.plus(baseDelta.times(inter2)); 489 baseDelta = ptb.minus(pta); 490 491 // project points into 2d 492 var poly = 493 new ArrayList<>( 494 Arrays.asList( 495 OpenCVHelp.projectPoints( 496 prop.getIntrinsics(), prop.getDistCoeffs(), camRt, List.of(pta, ptb)))); 497 var pxa = poly.get(0); 498 var pxb = poly.get(1); 499 500 // subdivide projected line based on desired resolution 501 double pxDist = Math.hypot(pxb.x - pxa.x, pxb.y - pxa.y); 502 int subdivisions = (int) (pxDist / resolution); 503 var subDelta = baseDelta.div(subdivisions + 1); 504 var subPts = new ArrayList<Translation3d>(); 505 for (int j = 0; j < subdivisions; j++) { 506 subPts.add(pta.plus(subDelta.times(j + 1))); 507 } 508 if (!subPts.isEmpty()) { 509 poly.addAll( 510 1, 511 Arrays.asList( 512 OpenCVHelp.projectPoints( 513 prop.getIntrinsics(), prop.getDistCoeffs(), camRt, subPts))); 514 } 515 516 polyPointList.add(poly.toArray(Point[]::new)); 517 } 518 519 return polyPointList; 520 } 521 522 /** 523 * Draw a wireframe of the field to the given image. 524 * 525 * @param camRt The change in basis from world coordinates to camera coordinates. See {@link 526 * RotTrlTransform3d#makeRelativeTo(Pose3d)}. 527 * @param prop The simulated camera's properties. 528 * @param resolution Resolution as a fraction(0 - 1) of the video frame's diagonal length in 529 * pixels. Line segments will be subdivided if they exceed this resolution. 530 * @param wallThickness Thickness of the lines used for drawing the field walls in pixels. This is 531 * scaled by {@link #getScaledThickness(double, Mat)}. 532 * @param wallColor Color of the lines used for drawing the field walls. 533 * @param floorSubdivisions A NxN "grid" is created from the floor where this parameter is N, 534 * which defines the floor lines. 535 * @param floorThickness Thickness of the lines used for drawing the field floor grid in pixels. 536 * This is scaled by {@link #getScaledThickness(double, Mat)}. 537 * @param floorColor Color of the lines used for drawing the field floor grid. 538 * @param destination The destination image to draw to. 539 */ 540 public static void drawFieldWireframe( 541 RotTrlTransform3d camRt, 542 SimCameraProperties prop, 543 double resolution, 544 double wallThickness, 545 Scalar wallColor, 546 int floorSubdivisions, 547 double floorThickness, 548 Scalar floorColor, 549 Mat destination) { 550 for (var trls : getFieldFloorLines(floorSubdivisions)) { 551 var polys = VideoSimUtil.polyFrom3dLines(camRt, prop, trls, resolution, false, destination); 552 for (var poly : polys) { 553 drawPoly( 554 poly, 555 (int) Math.round(getScaledThickness(floorThickness, destination)), 556 floorColor, 557 false, 558 destination); 559 } 560 } 561 for (var trls : getFieldWallLines()) { 562 var polys = VideoSimUtil.polyFrom3dLines(camRt, prop, trls, resolution, false, destination); 563 for (var poly : polys) { 564 drawPoly( 565 poly, 566 (int) Math.round(getScaledThickness(wallThickness, destination)), 567 wallColor, 568 false, 569 destination); 570 } 571 } 572 } 573}