/*
 * MIT License
 *
 * Copyright (c) PhotonVision
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

package org.photonvision.simulation;

import edu.wpi.first.apriltag.AprilTag;
import edu.wpi.first.cscore.CvSource;
import edu.wpi.first.cscore.OpenCvLoader;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Translation3d;
import edu.wpi.first.math.util.Units;
import edu.wpi.first.util.RawFrame;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import org.photonvision.estimation.OpenCVHelp;
import org.photonvision.estimation.RotTrlTransform3d;

public class VideoSimUtil {
    public static final int kNumTags36h11 = 30;

    // Pre-generated images of the first kNumTags36h11 36h11 tags
    private static final Map<Integer, Mat> kTag36h11Images = new HashMap<>();
    // Points corresponding to the marker (black square) corners of 10x10 36h11 tag images
    public static final Point[] kTag36h11MarkerPts;

    // field dimensions for wireframe
    private static double fieldLength = 16.54175;
    private static double fieldWidth = 8.0137;

    static {
        OpenCvLoader.forceStaticLoad();

        // create Mats of 10x10 apriltag images
        for (int i = 0; i < VideoSimUtil.kNumTags36h11; i++) {
            Mat tagImage = VideoSimUtil.get36h11TagImage(i);
            kTag36h11Images.put(i, tagImage);
        }

        kTag36h11MarkerPts = get36h11MarkerPts();
    }

    /** Updates the properties of this CvSource video stream with the given camera properties. */
    public static void updateVideoProp(CvSource video, SimCameraProperties prop) {
        video.setResolution(prop.getResWidth(), prop.getResHeight());
        video.setFPS((int) prop.getFPS());
    }

    /**
     * Gets the points representing the corners of an image of the given size. Because image pixels
     * are accessed through a Mat, the point (0,0) actually represents the center of the top-left
     * pixel and not the actual top-left corner.
     *
     * @param size Size of image
     */
    public static Point[] getImageCorners(Size size) {
        return new Point[] {
            new Point(-0.5, -0.5),
            new Point(size.width - 0.5, -0.5),
            new Point(size.width - 0.5, size.height - 0.5),
            new Point(-0.5, size.height - 0.5)
        };
    }
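
    // A quick worked example of the half-pixel convention above: for a 640x480 image,
    // getImageCorners(new Size(640, 480)) returns (-0.5, -0.5), (639.5, -0.5), (639.5, 479.5),
    // and (-0.5, 479.5) -- the outer edges of the border pixels rather than their centers.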

    /**
     * Gets the 10x10 (grayscale) image of a specific 36h11 AprilTag.
     *
     * @param id The fiducial id of the desired tag
     */
    private static Mat get36h11TagImage(int id) {
        RawFrame frame = AprilTag.generate36h11AprilTagImage(id);

        var buf = frame.getData();
        byte[] arr = new byte[buf.remaining()];
        buf.get(arr);
        // the pixel data has been copied into arr, so the native frame can be freed
        frame.close();

        return new MatOfByte(arr).reshape(1, 10).submat(new Rect(0, 0, 10, 10));
    }
117
118    /** Gets the points representing the marker(black square) corners. */
119    public static Point[] get36h11MarkerPts() {
120        return get36h11MarkerPts(1);
121    }
122
123    /**
124     * Gets the points representing the marker(black square) corners.
125     *
126     * @param scale The scale of the tag image (10*scale x 10*scale image)
127     */
128    public static Point[] get36h11MarkerPts(int scale) {
129        var roi36h11 = new Rect(new Point(1, 1), new Size(8, 8));
130        roi36h11.x *= scale;
131        roi36h11.y *= scale;
132        roi36h11.width *= scale;
133        roi36h11.height *= scale;
134        var pts = getImageCorners(roi36h11.size());
135        for (int i = 0; i < pts.length; i++) {
136            var pt = pts[i];
137            pts[i] = new Point(roi36h11.tl().x + pt.x, roi36h11.tl().y + pt.y);
138        }
139        return pts;
140    }
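
    // Worked example at scale 1 (values follow from the code above): the marker ROI is the 8x8
    // square at (1, 1), so the returned corners are (0.5, 0.5), (8.5, 0.5), (8.5, 8.5), and
    // (0.5, 8.5) in 10x10 tag-image coordinates.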

    /**
     * Warps the image of a specific 36h11 AprilTag onto the destination image at the given points.
     *
     * @param tagId The id of the specific tag to warp onto the destination image
     * @param dstPoints Points (4) in the destination image onto which the tag marker (black square)
     *     corners should be warped.
     * @param antialiasing If antialiasing should be performed by automatically
     *     supersampling/interpolating the warped image. This should be used if better stream quality
     *     is desired or target detection is being done on the stream, but can hurt performance.
     * @param destination The destination image to place the warped tag image onto.
     */
    public static void warp36h11TagImage(
            int tagId, Point[] dstPoints, boolean antialiasing, Mat destination) {
        Mat tagImage = kTag36h11Images.get(tagId);
        if (tagImage == null || tagImage.empty()) return;
        var tagPoints = new MatOfPoint2f(kTag36h11MarkerPts);
        // points of tag image corners
        var tagImageCorners = new MatOfPoint2f(getImageCorners(tagImage.size()));
        var dstPointMat = new MatOfPoint2f(dstPoints);
        // find the perspective transform from the tag image to the warped destination points
        Mat perspecTrf = Imgproc.getPerspectiveTransform(tagPoints, dstPointMat);
        // transform the extreme image corners to find the bounding rect -- the region of
        // interest (ROI) in the destination image that fits the full warped tag
        var extremeCorners = new MatOfPoint2f();
        Core.perspectiveTransform(tagImageCorners, extremeCorners, perspecTrf);
        var boundingRect = Imgproc.boundingRect(extremeCorners);

        // adjust interpolation strategy based on size of warped tag compared to tag image
        var warpedContourArea = Imgproc.contourArea(extremeCorners);
        double warpedTagUpscale = Math.sqrt(warpedContourArea) / Math.sqrt(tagImage.size().area());
        int warpStrategy = Imgproc.INTER_NEAREST;
        // automatically determine the best supersampling of warped image and scale of tag image
        /*
        (warpPerspective does not properly resample, so this is used to avoid aliasing in the
        warped image. Supersampling is used when the warped tag is small, but is very slow
        when the warped tag is large -- scaling the tag image up and using linear interpolation
        instead can be performant while still effectively antialiasing. Some combination of these
        two can be used in between those extremes.)

        TODO: Simplify magic numbers to one or two variables, or use a more proper approach?
        */
        int supersampling = 1;
        if (antialiasing) {
            supersampling = (int) Math.ceil(6.0 / warpedTagUpscale);
            supersampling = Math.max(Math.min(supersampling, 10), 1);
        }
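
        // For a sense of scale (numbers that follow from the formula above, when antialiasing is
        // enabled): a warped tag about the size of the 10x10 source image (warpedTagUpscale ~ 1)
        // gets supersampling 6, a tag warped to half that size hits the cap of 10, and a tag
        // upscaled 6x or more drops to 1.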

        Mat scaledTagImage = new Mat();
        if (antialiasing && warpedTagUpscale > 2.0) {
            warpStrategy = Imgproc.INTER_LINEAR;
            int scaleFactor = (int) (warpedTagUpscale / 3.0) + 2;
            scaleFactor = Math.max(Math.min(scaleFactor, 40), 1);
            scaleFactor *= supersampling;
            Imgproc.resize(
                    tagImage, scaledTagImage, new Size(), scaleFactor, scaleFactor, Imgproc.INTER_NEAREST);
            tagPoints.fromArray(get36h11MarkerPts(scaleFactor));
        } else tagImage.assignTo(scaledTagImage);

        // constrain the bounding rect inside of the destination image
        boundingRect.x -= 1;
        boundingRect.y -= 1;
        boundingRect.width += 2;
        boundingRect.height += 2;
        if (boundingRect.x < 0) {
            boundingRect.width += boundingRect.x;
            boundingRect.x = 0;
        }
        if (boundingRect.y < 0) {
            boundingRect.height += boundingRect.y;
            boundingRect.y = 0;
        }
        boundingRect.width = Math.min(destination.width() - boundingRect.x, boundingRect.width);
        boundingRect.height = Math.min(destination.height() - boundingRect.y, boundingRect.height);
        if (boundingRect.width <= 0 || boundingRect.height <= 0) return;

        // upscale if supersampling
        Mat scaledDstPts = new Mat();
        if (supersampling > 1) {
            Core.multiply(dstPointMat, new Scalar(supersampling, supersampling), scaledDstPts);
            boundingRect.x *= supersampling;
            boundingRect.y *= supersampling;
            boundingRect.width *= supersampling;
            boundingRect.height *= supersampling;
        } else dstPointMat.assignTo(scaledDstPts);

        // update transform relative to expanded, scaled bounding rect
        Core.subtract(scaledDstPts, new Scalar(boundingRect.tl().x, boundingRect.tl().y), scaledDstPts);
        perspecTrf = Imgproc.getPerspectiveTransform(tagPoints, scaledDstPts);

        // warp (scaled) tag image onto (scaled) ROI image representing the portion of
        // the destination image encapsulated by boundingRect
        Mat tempROI = new Mat();
        Imgproc.warpPerspective(scaledTagImage, tempROI, perspecTrf, boundingRect.size(), warpStrategy);

        // downscale ROI with interpolation if supersampling
        if (supersampling > 1) {
            boundingRect.x /= supersampling;
            boundingRect.y /= supersampling;
            boundingRect.width /= supersampling;
            boundingRect.height /= supersampling;
            Imgproc.resize(tempROI, tempROI, boundingRect.size(), 0, 0, Imgproc.INTER_AREA);
        }

        // we want to copy ONLY the transformed tag to the result image, not the entire bounding rect
        // using a mask only copies the source pixels which have an associated non-zero value in the
        // mask
        Mat tempMask = Mat.zeros(tempROI.size(), CvType.CV_8UC1);
        Core.subtract(
                extremeCorners, new Scalar(boundingRect.tl().x, boundingRect.tl().y), extremeCorners);
        Point tempCenter = new Point();
        tempCenter.x =
                Arrays.stream(extremeCorners.toArray()).mapToDouble(p -> p.x).average().getAsDouble();
        tempCenter.y =
                Arrays.stream(extremeCorners.toArray()).mapToDouble(p -> p.y).average().getAsDouble();
        // dilate the tag corners outward from their center by one pixel so the mask does not clip
        // the edges of the warped tag
        var dilatedCorners =
                Arrays.stream(extremeCorners.toArray())
                        .map(
                                p -> {
                                    double xdiff = p.x - tempCenter.x;
                                    double ydiff = p.y - tempCenter.y;
                                    xdiff += Math.signum(xdiff);
                                    ydiff += Math.signum(ydiff);
                                    return new Point(tempCenter.x + xdiff, tempCenter.y + ydiff);
                                })
                        .toArray(Point[]::new);
        // (make inside of tag completely white in mask)
        Imgproc.fillConvexPoly(tempMask, new MatOfPoint(dilatedCorners), new Scalar(255));

        // copy transformed tag onto result image
        tempROI.copyTo(destination.submat(boundingRect), tempMask);
    }
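
    /*
     * A minimal usage sketch (the frame and corner values here are hypothetical; in practice the
     * camera simulation projects the tag's marker corners into the image first):
     *
     *   Mat frame = Mat.zeros(480, 640, CvType.CV_8UC1);
     *   Point[] corners = {
     *       new Point(300, 200), new Point(340, 202), new Point(338, 242), new Point(298, 240)
     *   };
     *   VideoSimUtil.warp36h11TagImage(7, corners, true, frame);
     */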

    /**
     * Given a line thickness in a 640x480 image, try to scale to the given destination image
     * resolution.
     *
     * @param thickness480p A hypothetical line thickness in a 640x480 image
     * @param destinationImg The destination image to scale to
     * @return Scaled thickness which cannot be less than 1
     */
    public static double getScaledThickness(double thickness480p, Mat destinationImg) {
        double scaleX = destinationImg.width() / 640.0;
        double scaleY = destinationImg.height() / 480.0;
        double minScale = Math.min(scaleX, scaleY);
        return Math.max(thickness480p * minScale, 1.0);
    }
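
    // For example (values follow from the code above): a 2px line at 640x480 scales to 4px in a
    // 1280x960 image, and clamps to 1px in a 320x240 image (2 * 0.5 = 1).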

    /**
     * Draw a filled ellipse in the destination image.
     *
     * @param dstPoints The points in the destination image representing the rectangle in which the
     *     ellipse is inscribed.
     * @param color The color of the ellipse. This is a scalar with BGR values (0-255)
     * @param destination The destination image to draw onto. The image should be in the BGR color
     *     space.
     */
    public static void drawInscribedEllipse(Point[] dstPoints, Scalar color, Mat destination) {
        // create RotatedRect from points
        var rect = OpenCVHelp.getMinAreaRect(dstPoints);
        // inscribe ellipse inside rectangle
        Imgproc.ellipse(destination, rect, color, -1, Imgproc.LINE_AA);
    }

    /**
     * Draw a polygon outline or filled polygon to the destination image with the given points.
     *
     * @param dstPoints The points in the destination image representing the polygon.
     * @param thickness The thickness of the outline in pixels. If this is not positive, a filled
     *     polygon is drawn instead.
     * @param color The color drawn. This should match the color space of the destination image.
     * @param isClosed If the last point should connect to the first point in the polygon outline.
     * @param destination The destination image to draw onto.
     */
    public static void drawPoly(
            Point[] dstPoints, int thickness, Scalar color, boolean isClosed, Mat destination) {
        var dstPointsd = new MatOfPoint(dstPoints);
        if (thickness > 0) {
            Imgproc.polylines(
                    destination, List.of(dstPointsd), isClosed, color, thickness, Imgproc.LINE_AA);
        } else {
            Imgproc.fillPoly(destination, List.of(dstPointsd), color, Imgproc.LINE_AA);
        }
    }
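
    /*
     * A minimal usage sketch (hypothetical values): draw a closed red 3px triangle outline onto
     * a BGR frame.
     *
     *   Mat frame = Mat.zeros(480, 640, CvType.CV_8UC3);
     *   Point[] tri = {new Point(100, 100), new Point(200, 100), new Point(150, 180)};
     *   VideoSimUtil.drawPoly(tri, 3, new Scalar(0, 0, 255), true, frame);
     */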

    /**
     * Draws a contour around the given points and text of the id onto the destination image.
     *
     * @param id Fiducial ID number to draw
     * @param dstPoints Points representing the four corners of the tag marker (black square) in the
     *     destination image.
     * @param destination The destination image to draw onto. The image should be in the BGR color
     *     space.
     */
    public static void drawTagDetection(int id, Point[] dstPoints, Mat destination) {
        double thickness = getScaledThickness(1, destination);
        drawPoly(dstPoints, (int) thickness, new Scalar(0, 0, 255), true, destination);
        var rect = Imgproc.boundingRect(new MatOfPoint(dstPoints));
        var textPt = new Point(rect.x + rect.width, rect.y);
        textPt.x += thickness;
        textPt.y += thickness;
        Imgproc.putText(
                destination,
                String.valueOf(id),
                textPt,
                Imgproc.FONT_HERSHEY_PLAIN,
                1.5 * thickness,
                new Scalar(0, 200, 0),
                (int) thickness,
                Imgproc.LINE_AA);
    }

    /**
     * Set the field dimensions that are used for drawing the field wireframe.
     *
     * @param fieldLengthMeters field length in meters (x direction)
     * @param fieldWidthMeters field width in meters (y direction)
     */
    public static void setFieldDimensionsMeters(double fieldLengthMeters, double fieldWidthMeters) {
        fieldLength = fieldLengthMeters;
        fieldWidth = fieldWidthMeters;
    }

    /**
     * The translations used to draw the field side walls and driver station walls. It is a List of
     * Lists because the translations are not all connected.
     */
    private static List<List<Translation3d>> getFieldWallLines() {
        var list = new ArrayList<List<Translation3d>>();

        final double sideHt = Units.inchesToMeters(19.5);
        final double driveHt = Units.inchesToMeters(35);
        final double topHt = Units.inchesToMeters(78);

        // field floor
        list.add(
                List.of(
                        new Translation3d(0, 0, 0),
                        new Translation3d(fieldLength, 0, 0),
                        new Translation3d(fieldLength, fieldWidth, 0),
                        new Translation3d(0, fieldWidth, 0),
                        new Translation3d(0, 0, 0)));
        // right side wall
        list.add(
                List.of(
                        new Translation3d(0, 0, 0),
                        new Translation3d(0, 0, sideHt),
                        new Translation3d(fieldLength, 0, sideHt),
                        new Translation3d(fieldLength, 0, 0)));
        // red driver station
        list.add(
                List.of(
                        new Translation3d(fieldLength, 0, sideHt),
                        new Translation3d(fieldLength, 0, topHt),
                        new Translation3d(fieldLength, fieldWidth, topHt),
                        new Translation3d(fieldLength, fieldWidth, sideHt)));
        list.add(
                List.of(
                        new Translation3d(fieldLength, 0, driveHt),
                        new Translation3d(fieldLength, fieldWidth, driveHt)));
        // left side wall
        list.add(
                List.of(
                        new Translation3d(0, fieldWidth, 0),
                        new Translation3d(0, fieldWidth, sideHt),
                        new Translation3d(fieldLength, fieldWidth, sideHt),
                        new Translation3d(fieldLength, fieldWidth, 0)));
        // blue driver station
        list.add(
                List.of(
                        new Translation3d(0, 0, sideHt),
                        new Translation3d(0, 0, topHt),
                        new Translation3d(0, fieldWidth, topHt),
                        new Translation3d(0, fieldWidth, sideHt)));
        list.add(List.of(new Translation3d(0, 0, driveHt), new Translation3d(0, fieldWidth, driveHt)));

        return list;
    }

    /**
     * The translations used to draw the field floor subdivisions (not the floor outline). It is a
     * List of Lists because the translations are not all connected.
     *
     * @param subdivisions How many "subdivisions" along the width/length of the floor. E.g. 3
     *     subdivisions would mean 2 lines along the length and 2 lines along the width creating a 3x3
     *     "grid".
     */
    private static List<List<Translation3d>> getFieldFloorLines(int subdivisions) {
        var list = new ArrayList<List<Translation3d>>();
        final double subLength = fieldLength / subdivisions;
        final double subWidth = fieldWidth / subdivisions;

        // field floor subdivisions
        for (int i = 0; i < subdivisions; i++) {
            list.add(
                    List.of(
                            new Translation3d(0, subWidth * (i + 1), 0),
                            new Translation3d(fieldLength, subWidth * (i + 1), 0)));
            list.add(
                    List.of(
                            new Translation3d(subLength * (i + 1), 0, 0),
                            new Translation3d(subLength * (i + 1), fieldWidth, 0)));
        }

        return list;
    }

    /**
     * Convert 3D lines represented by the given series of translations into polygons in the
     * camera's image.
     *
     * @param camRt The change in basis from world coordinates to camera coordinates. See {@link
     *     RotTrlTransform3d#makeRelativeTo(Pose3d)}.
     * @param prop The simulated camera's properties.
     * @param trls A sequential series of translations defining the polygon to be drawn.
     * @param resolution Resolution as a fraction (0 - 1) of the video frame's diagonal length in
     *     pixels. Line segments will be subdivided if they exceed this resolution.
     * @param isClosed If the final translation should also draw a line to the first translation.
     * @param destination The destination image that is being drawn to.
     * @return A list of polygons (each an array of points)
     */
    public static List<Point[]> polyFrom3dLines(
            RotTrlTransform3d camRt,
            SimCameraProperties prop,
            List<Translation3d> trls,
            double resolution,
            boolean isClosed,
            Mat destination) {
        resolution = Math.hypot(destination.size().height, destination.size().width) * resolution;
        List<Translation3d> pts = new ArrayList<>(trls);
        if (isClosed) pts.add(pts.get(0));
        List<Point[]> polyPointList = new ArrayList<>();

        for (int i = 0; i < pts.size() - 1; i++) {
            var pta = pts.get(i);
            var ptb = pts.get(i + 1);

            // check if the line is visible inside the camera frustum
            var inter = prop.getVisibleLine(camRt, pta, ptb);
            if (inter.getSecond() == null) continue;

            // clip the line to the inside of the camera frustum
            double inter1 = inter.getFirst();
            double inter2 = inter.getSecond();
            var baseDelta = ptb.minus(pta);
            var old_pta = pta;
            if (inter1 > 0) pta = old_pta.plus(baseDelta.times(inter1));
            if (inter2 < 1) ptb = old_pta.plus(baseDelta.times(inter2));
            baseDelta = ptb.minus(pta);

            // project points into 2d
            var poly =
                    new ArrayList<>(
                            Arrays.asList(
                                    OpenCVHelp.projectPoints(
                                            prop.getIntrinsics(), prop.getDistCoeffs(), camRt, List.of(pta, ptb))));
            var pxa = poly.get(0);
            var pxb = poly.get(1);

            // subdivide projected line based on desired resolution
            double pxDist = Math.hypot(pxb.x - pxa.x, pxb.y - pxa.y);
            int subdivisions = (int) (pxDist / resolution);
            var subDelta = baseDelta.div(subdivisions + 1);
            var subPts = new ArrayList<Translation3d>();
            for (int j = 0; j < subdivisions; j++) {
                subPts.add(pta.plus(subDelta.times(j + 1)));
            }
            if (!subPts.isEmpty()) {
                poly.addAll(
                        1,
                        Arrays.asList(
                                OpenCVHelp.projectPoints(
                                        prop.getIntrinsics(), prop.getDistCoeffs(), camRt, subPts)));
            }

            polyPointList.add(poly.toArray(Point[]::new));
        }

        return polyPointList;
    }
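
    /*
     * A minimal usage sketch (hypothetical camera setup): project a 1-meter floor line into the
     * image, subdividing segments longer than ~1% of the frame diagonal so lens distortion bends
     * them smoothly, then draw the result.
     *
     *   var prop = new SimCameraProperties();
     *   var camRt = RotTrlTransform3d.makeRelativeTo(new Pose3d());
     *   var line = List.of(new Translation3d(2, 0, 0), new Translation3d(3, 0, 0));
     *   Mat frame = Mat.zeros(480, 640, CvType.CV_8UC3);
     *   for (var poly : VideoSimUtil.polyFrom3dLines(camRt, prop, line, 0.01, false, frame)) {
     *       VideoSimUtil.drawPoly(poly, 1, new Scalar(255, 255, 255), false, frame);
     *   }
     */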

    /**
     * Draw a wireframe of the field to the given image.
     *
     * @param camRt The change in basis from world coordinates to camera coordinates. See {@link
     *     RotTrlTransform3d#makeRelativeTo(Pose3d)}.
     * @param prop The simulated camera's properties.
     * @param resolution Resolution as a fraction (0 - 1) of the video frame's diagonal length in
     *     pixels. Line segments will be subdivided if they exceed this resolution.
     * @param wallThickness Thickness of the lines used for drawing the field walls in pixels. This is
     *     scaled by {@link #getScaledThickness(double, Mat)}.
     * @param wallColor Color of the lines used for drawing the field walls.
     * @param floorSubdivisions An NxN "grid" is created from the floor where this parameter is N,
     *     which defines the floor lines.
     * @param floorThickness Thickness of the lines used for drawing the field floor grid in pixels.
     *     This is scaled by {@link #getScaledThickness(double, Mat)}.
     * @param floorColor Color of the lines used for drawing the field floor grid.
     * @param destination The destination image to draw to.
     */
    public static void drawFieldWireframe(
            RotTrlTransform3d camRt,
            SimCameraProperties prop,
            double resolution,
            double wallThickness,
            Scalar wallColor,
            int floorSubdivisions,
            double floorThickness,
            Scalar floorColor,
            Mat destination) {
        for (var trls : getFieldFloorLines(floorSubdivisions)) {
            var polys = VideoSimUtil.polyFrom3dLines(camRt, prop, trls, resolution, false, destination);
            for (var poly : polys) {
                drawPoly(
                        poly,
                        (int) Math.round(getScaledThickness(floorThickness, destination)),
                        floorColor,
                        false,
                        destination);
            }
        }
        for (var trls : getFieldWallLines()) {
            var polys = VideoSimUtil.polyFrom3dLines(camRt, prop, trls, resolution, false, destination);
            for (var poly : polys) {
                drawPoly(
                        poly,
                        (int) Math.round(getScaledThickness(wallThickness, destination)),
                        wallColor,
                        false,
                        destination);
            }
        }
    }
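
    /*
     * A minimal usage sketch (hypothetical camera setup): draw white walls and a dim gray 3x3
     * floor grid onto a BGR frame.
     *
     *   var prop = new SimCameraProperties();
     *   var camRt = RotTrlTransform3d.makeRelativeTo(new Pose3d());
     *   Mat frame = Mat.zeros(480, 640, CvType.CV_8UC3);
     *   VideoSimUtil.drawFieldWireframe(
     *           camRt, prop, 0.01, 1.5, new Scalar(255, 255, 255), 3, 1, new Scalar(80, 80, 80),
     *           frame);
     */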
}