引言:这次2025电赛器件清单一出,大家的想法都是激光画图,网上早早地就流传着各种画图方案。

在这里插入图片描述
虽然猜错了,但保不准下次会出是吧,简单的写一点,给你们一点实现的建议与代码
废话不多说,下面是我对激光画图的代码流程(本人只负责视觉方面,下位机云台部分是我的队友编写的)。

1.激光识别

说到激光识别大家一打开摄像头看到的激光一定是个白斑,基本上只有边边有一点颜色比如红色(如果是红激光),所以我的选择是降低激光的亮度,我说的是视觉软件降低咯(激光硬件一定是越亮越好)
拉低曝光,效果图额额先欠着,忘拍了,但是这种降低曝光对红色阈值十分敏感,可能视觉上有其他红色(棕色会有一定的误判)

'''设置曝光函数'''
def set_exposure(cap, exposure_flag, exposure_value):
'''
注:代码中的exposure_value,是用来缓冲帧,由于曝光的改变摄像头硬件可能会有延迟而改变
'''

'''寻找红斑'''
def find_bright_spots(frame, l_min=5, l_max=255, a_min=142, a_max=255, b_min=117, b_max=142):

def find_bright_spots(frame, l_min=5, l_max=255, a_min=142, a_max=255, b_min=117, b_max=142):
    '''
    Locate bright spots (a red laser dot by default) in a BGR frame.

    Parameters:
        frame: input BGR image (annotated in place with the marker overlay)
        l_min, l_max: L-channel (lightness) range in LAB space
        a_min, a_max: A-channel range (green-red axis)
        b_min, b_max: B-channel range (blue-yellow axis)

    Returns:
        (frame, mask, x, y): annotated frame, binary mask, and the (x, y)
        of the brightest valid spot; when nothing qualifies, falls back to
        the last known global position (ray_x, ray_y).
    '''
    global ray_x, ray_y
    lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)  # convert to LAB color space
    # Build the in-range mask over all three LAB channels
    lower = np.array([l_min, a_min, b_min])
    upper = np.array([l_max, a_max, b_max])
    mask = cv2.inRange(lab, lower, upper)
    # Morphological close (dilate then erode) to fill pinholes in the blob
    kernel = np.ones((3, 3), np.uint8)
    mask = cv2.dilate(mask, kernel, iterations=1)  # dilate
    mask = cv2.erode(mask, kernel, iterations=1)   # erode
    # Find the external contours of the mask
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Filter candidates by area and bounding-box size
    valid_points = []
    w = 0
    h = 0
    for cnt in contours:
        area = cv2.contourArea(cnt)  # contour area
        if area >= 9:
            x_rect, y_rect, w, h = cv2.boundingRect(cnt)
            if w > 15 or h > 15:
                continue  # skip blobs too large to be a laser dot
            M = cv2.moments(cnt)
            if M["m00"] == 0:
                continue  # degenerate contour: guard against division by zero
            cx = int(M["m10"] / M["m00"])
            cy = int(M["m01"] / M["m00"])
            # Sample the LAB values at the centroid (L channel = lightness)
            l_val, a_val, b_val = lab[cy, cx]
            # Record the candidate
            valid_points.append((cx, cy, l_val, area, w, h))
    # Mark the brightest candidate
    if valid_points:
        valid_points.sort(key=lambda x: x[2], reverse=True)  # sort by lightness
        x, y, l_val, area, w, h = valid_points[0]

        cv2.circle(frame, (x, y), 5, (0, 0, 255), -1)  # red filled dot at the spot
        cv2.putText(frame, f"L:{l_val} A:{area}", (x+10, y-10), 
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)

        #cv2.imshow("LAB Detection", frame)
        #cv2.imshow("LAB Mask", mask)  # show the mask result
    else:
        x, y = ray_x, ray_y  # no spot found: keep the last known position
    return frame, mask, x, y


def set_exposure(cap, exposure_flag, exposure_value):
    '''
    Configure camera exposure.

    Parameters:
        cap: camera object (cv2.VideoCapture)
        exposure_flag: True to enable auto exposure, False for manual mode
        exposure_value: manual exposure value (only used when auto exposure is off)
    '''
    # Fixed: the original line was `global exposure_change_flag00.` — a syntax
    # error (stray "00" suffix and trailing dot) that made the module unloadable.
    global exposure_change_flag
    # Reset the frame-buffer countdown: the camera hardware may lag for a few
    # frames after an exposure change before the new setting takes effect.
    exposure_change_flag = 20
    if exposure_flag:
        # Enable auto exposure (3 selects auto mode on most UVC/V4L2 drivers)
        cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 3)
        print("自动曝光已启用")
    else:
        cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1)  # 1 = manual exposure mode
        cap.set(cv2.CAP_PROP_EXPOSURE, exposure_value)
        print(f"手动曝光值已设置为: {exposure_value}")

骨架识别+路径规划

def get_skeleton_center_points_filtered(binary, min_dist=1, threshold_val=threshold_val, min_length=100):
    '''
    Extract sparse center points along the skeleton segments of dark strokes.

    Parameters:
        binary: input image (despite the name, a BGR color frame; dark
                strokes on a light background are extracted)
        min_dist: minimum spacing between kept points (controls sparsity)
        threshold_val: binarization threshold; overridden by the
                "Threshold Adjust" trackbar when that window exists
        min_length: minimum pixel count for a skeleton segment to be kept

    Returns:
        filtered_points_all: list of segments, each a list of (x, y) points
        skeleton: skeleton image in 0/1 format
    '''
    # Step 1: grayscale + Gaussian blur to suppress noise
    gray = cv2.cvtColor(binary, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)

    # Fixed: the original unconditionally replaced the threshold_val argument
    # with the trackbar position, which both ignored the caller's value and
    # raised cv2.error when the "Threshold Adjust" trackbar was not created.
    try:
        threshold_val = cv2.getTrackbarPos("Threshold", "Threshold Adjust")
    except cv2.error:
        pass  # no trackbar window: honor the threshold_val parameter
    # Step 2: inverse threshold keeps the dark stroke band as foreground
    _, binary_thresh = cv2.threshold(blurred, threshold_val, 255, cv2.THRESH_BINARY_INV)
    cv2.imshow("Threshold Adjust", binary_thresh)  # preview the binarization

    # Step 3: skeletonize the 0/1 image (scikit-image skeletonize)
    skeleton = skeletonize(binary_thresh // 255).astype(np.uint8)

    # Step 4: label connected skeleton segments with OpenCV connectedComponents
    num_labels, labels = cv2.connectedComponents(skeleton)

    filtered_points_all = []

    for label_idx in range(1, num_labels):  # label 0 is the background
        coords = np.column_stack(np.where(labels == label_idx))  # (y, x)

        if len(coords) > min_length:
            selected_points = []
            last_pt = None
            for pt in coords:
                # keep a point only when far enough from the previous kept one
                if last_pt is None or np.linalg.norm(pt - last_pt) > min_dist:
                    selected_points.append((pt[1], pt[0]))  # store as (x, y)
                    last_pt = pt
            filtered_points_all.append(selected_points)
    # Return all filtered point lists and the skeleton image
    return filtered_points_all, skeleton


def sort_points_greedy_with_start_degree(points, neighbor_radius=2.0):
    """
    Greedy path ordering: start at the point with the most neighbors, then
    repeatedly walk to the nearest unused neighbor, marking pen-up jumps.

    Parameters:
        points: iterable of (x, y) coordinates
        neighbor_radius: distance within which two points count as neighbors

    Returns:
        [(point, laser_on), ...] — laser_on is False on the two entries that
        bracket a pen-up jump (laser off at the break point and during the
        travel move) and True everywhere else.
    """
    if len(points) <= 2:
        return [(tuple(pt), True) for pt in points]

    points = np.array(points)
    tree = cKDTree(points)
    used = np.zeros(len(points), dtype=bool)
    total_points = len(points)

    # 1. Degree of each point = number of neighbors within neighbor_radius.
    #    Improved: query_ball_point replaces the original per-point k=N query,
    #    avoiding an O(n^2) distance computation; "-1" removes the point itself.
    neighbors_count = [len(tree.query_ball_point(pt, neighbor_radius)) - 1
                       for pt in points]

    # 2. Start from the highest-degree point (densest area / junction)
    start_idx = int(np.argmax(neighbors_count))
    current_idx = start_idx
    used[current_idx] = True

    result = []
    result.append((tuple(points[current_idx]), True))  # laser on at start

    while np.sum(used) < total_points:
        last_pt = points[current_idx]
        dists, idxs = tree.query(last_pt, k=10, distance_upper_bound=neighbor_radius)

        found = False
        for idx in idxs:
            if idx >= total_points or used[idx]:
                continue  # padding index (no neighbor) or already visited
            used[idx] = True
            current_idx = idx
            result.append((tuple(points[current_idx]), True))  # laser stays on
            found = True
            break

        if not found:
            # No reachable neighbor: pen up, jump to the unused point of
            # highest degree and resume drawing there.
            remaining_idxs = np.where(~used)[0]
            if len(remaining_idxs) == 0:
                break
            next_idx = remaining_idxs[np.argmax([neighbors_count[i] for i in remaining_idxs])]

            result.append((tuple(points[current_idx]), False))   # laser off
            result.append((tuple(points[next_idx]), False))      # travel move
            result.append((tuple(points[next_idx]), True))       # laser back on

            used[next_idx] = True
            current_idx = next_idx

    return result

模式函数可以这么写(标注画布后)

        if mode ==6:
            # Mode 6: trace the skeleton of the drawing on the calibrated
            # canvas and stream the ordered points to the gimbal over serial.
            # Calibration check: the 4th corner stays (0, ...) until the
            # canvas corners were laser-marked in the previous mode.
            if canvas_corners[3][0] != 0:
                # Perspective-warp the canvas region into a rectangle
                canvas = warp_perspective_canvas(frame, canvas_corners, canvas_width, canvas_height)
                # Extract skeleton segments from the warped canvas
                cv2.imshow("canvas", canvas)
                filtered_points_all, skeleton = get_skeleton_center_points_filtered(canvas, min_dist=1, threshold_val=50, min_length=400)
                # Bail out of this frame when no usable segment was found
                if len(filtered_points_all) == 0:
                    print("没有找到有效的骨架线段")
                    continue

                else:
                    """
                    #有效骨架线段
                    """   
                    # Debounce: require the segment count to stay inside a
                    # +/-30 band for 20 consecutive frames before drawing.
                    # NOTE(review): the first term measures skeleton_length_point
                    # but the second measures filtered_points_all — confirm
                    # which list the +/-30 band should apply to.
                    if len(skeleton_length_point) > skeleton_length+30 or len(filtered_points_all) < skeleton_length-30:
                        print(f"骨架线段数量异常: {len(filtered_points_all)}")
                        skeleton_length_point = copy.deepcopy(filtered_points_all)
                        skeleton_length_Similarity = 20
                    
                    elif skeleton_length_Similarity != 0:
                        print(f"骨架线段数量正常: {skeleton_length_Similarity}")
                        skeleton_length_Similarity =skeleton_length_Similarity-1

                    elif skeleton_length_Similarity == 0:
                        cv2.imshow("Skeleton", skeleton * 255)  # *255 turns the 0/1 skeleton into a visible B/W image
                        for segment in filtered_points_all:
                            # Order each skeleton segment into a drawable path
                            ordered_points = sort_points_greedy_with_start_degree(segment,15)
                            # Draw the ordered path on the canvas as visual feedback
                            for i in range(len(ordered_points) - 1) :
                                pt1, _ = ordered_points[i]       # pt1 is an (x, y) tuple
                                pt2, _ = ordered_points[i + 1]
                                cv2.line(canvas, pt1, pt2, (0, 255, 0), 1)
                            for (ray_x, ray_y), laser_on in ordered_points:
                                x_int, x_frac, y_int, y_frac = process_canvas_xy(ray_x, ray_y)
                                # laser_on decides whether the laser fires during this move
                                print(f"发送点: ({x_int}, {x_frac}, {y_int}, {y_frac}), 激光开关状态: {laser_on}")
                                Serial_port_canvas_xy(x_int, x_frac, y_int, y_frac, int(laser_on))  # 1/0 encodes laser on/off
                                cv2.waitKey(50)

画布标注(画布相对于摄像头是固定的!!!)


def warp_perspective_canvas(frame, pts_src, canvas_width=canvas_width, canvas_height=canvas_height):
    """
    Warp the (possibly tilted) canvas region of a camera frame into an
    axis-aligned rectangle via a perspective transform.

    Parameters:
        frame: input image (np.ndarray)
        pts_src: the four canvas corner coordinates, shape (4, 2), float32
                 or convertible; suggested order is top-left, top-right,
                 bottom-right, bottom-left
        canvas_width: output canvas width in pixels
        canvas_height: output canvas height in pixels

    Returns:
        The rectified canvas image of size canvas_width x canvas_height.
    """
    src_corners = np.array(pts_src, dtype=np.float32)
    # Destination rectangle corners in the same order as pts_src
    right = canvas_width - 1
    bottom = canvas_height - 1
    dst_corners = np.float32([[0, 0], [right, 0], [right, bottom], [0, bottom]])

    transform = cv2.getPerspectiveTransform(src_corners, dst_corners)
    return cv2.warpPerspective(frame, transform, (canvas_width, canvas_height))




#-------------------------------------------------------------------------------------------------------
'''
主函数循环部分
'''
        if mode == 5:
            # Mode 5: track the laser dot on the calibrated canvas and stream
            # its canvas coordinates to the gimbal over serial.
            # The 4th corner stays (0, ...) until calibration has been done.
            if canvas_corners[3][0] != 0:
                canvas = warp_perspective_canvas(frame, canvas_corners, canvas_width, canvas_height)
                # Locate the laser dot in the rectified canvas
                _,_,ray_x,ray_y = find_bright_spots(canvas)
                x_int, x_frac, y_int, y_frac = process_canvas_xy(ray_x,ray_y)
                try:
                    # Laser flag fixed to 1 (on) while tracking
                    Serial_port_canvas_xy(x_int, x_frac, y_int, y_frac,1)
                except Exception as e:      
                    print(f"发送数据失败: {e}")
                print(f"红点: ({x_int}, {x_frac}, {y_int}, {y_frac})")
                cv2.imshow("canvas", canvas)

            else :
                print("no")

zzz

T字转角、多个节角同样可以用这套方案实现。

T

结语:2025年电赛前准备,记录我的最后一次电赛,回顾我的前两次电赛,参与奖和省三,有时候怀疑自己的水平是不是不太适合参加电赛,最后还是要明白,其实结果什么都不重要,只要自己认真对待过就好了不是嘛(强行安慰)

Logo

火山引擎开发者社区是火山引擎打造的AI技术生态平台,聚焦Agent与大模型开发,提供豆包系列模型(图像/视频/视觉)、智能分析与会话工具,并配套评测集、动手实验室及行业案例库。社区通过技术沙龙、挑战赛等活动促进开发者成长,新用户可领50万Tokens权益,助力构建智能应用。

更多推荐