# Find Lines Example # # This example shows off how to find lines in the image. For each line object # found in the image a line object is returned which includes the line's rotation.
# Note: Line detection is done by using the Hough Transform: # http://en.wikipedia.org/wiki/Hough_transform # Please read about it above for more information on what `theta` and `rho` are.
# find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines.
# When True, lens-distortion correction is applied before the line search
# (see the commented-out img.lens_corr() call further down).
enable_lens_corr = False  # turn on for straighter lines...
def is_right_angle(line1, line2):
    """Return True when the angle between line1 and line2 is a right angle.

    "Right angle" means the angle computed by the module-level helper
    calculate_angle() falls inside the inclusive (low, high) interval held
    in the global tuple right_angle_threshold — both are defined elsewhere
    in this file.
    """
    angle = calculate_angle(line1, line2)
    # Chained comparison replaces the original if/return True/return False
    # ladder; the `global` declaration was dropped because the threshold is
    # only read here, never assigned.
    return right_angle_threshold[0] <= angle <= right_angle_threshold[1]
def find_verticle_lines(lines):
    """Find the first pair of lines that meet at (roughly) a right angle.

    Args:
        lines: sequence of line objects accepted by is_right_angle().

    Returns:
        (line_a, line_b) for the first perpendicular pair found, or
        (None, None) when no such pair exists (including empty input).
    """
    line_num = len(lines)
    for i in range(line_num - 1):
        # Start at i + 1: the original started at i, which pointlessly
        # tested every line against itself on each outer iteration.
        for j in range(i + 1, line_num):
            if is_right_angle(lines[i], lines[j]):
                return (lines[i], lines[j])
    return (None, None)
def draw_cross_point(cross_x, cross_y):
    """Mark the intersection (cross_x, cross_y) on the global image `img`
    with a cross surrounded by two concentric circles (radii 5 and 10).
    """
    # All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get
    # their end-points and a `line()` method to get all the above as one
    # 4-value tuple for `draw_line()`.
    img.draw_cross(cross_x, cross_y)
    for radius in (5, 10):
        img.draw_circle(cross_x, cross_y, radius)
# Undistort the camera image. The 13.8mm lens used here shows no distortion at close range. # if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...
# `threshold` controls how many lines in the image are found. Only lines with # edge difference magnitude sums greater than `threshold` are detected...
# More about `threshold` - each pixel in the image contributes a magnitude value # to a line. The sum of all contributions is the magnitude for that line. Then # when lines are merged their magnitudes are added together. Note that `threshold` # filters out lines with low magnitudes before merging. To see the magnitude of # un-merged lines set `theta_margin` and `rho_margin` to 0...
# `theta_margin` and `rho_margin` control merging similar lines. If two lines # theta and rho value differences are less than the margins then they are merged.
import sensor, image, time from pyb import LED from pyb import UART,Timer
uart = UART(3,115200)#初始化串口 波特率 115200 sensor.reset() #sensor.set_vflip(True) #sensor.set_hmirror(True) sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 2,3040,000. #sensor.set_windowing([0,20,80,40]) sensor.skip_frames(time = 2000) # WARNING: If you use QQVGA it may take seconds clock = time.clock() # to process a frame sometimes.
sensor.reset() # Reset and initialize the sensor. sensor.set_contrast(3) sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) sensor.set_windowing((28, 28))
sensor.skip_frames(time = 2000) # Wait for settings take effect. sensor.set_auto_gain(False) sensor.set_auto_exposure(False)
clock = time.clock() # Create a clock object to track the FPS.
while(True): clock.tick() # Update the FPS clock. img = sensor.snapshot() # Take a picture and return the image. out = img.invert().find_number() if out[1] > 3.0: print(out[0]) #print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected # to the IDE. The FPS should increase once disconnected.
# --- Keypoint-based object-tracking loop ----------------------------------
# NOTE(review): `kpts1` must be initialized to None before this loop, and
# draw_keypoints() must be defined earlier — neither is visible in this chunk.
while (True):
    clock.tick()
    img = sensor.snapshot()
    if (kpts1 == None):
        # First frame: treat what is currently in view as the target object
        # and save its feature descriptor in kpts1.
        # By default find_keypoints returns multi-scale keypoints extracted
        # from an image pyramid, so later frames can match the target at
        # sizes other than the captured one (more flexible than plain
        # template matching).
        #
        # find_keypoints(roi=Auto, threshold=20, normalized=False,
        #                scale_factor=1.5, max_keypoints=100,
        #                corner_detector=CORNER_AGAST)
        # - roi: (x, y, w, h) detection region; defaults to the frame size.
        # - threshold: 0~255 corner-strength cutoff; lower -> more corners
        #   (~20 for the default AGAST detector, ~60-80 for FAST).
        # - normalized: False (default) matches multiple target sizes; True
        #   disables the multi-scale pyramid, matching one size only
        #   (template-matching-like) but running faster.
        # - scale_factor: pyramid step > 1.0; higher is faster but less
        #   accurate (1.35~1.5 is usually best).
        # - max_keypoints: cap on extracted keypoints; lower it if RAM runs out.
        # - corner_detector: AGAST (default) or the faster, less accurate FAST.
        kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
        draw_keypoints(img, kpts1)  # Show the captured target features.
    else:
        # Subsequent frames: extract keypoints at a single scale
        # (normalized=True). They will match one of the pyramid scales
        # already stored in the first descriptor.
        kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)
        # Proceed only if features were detected in this frame.
        if (kpts2):
            # match_descriptor(descriptor0, descriptor1, threshold=70,
            #                  filter_outliers=False) returns a kptmatch.
            # threshold (0~100, default 70) filters ambiguous matches —
            # smaller is stricter. filter_outliers is off by default.
            match = image.match_descriptor(kpts1, kpts2, threshold=85)
            # match.count() is the number of keypoints matched between
            # kpts1 and kpts2; more than 10 is treated as a real match.
            if (match.count()>10): # If we have at least n "good matches"
                # Draw bounding rectangle and cross at the match center.
                img.draw_rectangle(match.rect())
                img.draw_cross(match.cx(), match.cy(), size=10)
            # match.theta() is the rotation of the matched object relative
            # to the saved target.
            print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta()))
            # NOTE: uncomment if you want to draw the keypoints
            # (not recommended):
            #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)
# --- QR-code reading example ----------------------------------------------
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((240, 240)) # look at center 240x240 pixels of the VGA resolution.
sensor.skip_frames(30)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()
    # find_qrcodes() returns the QR codes found in the frame; print each one.
    for code in img.find_qrcodes():
        print(code)
    print(clock.fps())  # Frames per second (lower while the IDE is connected).
# --- Camera bring-up: wide, short grayscale strip (high FPS) --------------
# NOTE(review): the loop that consumes this configuration is not visible in
# this chunk.
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA) # High Res!
sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed).
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
clock = time.clock()
# --- Camera bring-up: RGB565, 160x120 center crop -------------------------
# NOTE(review): the loop that consumes this configuration is not visible in
# this chunk.
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger...
sensor.set_windowing((160, 120)) # Look at center 160x120 pixels of the VGA resolution.
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
clock = time.clock()