This article collects typical usage examples of the cv2.minMaxLoc method in Python. If you have been wondering what exactly cv2.minMaxLoc does, how to call it, or where it is used in practice, the curated code examples below should help. You can also explore further usage examples from the cv2 module.
Below are 22 code examples of cv2.minMaxLoc, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
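Before the examples, here is a minimal, self-contained sketch of what cv2.minMaxLoc itself returns; the array values below are made up purely for illustration. It takes a single-channel array and returns the global minimum, the global maximum, and their locations as (x, y) tuples.

import numpy as np
import cv2

# A tiny single-channel array; minMaxLoc only accepts single-channel input.
arr = np.array([[0.1, 0.5],
                [0.9, 0.3]], dtype=np.float32)

min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(arr)
print(min_val, max_val, min_loc, max_loc)
# min_val is ~0.1 at (0, 0), max_val is ~0.9 at (0, 1); locations are (x, y), i.e. (column, row).

In the template-matching examples that follow, the same call is applied to the result matrix of cv2.matchTemplate, where max_val acts as the match confidence and max_loc as the top-left corner of the best match (for the TM_CCOEFF/TM_CCORR families; the TM_SQDIFF family uses the minimum instead).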
Upvotes: 7
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def match_img(image, template, value):
    """
    :param image: input image
    :param template: template image
    :param value: matching threshold
    :return: watermark coordinates
    Description: obtains the position of the template inside the image; used to calibrate element positions.
    """
    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    threshold = value
    min_v, max_v, min_pt, max_pt = cv2.minMaxLoc(res)
    if max_v < threshold:
        return False
    if max_pt[0] not in range(10, 40) or max_pt[1] > 20:
        return False
    return max_pt
Author: Mingtzge | Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement | Lines: 18 | Source: split_img_generate_data.py
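A hedged usage sketch for match_img above; the file names are hypothetical, and both arrays must share the same dtype with the template no larger than the image:

import cv2

img = cv2.imread('card.jpg', cv2.IMREAD_GRAYSCALE)       # hypothetical input image
tpl = cv2.imread('watermark.png', cv2.IMREAD_GRAYSCALE)  # hypothetical watermark template
pt = match_img(img, tpl, 0.8)
if pt:
    print('watermark top-left corner at', pt)
else:
    print('no acceptable match found')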
Upvotes: 7
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print 'is_match', total, num
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print confidence
    return confidence
Author: NetEaseGame | Project: ATX | Lines: 22 | Source: scene_detector.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def probability(self, im: str) -> float:
    """
    Return the probability of the existence of the given image.
    :param im: the name of the image.
    :return: the probability (confidence).
    """
    assert self.screen is not None
    try:
        template = self.images[im]
    except KeyError:
        logger.error('Unexpected image name {}'.format(im))
        return 0.0
    res = cv.matchTemplate(self.screen, template, TM_METHOD)
    _, max_val, _, max_loc = cv.minMaxLoc(res)
    logger.debug('max_val = {}, max_loc = {}'.format(max_val, max_loc))
    return max_val
Author: will7101 | Project: fgo-bot | Lines: 20 | Source: tm.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def find(self, im: str, threshold: float = None) -> Tuple[int, int]:
    """
    Find the template image on screen and return its top-left coords.
    :param im: the name of the image
    :param threshold: the matching threshold. If not given, the default threshold is used.
    :return: the top-left coords of the best match, or (-1, -1) if the matching value is below `threshold`.
    """
    threshold = threshold or self.threshold
    assert self.screen is not None
    try:
        template = self.images[im]
    except KeyError:
        logger.error('Unexpected image name {}'.format(im))
        return -1, -1
    res = cv.matchTemplate(self.screen, template, TM_METHOD)
    _, max_val, _, max_loc = cv.minMaxLoc(res)
    logger.debug('max_val = {}, max_loc = {}'.format(max_val, max_loc))
    return max_loc if max_val >= threshold else (-1, -1)
Author: will7101 | Project: fgo-bot | Lines: 25 | Source: tm.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def cal_rgb_confidence(img_src_rgb, img_sch_rgb):
    """Compute the similarity of two color images of the same size."""
    # Psychological (luminance) weights for the B, G, R channels:
    weight = (0.114, 0.587, 0.299)
    src_bgr, sch_bgr = cv2.split(img_src_rgb), cv2.split(img_sch_rgb)
    # Compute the confidence of each BGR channel and store it in bgr_confidence:
    bgr_confidence = [0, 0, 0]
    for i in range(3):
        res_temp = cv2.matchTemplate(src_bgr[i], sch_bgr[i], cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_temp)
        bgr_confidence[i] = max_val
    # Weighted confidence
    weighted_confidence = bgr_confidence[0] * weight[0] + bgr_confidence[1] * weight[1] + bgr_confidence[2] * weight[2]
    return weighted_confidence
Author: AirtestProject | Project: Airtest | Lines: 19 | Source: cal_confidence.py
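A hedged sanity-check sketch for cal_rgb_confidence above, assuming the function is in scope; the random image is invented for illustration. Comparing an image with itself should yield a weighted confidence of essentially 1.0.

import numpy as np

# Synthetic 3-channel image, used only for illustration.
img = np.random.randint(0, 256, (50, 50, 3), dtype=np.uint8)
print(cal_rgb_confidence(img, img))  # expected to be very close to 1.0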
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def find_template(im_source, im_search, threshold=0.8, rgb=False):
    """Find the best matching result."""
    # Step 1: validate the input images
    check_source_larger_than_search(im_source, im_search)
    # Step 2: compute the template-matching result matrix res
    res = _get_template_result_matrix(im_source, im_search)
    # Step 3: extract the matching results
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    h, w = im_search.shape[:2]
    # Compute the confidence:
    confidence = _get_confidence_from_matrix(im_source, im_search, max_loc, max_val, w, h, rgb)
    # Compute the match location: target center + target rectangle:
    middle_point, rectangle = _get_target_rectangle(max_loc, w, h)
    best_match = generate_result(middle_point, rectangle, confidence)
    LOGGING.debug("threshold=%s, result=%s" % (threshold, best_match))
    return best_match if confidence >= threshold else None
Author: AirtestProject | Project: Airtest | Lines: 18 | Source: template.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def match_dmg_templates(self, frame):
    match_mat, max_val, tl = [None]*10, [0]*10, [(0, 0)]*10
    for i in range(0, 10):
        match_mat[i] = cv2.matchTemplate(frame, self.num_img[0],
                                         cv2.TM_CCORR_NORMED, mask=self.num_mask[0])
        _, max_val[i], _, tl[i] = cv2.minMaxLoc(match_mat[i])
    # print(max_val[0])
    br = (tl[0][0] + self.num_w, tl[0][1] + self.num_h)
    frame = cv2.rectangle(frame, tl[0], br, (255, 255, 255), 2)
    # Multi-template result searching
    # _, max_val_1, _, tl_1 = cv2.minMaxLoc(np.array(match_mat))
    # print(tl_1)

# A number of methods corresponding to the various trackbars available.
Author: jpnaterer | Project: smashscan | Lines: 18 | Source: thresholding.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def main():
    src = cv2.imread('src.jpg', cv2.IMREAD_GRAYSCALE)
    tpl = cv2.imread('tpl.jpg', cv2.IMREAD_GRAYSCALE)
    result = cv2.matchTemplate(src, tpl, cv2.TM_CCOEFF_NORMED)
    result = cv2.normalize(result, dst=None, alpha=0, beta=1,
                           norm_type=cv2.NORM_MINMAX, dtype=-1)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
    matchLoc = maxLoc
    draw1 = cv2.rectangle(
        src, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    draw2 = cv2.rectangle(
        result, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    cv2.imshow('draw1', draw1)
    cv2.imshow('draw2', draw2)
    cv2.waitKey(0)
    print(src.shape)
    print(tpl.shape)
    print(result.shape)
    print(matchLoc)
Author: cynricfu | Project: dual-fisheye-video-stitching | Lines: 21 | Source: template_matching.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def detect(self, z, x):
    k = self.gaussianCorrelation(x, z)
    # Response map
    res = real(fftd(complexMultiplication(self._alphaf, fftd(k)), True))
    # pv: maximum response value, pi: location (index) of the maximum response
    _, pv, _, pi = cv2.minMaxLoc(res)
    # Location of the maximum response as floats
    p = [float(pi[0]), float(pi[1])]
    # Refine the peak location using the neighbouring response values
    if pi[0] > 0 and pi[0] < res.shape[1] - 1:
        p[0] += self.subPixelPeak(res[pi[1], pi[0] - 1], pv, res[pi[1], pi[0] + 1])
    if pi[1] > 0 and pi[1] < res.shape[0] - 1:
        p[1] += self.subPixelPeak(res[pi[1] - 1, pi[0]], pv, res[pi[1] + 1, pi[0]])
    # Displacement relative to the sampling centre
    p[0] -= res.shape[1] / 2.
    p[1] -= res.shape[0] / 2.
    # Return the displacement from the sampling centre and the peak value
    return p, pv

# Update the target position based on the current frame
Author: ryanfwy | Project: KCF-DSST-py | Lines: 26 | Source: tracker.py
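The subPixelPeak helper called above is not part of this snippet; a minimal sketch of it, assuming the standard three-point quadratic (parabola) interpolation that the _locate_target example further below also uses:

def subPixelPeak(left, center, right):
    # Fit a parabola through (-1, left), (0, center), (1, right) and return the
    # x-offset of its vertex; guard against a (near-)flat neighbourhood.
    divisor = 2 * center - left - right
    if abs(divisor) < 1e-3:
        return 0
    return 0.5 * (right - left) / divisor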
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def detect_scale(self, image):
    xsf = self.get_scale_sample(image)
    # Compute AZ in the paper
    add_temp = cv2.reduce(complexMultiplication(self.sf_num, xsf), 0, cv2.REDUCE_SUM)
    # compute the final y
    scale_response = cv2.idft(complexDivisionReal(add_temp, (self.sf_den + self.scale_lambda)), None, cv2.DFT_REAL_OUTPUT)
    # Get the max point as the final scaling rate
    # pv: maximum response value, pi: location (index) of the maximum response
    _, pv, _, pi = cv2.minMaxLoc(scale_response)
    return pi

# Update the scale
Author: ryanfwy | Project: KCF-DSST-py | Lines: 18 | Source: tracker.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def imagesearcharea(image, x1, y1, x2, y2, precision=0.8, im=None):
    if im is None:
        im = region_grabber(region=(x1, y1, x2, y2))
        if is_retina:
            im.thumbnail((round(im.size[0] * 0.5), round(im.size[1] * 0.5)))
        # im.save('testarea.png') useful for debugging purposes, this will save the captured region as "testarea.png"
    img_rgb = np.array(im)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, 0)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < precision:
        return [-1, -1]
    return max_loc
Author: drov0 | Project: python-imagesearch | Lines: 18 | Source: imagesearch.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def locate_img(image, template):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print(res)
    print(res.shape)
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print(cv2.minMaxLoc(res))
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
Author: NetEase | Project: airtest | Lines: 18 | Source: pixelmatch.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def getKeypoints(probMap, threshold=0.1):
    mapSmooth = cv2.GaussianBlur(probMap, (3, 3), 0, 0)
    mapMask = np.uint8(mapSmooth > threshold)
    keypoints = []
    contours = None
    try:
        # OpenCV 4.x
        contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    except:
        # OpenCV 3.x
        _, contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        blobMask = np.zeros(mapMask.shape)
        blobMask = cv2.fillConvexPoly(blobMask, cnt, 1)
        maskedProbMap = mapSmooth * blobMask
        _, maxVal, _, maxLoc = cv2.minMaxLoc(maskedProbMap)
        keypoints.append(maxLoc + (probMap[maxLoc[1], maxLoc[0]],))
    return keypoints
Author: PINTO0309 | Project: MobileNetV2-PoseEstimation | Lines: 23 | Source: openvino-usbcamera-cpu-ncs2-async.py
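A hedged usage sketch for getKeypoints above, using a synthetic probability map; the blob position and size are invented for illustration:

import numpy as np
import cv2

# Build a fake probability map with a single bright blob around (30, 20).
probMap = np.zeros((64, 64), dtype=np.float32)
cv2.circle(probMap, (30, 20), 5, 1.0, -1)
probMap = cv2.GaussianBlur(probMap, (7, 7), 0)

print(getKeypoints(probMap, threshold=0.1))
# -> roughly [(30, 20, <peak probability>)]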
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def _locate_target(self, score):
    def subpixel_peak(left, center, right):
        divisor = 2 * center - left - right
        if abs(divisor) < 1e-3:
            return 0
        return 0.5 * (right - left) / divisor

    _, _, _, max_loc = cv2.minMaxLoc(score)
    loc = np.float32(max_loc)
    if max_loc[0] in range(1, score.shape[1] - 1):
        loc[0] += subpixel_peak(
            score[max_loc[1], max_loc[0] - 1],
            score[max_loc[1], max_loc[0]],
            score[max_loc[1], max_loc[0] + 1])
    if max_loc[1] in range(1, score.shape[0] - 1):
        loc[1] += subpixel_peak(
            score[max_loc[1] - 1, max_loc[0]],
            score[max_loc[1], max_loc[0]],
            score[max_loc[1] + 1, max_loc[0]])
    offset = loc - np.float32(score.shape[1::-1]) / 2
    return offset
Author: huanglianghua | Project: open-vot | Lines: 25 | Source: kcf.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def SMAvgLocalMax(self, src):
    # size
    stepsize = pySaliencyMapDefs.default_step_local
    width = src.shape[1]
    height = src.shape[0]
    # find local maxima
    numlocal = 0
    lmaxmean = 0
    for y in range(0, height - stepsize, stepsize):
        for x in range(0, width - stepsize, stepsize):
            localimg = src[y:y + stepsize, x:x + stepsize]
            lmin, lmax, dummy1, dummy2 = cv2.minMaxLoc(localimg)
            lmaxmean += lmax
            numlocal += 1
    # averaging over all the local regions
    return lmaxmean / numlocal

# normalization specific for the saliency map model
Author: tyarkoni | Project: pliers | Lines: 19 | Source: pySaliencyMap.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def matchAB(fileA, fileB):
    # Read the images
    imgA = cv2.imread(fileA)
    imgB = cv2.imread(fileB)
    # Convert to grayscale
    grayA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)
    # Get the size of image A
    height, width = grayA.shape
    # Take local patches of A and look for the matching position in B
    result_window = np.zeros((height, width), dtype=imgA.dtype)
    for start_y in range(0, height - 100, 10):
        for start_x in range(0, width - 100, 10):
            window = grayA[start_y:start_y + 100, start_x:start_x + 100]
            match = cv2.matchTemplate(grayB, window, cv2.TM_CCOEFF_NORMED)
            _, _, _, max_loc = cv2.minMaxLoc(match)
            matched_window = grayB[max_loc[1]:max_loc[1] + 100, max_loc[0]:max_loc[0] + 100]
            result = cv2.absdiff(window, matched_window)
            result_window[start_y:start_y + 100, start_x:start_x + 100] = result
    plt.imshow(result_window)
    plt.show()
Author: cangyan | Project: image-detect | Lines: 27 | Source: image_detect_02.py
Upvotes: 6
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def detect(self, z, x):
    k = self.gaussianCorrelation(x, z)
    res = real(fftd(complexMultiplication(self._alphaf, fftd(k)), True))
    _, pv, _, pi = cv2.minMaxLoc(res)   # pv: float, pi: tuple of int
    p = [float(pi[0]), float(pi[1])]    # cv::Point2f, [x, y], [float, float]
    if pi[0] > 0 and pi[0] < res.shape[1] - 1:
        p[0] += self.subPixelPeak(res[pi[1], pi[0] - 1], pv, res[pi[1], pi[0] + 1])
    if pi[1] > 0 and pi[1] < res.shape[0] - 1:
        p[1] += self.subPixelPeak(res[pi[1] - 1, pi[0]], pv, res[pi[1] + 1, pi[0]])
    p[0] -= res.shape[1] / 2.
    p[1] -= res.shape[0] / 2.
    return p, pv
Author: uoip | Project: KCFnb | Lines: 18 | Source: kcftracker.py
Upvotes: 5
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def image_search(x_start: int, y_start: int, x_end: int, y_end: int,
                 img: str, threshold: float, bmp: image = None) -> Optional[Tuple[int, int]]:
    """Search the screen for the supplied picture.

    Returns a tuple with x,y-coordinates, or None if the result is below
    the threshold.

    Keyword arguments:
    img       -- Filename or path to the file that you search for.
    threshold -- The level of fuzziness to use - a perfect match will be
                 close to 1, but probably never exactly 1. In my testing,
                 use a value between 0.7-0.95 depending on how strict you
                 wish to be.
    bmp       -- A bitmap from the get_bitmap() function; use this if you're
                 performing multiple different OCR readings in succession
                 from the same page, to avoid needlessly grabbing the same
                 bitmap multiple times. If a bitmap is not passed, the
                 function will get the bitmap itself. (default None)
    """
    if not bmp:
        bmp = Inputs.get_bitmap()
    # Bitmaps are created with an 8px border
    search_area = bmp.crop((x_start + 8, y_start + 8,
                            x_end + 8, y_end + 8))
    search_area = numpy.asarray(search_area)
    search_area = cv2.cvtColor(search_area, cv2.COLOR_RGB2GRAY)
    template = cv2.imread(img, 0)
    res = cv2.matchTemplate(search_area, template, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(res)
    if max_val < threshold:
        return None
    return max_loc
Author: kujan | Project: NGU-scripts | Lines: 34 | Source: inputs.py
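A hedged usage sketch for image_search above; the coordinates and the template path are hypothetical, and the call assumes the surrounding project's Inputs.get_bitmap() helper is available:

# Search the top-left 960x600 region of the screen for a (hypothetical) menu icon.
loc = image_search(0, 0, 960, 600, 'images/menu_icon.png', 0.9)
if loc is None:
    print('icon not found')
else:
    print('icon found at', loc)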
Upvotes: 5
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def MatchTemplate(template, target):
    """Returns match score for given template"""
    res = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    return max_val
Author: cfircohen | Project: airport | Lines: 7 | Source: solver.py
Upvotes: 5
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def match_img(self, image, target, value, rematch=False):
    """
    :param image: path to the original image
    :param target: path to the watermark template
    :param value: matching threshold
    :param rematch: False for the preliminary-round watermark, True for the rematch (final-round) watermark
    :return: outer watermark contour coordinates, grayscale of the original image, inner watermark contour
    """
    img_rgb = cv2.imread(image)
    h, w, c = img_rgb.shape
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(target, 0)
    th, tw = template.shape
    max_v1 = 0
    if not rematch:
        template = template[16:56, 20:186]
    else:
        template = template[18:107, 19:106]
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = value
    min_v, max_v, min_pt, max_pt = cv2.minMaxLoc(res)
    if max_v < threshold:
        return False, False, False
    if not rematch:
        template1 = cv2.imread(self.roi_rematch_img_path, 0)
        template1 = template1[18:107, 19:106]
        res1 = cv2.matchTemplate(img_gray, template1, cv2.TM_CCOEFF_NORMED)
        min_v1, max_v1, min_pt1, max_pt1 = cv2.minMaxLoc(res1)
    if max_v < max_v1:  # avoid the case where the two watermark types overlap in matching
        return False, False, False
    if not rematch:
        x = 20
        y = 16
    else:
        x = 19
        y = 18
    ori_pt = (min(w - tw - 1, max(max_pt[0] - x, 0)), max(0, min(max_pt[1] - y, h - th - 1)))
    return ori_pt, img_gray, max_pt
Author: Mingtzge | Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement | Lines: 40 | Source: watermask_process.py
Upvotes: 5
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def compare_a_template(img_gray, template):  # returns the maximum template-matching score
    """
    Compare the image with the template and measure their similarity.
    :param img_gray: grayscale image
    :param template: template image
    :return: similarity, in [0, 1]
    """
    # img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)  # template matching
    _, max_val, _, _ = cv2.minMaxLoc(res)
    return max_val  # the maximum normalized similarity, between 0 and 1
Author: Mingtzge | Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement | Lines: 13 | Source: twist_part.py
Upvotes: 5
# Required import: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def match_template1(template, img, plot=False, method=cv2.TM_SQDIFF_NORMED):
    img = cv2.imread(img, 0).copy()
    template = cv2.imread(template, 0)
    w, h = template.shape[::-1]
    if lib == OPENCV:
        res = cv2.matchTemplate(img, template, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
    else:
        result = match_template(img, template)
        ij = np.unravel_index(np.argmax(result), result.shape)
        top_left = ij[::-1]
    bottom_right = (top_left[0] + w, top_left[1] + h)
    if plot:
        cv2.rectangle(img, top_left, bottom_right, 255, 5)
        plt.subplot(121)
        plt.imshow(img)
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.subplot(122)
        plt.imshow(template)
        plt.show()
    return top_left, bottom_right
Author: tobyqin | Project: kog-money | Lines: 31 | Source: match.py
Note: the cv2.minMaxLoc examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not repost without permission.