I got lazy and ran out of steam halfway through writing this.
The parts you can use right now: save, load, the screenshot modes, and the find-image part of the image/color features.
The screenshot modes are:
1. Full-screen capture: captures the whole screen.
2. Image capture: after clicking it, drag a box on the image shown in the window and press Enter to keep the selection.
3. Screen capture: same as full-screen capture, but you can drag a box to pick a region.
4. Dynamic capture: like screen capture, except the static parts are kept and anything that moves turns black; the capture runs for 5 seconds (the frame-differencing idea is sketched right after this list).
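For reference, the dynamic capture is plain frame differencing, the same idea used by on_moving_capture_item and Getcaptre further down: pixels that stay identical between consecutive screenshots are kept, anything that changes is blacked out. A minimal standalone sketch of that idea; the function name, region and duration below are illustrative, not taken from the tool:

# frame-differencing sketch: keep static pixels, black out anything that changes
import time
import numpy as np
import pyautogui

def capture_static(region, duration=5.0):
    """Return an image in which only pixels that never changed during `duration` survive."""
    base = np.array(pyautogui.screenshot(region=region))
    start = time.time()
    while time.time() - start < duration:
        frame = np.array(pyautogui.screenshot(region=region))
        # keep a pixel where both frames agree, otherwise paint it black
        base = np.where(np.all(base == frame, axis=-1, keepdims=True), base, 0).astype(np.uint8)
    return base

# usage (region is x, y, width, height):
# img = capture_static((0, 0, 400, 300))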
Image/color features, find-image: uses template matching; you pick the small template image yourself, and when it is found a box is drawn on the large image in the window (a minimal template-matching sketch follows below).
Image/color features, multi-point color search: half-finished, only partly written, test it yourself (the general idea is also sketched below).
Image/color features, everything else: not written yet.
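The find-image feature boils down to cv2.matchTemplate with a normalized-correlation score and a threshold, which is what FindPic in mxbx_ts.py does. A minimal grayscale sketch (the file name is a placeholder):

# template-matching sketch: locate a small template inside a full-screen screenshot
import cv2
import numpy as np
import pyautogui

def find_pic(template_path, threshold=0.9):
    big = cv2.cvtColor(np.array(pyautogui.screenshot()), cv2.COLOR_RGB2GRAY)
    small = cv2.imread(template_path, cv2.IMREAD_GRAYSCALE)
    res = cv2.matchTemplate(big, small, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(res)
    if max_val < threshold:
        return -1, -1          # not found
    return max_loc             # top-left corner of the best match

# x, y = find_pic('template.png')   # placeholder path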
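Multi-point color search (the half-finished part) follows the usual dm-style approach: find every pixel within a tolerance of the anchor color, then require the remaining colors to appear at fixed offsets from that pixel. A simplified sketch of that idea; the colors, offsets and tolerance below are made-up examples, and FindMultColor in mxbx_ts.py is the actual (unfinished) implementation:

# multi-point color search sketch: anchor color plus offset colors within a tolerance
import numpy as np
import pyautogui

def find_mult_color(anchor_rgb, offsets, tol=10):
    """offsets: list of ((dx, dy), (r, g, b)) relative to the anchor pixel."""
    img = np.array(pyautogui.screenshot()).astype(np.int16)  # int16 avoids uint8 wrap-around
    h, w, _ = img.shape
    # candidate anchors: every pixel within `tol` of the anchor color on all three channels
    ys, xs = np.where(np.all(np.abs(img - anchor_rgb) <= tol, axis=-1))
    for x, y in zip(xs, ys):
        ok = True
        for (dx, dy), rgb in offsets:
            px, py = x + dx, y + dy
            if not (0 <= px < w and 0 <= py < h) or np.any(np.abs(img[py, px] - rgb) > tol):
                ok = False
                break
        if ok:
            return int(x), int(y)
    return -1, -1

# example call (values are illustrative only):
# find_mult_color((232, 16, 16), [((5, 0), (249, 173, 8)), ((0, 7), (16, 16, 16))])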
# 萌新本炘自动化.py
import time
import cv2
import numpy as np
import pyautogui
from PIL import Image
from baseFun import notice
import wx
import wx.aui as aui
from baseFun import mxbx_ts, pmjt, tsgj
class MyFrame(wx.Frame):
def __init__(self, parent, title):
super().__init__(parent, title=title, size=(960, 540))
self.ts = mxbx_ts.TuSe()
# 创建AuiManager,并将其设置为frame的管理器
self.aui_mgr = aui.AuiManager(self)
self.aui_mgr.SetManagedWindow(self)
# 添加一个面板作为窗口内容
panel = wx.Panel(self)
# 创建ScrolledWindow,并将图片控件添加到其中
self.sw = wx.ScrolledWindow(panel, style=wx.VSCROLL | wx.HSCROLL)
self.sw.SetScrollbars(1, 1, 1, 1)
image = pyautogui.screenshot()
image = np.array(image)
# image = cv2.imread('1234.bmp') # 读取Opencv图像
height, width, _ = image.shape
# image = cv2.cvtColor(screenshot, cv2.COLOR_BGR2RGB) # Opencv默认使用BGR颜色通道,需要转换为RGB
self.maps = wx.Bitmap.FromBuffer(width, height, image) # 将Opencv图像转换为wxPython图像对象
self.bitmap = wx.StaticBitmap(self.sw, -1, self.maps)
self.sw.SetVirtualSize(width, height)
self.sw.SetScrollRate(20, 20)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.sw, 1, wx.EXPAND)
panel.SetSizer(sizer)
# 创建工具条
self.tb1 = self._CreateToolBar()
# 将面板添加到AuiManager中
self.aui_mgr.AddPane(panel,
aui.AuiPaneInfo().CenterPane().Caption("My Panel"))
# 将工具栏添加到AuiManager中
self.aui_mgr.AddPane(self.tb1,
aui.AuiPaneInfo().Name("ToolBar1").Caption(u"工具条").ToolbarPane().Top().Row(0).Position(0).Floatable(True))
# 更新AuiManager的布局
self.aui_mgr.Update()
# 绑定公告事件
self.Bind(wx.EVT_SHOW, self.on_show)
def _CreateToolBar(self, d='H'):
'''创建工具栏'''
self.id_open = wx.Window.NewControlId()
self.id_save = wx.Window.NewControlId()
self.id_quit = wx.Window.NewControlId()
self.id_help = wx.Window.NewControlId()
self.id_about = wx.Window.NewControlId()
if d.upper() in ['V', 'VERTICAL']:
tb = aui.AuiToolBar(self, -1, wx.DefaultPosition, wx.DefaultSize,
agwStyle=aui.AUI_TB_TEXT | aui.AUI_TB_VERTICAL)
else:
tb = aui.AuiToolBar(self, -1, wx.DefaultPosition, wx.DefaultSize, style=aui.AUI_TB_TEXT)
tb.SetToolBitmapSize(wx.Size(16, 16))
tb.AddTool(self.id_open, u'加载', wx.NullBitmap, u'加载新图片')
self.Bind(wx.EVT_TOOL, self.on_button1_click, id=self.id_open)
tb.AddTool(self.id_save, u'保存', wx.NullBitmap, u'保存文件')
self.Bind(wx.EVT_TOOL, self.on_button2_click, id=self.id_save)
tb.AddSeparator()
tb.AddTool(self.id_help, u'截图', wx.NullBitmap, u'截图')
self.Bind(wx.EVT_TOOL, self.on_button_click, id=self.id_help)
tb.AddTool(self.id_about, u'图色功能', wx.NullBitmap, u'图色功能')
self.Bind(wx.EVT_TOOL, self.on_button3_click, id=self.id_about)
tb.Realize()
return tb
def on_button3_click(self, event):
# 创建新窗口
self.dialog = wx.Dialog(self, title="图色功能", size=(400, 400), style=wx.DEFAULT_DIALOG_STYLE | wx.STAY_ON_TOP)
# 创建下拉框
choices = ["找图", "找色", "多点找色", "找动图"]
combo_box = wx.ComboBox(self.dialog, choices=choices, style=wx.CB_DROPDOWN)
# 创建按钮
button = wx.Button(self.dialog, label="确定")
button.Bind(wx.EVT_BUTTON, lambda evt: self.on_confirm(evt, combo_box.GetValue()))
# 添加布局
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(combo_box, 0, wx.ALIGN_CENTER | wx.ALL, 20)
# sizer.Add(file_dialog, 0, wx.EXPAND | wx.ALL, 20)
sizer.Add(button, 0, wx.ALIGN_CENTER | wx.ALL, 20)
self.dialog.SetSizer(sizer)
# 显示对话框
self.dialog.Show()
def on_paint(self, event):
dc = wx.PaintDC(self)
dc.DrawBitmap(self.maps, 0, 0)
def on_confirm(self, event, selection):
if selection == "找图":
# 获取大图
bitmap = self.maps.ConvertToImage()
width, height = bitmap.GetSize()
rgb_data = bitmap.GetData()
# 创建 PIL.Image 对象
image = Image.frombytes("RGB", (width, height), rgb_data)
# 将 PIL.Image 对象转换为 numpy 数组
img = np.array(image)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# 获取小图
file_dialog = wx.FileDialog(self.dialog, "选择图片",
wildcard="Image files (*.bmp;*.jpg;*.png)|*.bmp;*.jpg;*.png",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if file_dialog.ShowModal() == wx.ID_OK:
file_path = file_dialog.GetPath()
template = cv2.imread(file_path)
res = self.ts.FindPic(img, template)
if res[0] == -1:
wx.MessageBox('没找到', '注意', wx.OK | wx.ICON_INFORMATION)
else:
th, tw = template.shape[:2]
# 在位图上的(100, 100)坐标位置绘制一个红色矩形框
dc = wx.MemoryDC(self.maps)
dc.SetPen(wx.Pen(wx.RED, 2))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(int(res[0]), int(res[1]), tw, th)
self.bitmap.SetBitmap(self.maps)
del dc # 释放内存
self.Refresh()
elif selection == "多点找色":
ts = tsgj.MyFrame()
ts.Show()
def on_button_click(self, event):
# 创建弹出菜单
menu = wx.Menu()
full_screen_item = menu.Append(wx.ID_ANY, "全屏截图")
image_capture_item = menu.Append(wx.ID_ANY, "图像截图")
screen_capture_item = menu.Append(wx.ID_ANY, "屏幕截图")
moving_capture_item = menu.Append(wx.ID_ANY, "动态截图")
# 绑定菜单项点击事件
self.Bind(wx.EVT_MENU, self.on_full_screen_capture, full_screen_item)
self.Bind(wx.EVT_MENU, self.on_image_capture, image_capture_item)
self.Bind(wx.EVT_MENU, self.on_screen_capture_item, screen_capture_item)
self.Bind(wx.EVT_MENU, self.on_moving_capture_item, moving_capture_item)
# 显示弹出菜单
self.PopupMenu(menu)
menu.Destroy()
def on_moving_capture_item(self, event):
# print('动图')
dialog = pmjt.MaskPanel(self)
if dialog.ShowModal() == wx.ID_OK:
pos = dialog.pos # 获取返回值
print(pos)
captime = 0
st = time.time()
a = pyautogui.screenshot(region=pos)
a = np.array(a)
# a = cv2.cvtColor(a, cv2.COLOR_RGB2BGR)
while 1:
if captime > 5:
break
b = pyautogui.screenshot(region=pos)
b = np.array(b)
# b = cv2.cvtColor(b, cv2.COLOR_RGB2BGR)
b = np.where(np.all(a == b, axis=-1, keepdims=True), a, [0, 0, 0])
b = cv2.convertScaleAbs(b)
a = b.copy()
captime = time.time() - st
# return a
height, width, _ = a.shape
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Opencv默认使用BGR颜色通道,需要转换为RGB
self.maps = wx.Bitmap.FromBuffer(width, height, a) # 将Opencv图像转换为wxPython图像对象
self.bitmap.SetBitmap(self.maps)
self.sw.SetVirtualSize(width, height)
dialog.Destroy()
def on_screen_capture_item(self, event):
print("屏幕截图")
dialog = pmjt.MaskPanel(self)
if dialog.ShowModal() == wx.ID_OK:
img = dialog.result # 获取返回值
height, width, _ = img.shape
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Opencv默认使用BGR颜色通道,需要转换为RGB
self.maps = wx.Bitmap.FromBuffer(width, height, img) # 将Opencv图像转换为wxPython图像对象
self.bitmap.SetBitmap(self.maps)
self.sw.SetVirtualSize(width, height)
dialog.Destroy()
def on_full_screen_capture(self, event):
print("全屏截图")
self.Iconize() # 最小化窗口
time.sleep(0.5)
image = pyautogui.screenshot()
image = np.array(image)
height, width, _ = image.shape
self.maps = wx.Bitmap.FromBuffer(width, height, image) # 将Opencv图像转换为wxPython图像对象
self.bitmap.SetBitmap(self.maps)
self.sw.SetVirtualSize(width, height)
# time.sleep(1)
self.Iconize(False) # 恢复窗口
def on_image_capture(self, event):
print("图像截图")
self.bitmap.Bind(wx.EVT_LEFT_DOWN, self.on_left_down)
self.bitmap.Bind(wx.EVT_LEFT_UP, self.on_left_up)
self.selection_started = False
self.start_pos = None
self.end_pos = None
self.selected_bitmap = None # 保存选框的位图
self.new_width, self.new_height = 0, 0
self.Bind(wx.EVT_CHAR_HOOK, self.on_key_press)
def on_left_down(self, event):
self.selection_started = True
self.start_pos = event.GetPosition()
self.bitmap.Bind(wx.EVT_MOTION, self.on_mouse_move)
event.StopPropagation() # 停止事件继续传播到父级窗口
def on_left_up(self, event):
self.selection_started = False
self.end_pos = event.GetPosition()
self.bitmap.Unbind(wx.EVT_MOTION) # 解除绑定鼠标移动事件
# 获取原始图片
original_bitmap = self.bitmap.GetBitmap()
# 计算选框的位置和大小
x = min(self.start_pos.x, self.end_pos.x)
y = min(self.start_pos.y, self.end_pos.y)
self.new_width = abs(self.start_pos.x - self.end_pos.x)
self.new_height = abs(self.start_pos.y - self.end_pos.y)
# 创建选框的位图
self.selected_bitmap = wx.Bitmap(self.new_width, self.new_height)
self.maps = self.selected_bitmap
# 在选框位图上绘制原始图片中的选框区域
dc = wx.MemoryDC(self.selected_bitmap)
dc.DrawBitmap(original_bitmap, -x, -y)
dc.SelectObject(wx.NullBitmap)
# 获取选框的屏幕坐标
screen_x, screen_y = self.bitmap.ClientToScreen(x, y)  # 坐标是相对图片控件的,用 bitmap 来转换
print("选框的屏幕坐标:", screen_x, screen_y)
def on_key_press(self, event):
print('回车')
if event.GetKeyCode() == wx.WXK_RETURN and self.selected_bitmap is not None:
# 显示截取的图片
self.bitmap.SetBitmap(self.selected_bitmap)
self.sw.SetVirtualSize(self.new_width, self.new_height)
elif event.GetKeyCode() == wx.WXK_ESCAPE:
self.Refresh()
def on_mouse_move(self, event):
self.end_pos = event.GetPosition()
# 绘制选框矩形
self.client_dc = wx.ClientDC(self.bitmap)
self.client_dc.SetPen(wx.Pen(wx.RED, 2))
self.client_dc.SetBrush(wx.TRANSPARENT_BRUSH)
self.client_dc.Clear()
self.client_dc.DrawBitmap(self.bitmap.GetBitmap(), 0, 0)
self.client_dc.DrawRectangle(self.start_pos.x, self.start_pos.y, self.end_pos.x - self.start_pos.x,
self.end_pos.y - self.start_pos.y)
del self.client_dc # 释放内存
# self.Refresh()
def on_show(self, event):
if self.IsShown():
wx.CallAfter(self.show_notice)
# 显示公告
def show_notice(self):
note = notice.Notice()
note.compare()
# print(note.compare_result)
wx.MessageBox(note.compare_result, '注意', wx.OK | wx.ICON_INFORMATION)
def on_button1_click(self, event):
# 打开文件选择对话框
dlg = wx.FileDialog(self, "Choose a file", wildcard="Image files (*.png;*.bmp;*.jpg)|*.png;*.bmp;*.jpg")
if dlg.ShowModal() == wx.ID_OK:
# 获取文件路径和文件类型
filepath = dlg.GetPath()
image = cv2.imdecode(np.fromfile(filepath, dtype=np.uint8), cv2.IMREAD_COLOR) # 读取Opencv图像
height, width, _ = image.shape
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Opencv默认使用BGR颜色通道,需要转换为RGB
self.maps = wx.Bitmap.FromBuffer(width, height, image) # 将Opencv图像转换为wxPython图像对象
self.bitmap.SetBitmap(self.maps)
self.sw.SetVirtualSize(width, height)
dlg.Destroy()
def on_button2_click(self, event):
wildcard = "PNG files (*.png)|*.png|BMP files (*.bmp)|*.bmp"
dlg = wx.FileDialog(self, "保存图片", "", "", wildcard, wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
file_path = dlg.GetPath()
file_extension = dlg.GetFilterIndex()
image_type = wx.BITMAP_TYPE_PNG if file_extension == 0 else wx.BITMAP_TYPE_BMP
self.maps = self.bitmap.GetBitmap()
self.maps.SaveFile(file_path, image_type)
dlg.Destroy()
class MyApp(wx.App):
def OnInit(self):
frame = MyFrame(None, "萌新自动化管理工具")
frame.Show()
return True
app = MyApp()
app.MainLoop()
# mxbx_ts.py
# 这个跟我之前开源的仿大漠功能一样
import time
import numpy as np
import pyautogui
import copy
import cv2
from sklearn import cluster
class TuSe:
def __init__(self):
print('欢迎使用')
def GetCapture(self, stax, stay, endx, endy):
w = endx - stax
h = endy - stay
im = pyautogui.screenshot(region=(stax, stay, w, h))
# im = cv2.cvtColor(np.array(im), cv2.COLOR_BGR2RGB)
return np.array(im)
def FindPic(self, bitmap, template, thd=0.9, type=1):
'''
找图
:param bitmap: 大图(numpy 数组)
:param template: 模板小图(numpy 数组)
:param thd: 相似度
:param type: 默认1为灰度化找图,其他为彩色找图
:return: 匹配位置左上角坐标,未找到返回 (-1, -1)
'''
img = bitmap
if type == 1:
# print('灰度化找图')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
rv = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(rv)
if maxVal < thd:
return -1, -1
else:
return maxLoc[0], maxLoc[1]
def FindPics(self, des):
img = pyautogui.screenshot()
img = np.array(img)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
res = {}
for key, value in des.items():
template = cv2.imread(value[4])
base = img[value[1]:value[3], value[0]:value[2]]
th, tw = template.shape[:2]
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
rv = cv2.matchTemplate(base, template, cv2.TM_CCOEFF_NORMED)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(rv)
if maxVal < value[5]:
res.setdefault(key, (-1, -1))
else:
res.setdefault(key, (maxLoc[0] + tw / 2 + value[0], maxLoc[1] + th / 2 + value[1]))
return res
def Hex_to_Rgb(self, hex):
'''
十六进制转RGB
:param hex: 十六进制颜色值
:return: RGB
'''
return np.array(tuple(int(hex[i:i + 2], 16) for i in (0, 2, 4)))
def CmpColor(self, x, y, color, sim: float):
'''
比色
:param x: X坐标
:param y: Y坐标
:param color: 十六进制颜色,可以从大漠直接获取
:param sim: 相似度(0-1对应二值化的255-0),1为完全匹配
:return: 真或假
'''
img = self.GetCapture(x - 1, y - 1, x + 1, y + 1)
img = img[1][1]
color = self.Hex_to_Rgb(color)
res = np.absolute(color - img)
sim = int((1 - sim) * 255)
return True if np.amax(res) <= sim else False
def FindColor(self, x1, y1, x2, y2, des, sim: float):
'''
找色
:param x1: 起点X
:param y1: 起点Y
:param x2: 终点X
:param y2: 终点Y
:param des: 十六进制颜色,可以从大漠直接获取
:param sim: 相似度(0-1对应二值化的255-0),1为完全匹配
:return:
'''
img = self.GetCapture(x1, y1, x2, y2)
res = np.absolute(img - self.Hex_to_Rgb(des))
sim = int((1 - sim) * 255)
res = np.argwhere(np.all(res <= sim, axis=2))
res = res + (y1, x1)
return res[:, [1, 0]]
def GetColorNum(self, x1, y1, x2, y2, des, sim: float):
'''
获取颜色数量
:param x1: 起点X
:param y1: 起点Y
:param x2: 终点X
:param y2: 终点Y
:param des: 十六进制颜色,可以从大漠直接获取
:param sim: 相似度(0-1对应二值化的255-0),1为完全匹配
:return:
'''
return len(self.FindColor(x1, y1, x2, y2, des, sim))
def FindMultColor(self, stax, stay, endx, endy, des):
'''
多点找色
:param stax:
:param stay:
:param endx:
:param endy:
:param des: 大漠获取到的多点找色数据,偏色必须写上
:return:
'''
w = endx - stax
h = endy - stay
img = pyautogui.screenshot(region=(stax, stay, w, h))
img = np.array(img)
rgby = []
ps = []
a = 0
firstXY = []
res = np.empty([0, 2])
for i in des.split(','):
rgb_y = i[-13:]
r = int(rgb_y[0:2], 16)
g = int(rgb_y[2:4], 16)
b = int(rgb_y[4:6], 16)
y = int(rgb_y[-2:])
rgby.append([r, g, b, y])
for i in range(1, len(des.split(','))):
ps.append([int(des.split(',')[i].split('|')[0]), int(des.split(',')[i].split('|')[1])])
for i in rgby:
# np.logical_and 只接受两个条件(第三个位置参数是 out),三个通道的判断改用 & 连接
result = (abs(img[:, :, 0:1] - i[0]) < i[3]) & (abs(img[:, :, 1:2] - i[1]) < i[3]) & (abs(img[:, :, 2:3] - i[2]) < i[3])
results = np.argwhere(np.all(result, axis=2)).tolist()
if a == 0:
firstXY = copy.deepcopy(results)
else:
nextnextXY = copy.deepcopy(results)
for index in nextnextXY:
index[0] = int(index[0]) - ps[a - 1][1]
index[1] = int(index[1]) - ps[a - 1][0]
q = set([tuple(t) for t in firstXY])
w = set([tuple(t) for t in nextnextXY])
matched = np.array(list(q.intersection(w)))
res = np.append(res, matched, axis=0)
a += 1
unique, counts = np.unique(res, return_counts=True, axis=0)
index = np.argmax(counts)
re = unique[index] + (stay, stax)
if np.max(counts) == len(des.split(',')) - 1:
return np.flipud(re)
return np.array([-1, -1])
def FindPicEx(self, x1, y1, x2, y2, path, thd=0.9, MIN_MATCH_COUNT=8):
'''
全分辨率找图
:param x1:
:param y1:
:param x2:
:param y2:
:param path:
:param thd: 相似度
:param MIN_MATCH_COUNT: 特征点数量
:return:
'''
thd = thd - 0.2
template = cv2.imread(path, 0) # queryImage
# target = cv2.imread('target.jpg', 0) # trainImage
target = self.GetCapture(x1, y1, x2, y2)
target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)
# Initiate SIFT detector创建sift检测器
sift = cv2.SIFT_create()  # OpenCV 4.4+ 中 SIFT 已在主模块;旧版 contrib 可用 cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(template, None)
kp2, des2 = sift.detectAndCompute(target, None)
# 创建设置FLANN匹配
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# store all the good matches as per Lowe's ratio test.
good = []
# 舍弃大于0.7的匹配
for m, n in matches:
if m.distance < thd * n.distance:
good.append(m)
if len(good) > MIN_MATCH_COUNT:
# 获取关键点的坐标
src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
# 计算变换矩阵和MASK
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
h, w = template.shape
# 使用得到的变换矩阵对原图像的四个角进行变换,获得在目标图像上对应的坐标
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, M)
res = (dst[0] + dst[2]) / 2 # [[[ 39.11337 147.11575 ]] [[135.06624 255.12143 ]]
return int(res[0][0]) + x1, int(res[0][1]) + y1
else:
return -1, -1
def _FilterRec(self, res, loc):
""" 对同一对象的多个框按位置聚类后,按置信度选最大的一个进行保留。
:param res: 是 cv2.matchTemplate 返回值
:param loc: 是 cv2.np.argwhere(res>threshold) 返回值
:return: 返回保留的点的列表 pts
"""
model = cluster.AffinityPropagation(damping=0.5, max_iter=100, convergence_iter=10, preference=-50).fit(loc)
y_pred = model.labels_
pts = []
for i in set(y_pred):
argj = loc[y_pred == i]
argi = argj.T
pt = argj[np.argmax(res[tuple(argi)])]
pts.append(pt[::-1])
return np.array(pts)
def FindMultPic(self, x1, y1, x2, y2, path, thd=0.8):
'''
多目标找图
:param x1:
:param y1:
:param x2:
:param y2:
:param path:
:param thd: 相似度
:return:
'''
target = self.GetCapture(x1, y1, x2, y2)
target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)
template = cv2.imread(path, 0)
w, h = template.shape[:2]
res = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
loc = np.argwhere(res >= thd)
if len(loc):
resc = self._FilterRec(res, loc)
return resc + (h / 2 + x1, w / 2 + y1)
else:
return [[-1, -1]]
def FindPic_TM(self, x1, y1, x2, y2, path, thd=0.9):
'''
找透明图,透明色为黑色
:param x1: 起点X
:param y1: 起点Y
:param x2: 终点X
:param y2: 终点Y
:param path: 图片路径
:param thd: 相似度
:return: 图片中心坐标
'''
img = self.GetCapture(x1, y1, x2, y2)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
template = cv2.imread(path)
template2 = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(template2, 20, 255, cv2.THRESH_BINARY)
th, tw = template.shape[:2]
rv = cv2.matchTemplate(img, template, 1, mask=mask)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(rv)
if 1 - minVal >= thd:
return minLoc[0] + tw / 2 + x1, minLoc[1] + th / 2 + y1
else:
return -1, -1
def Getcaptre(self, x1, y1, x2, y2, times=5):
'''
动图变静态图片,改为静态图片后使用FindPic_TM进行找图
:param x1:
:param y1:
:param x2:
:param y2:
:param times:
:return:
'''
w = x2 - x1
h = y2 - y1
captime = 0
st = time.time()
a = pyautogui.screenshot(region=(x1, y1, w, h))
a = np.array(a)
a = cv2.cvtColor(a, cv2.COLOR_BGR2RGB)
# time.sleep(0.1)
while 1:
if captime > times:
break
b = pyautogui.screenshot(region=(x1, y1, w, h))
b = np.array(b)
b = cv2.cvtColor(b, cv2.COLOR_BGR2RGB)
b = np.where(np.all(a == b, axis=-1, keepdims=True), a, [0, 0, 0])
b = cv2.convertScaleAbs(b)
a = b.copy()
captime = time.time() - st
return a
def StressShow(self, img, des, type=0):
'''
保留选中颜色,其他为黑色,相似度根据偏色调整
:param img: 输入图像(numpy 数组)
:param des: 大漠的色彩描述
:param type: 0为原来颜色,1为白色
:return:
'''
# des = 'e81010-101010|f9ad08-000000'
dess = des.split('|')
des = [i[0:6] for i in dess]
des = [np.array(self.Hex_to_Rgb(d)) for d in des]
pds = [i[-6:] for i in dess]
pds = tuple(tuple(int(item[i:i + 2]) for i in range(0, len(item), 2)) for item in pds)
mask = np.zeros(img.shape[:2], dtype=np.bool_)
for i, color in enumerate(des):
mask += np.all(np.abs(img - color) <= pds[i], axis=-1)
# type=1: 选中颜色置为白色;type=0: 保留原色,其余置黑
new_img = np.where(mask[..., None], [255, 255, 255], [0, 0, 0]) if type else np.where(mask[..., None], img, [0, 0, 0])
img_converted = cv2.convertScaleAbs(new_img)
img_converted = cv2.cvtColor(np.array(img_converted), cv2.COLOR_BGR2RGB)
return img_converted
def SetDict(self, path):
des = {}
with open(path, 'r', encoding='GBK') as f:
text = f.read()
lines = text.splitlines()
for line in lines:
parts = line.split("$")
# self.des.setdefault(parts[1],parts[0])
bin_str = ''
for c in parts[0]:
byte = int(c, 16)
byte_bin = bin(byte)[2:].zfill(4)
bin_str += byte_bin
m = len(bin_str) // 11
if (m % 4):
bin_str = bin_str[:-(m % 4)]
arr = np.array([list(bin_str[i:i + 11]) for i in range(0, len(bin_str), 11)], dtype=np.float32)
arr = arr.transpose()
des.setdefault(parts[1], arr)
# print(self.des)
return des
def FindString(self, x1, y1, x2, y2, strs, color, thd, DIict):
if strs not in DIict:
print('字符串不存在')
return -1, -1
else:
arr = DIict[strs]
img = self.GetCapture(x1, y1, x2, y2)
img = self.StressShow(img, color, 1)  # StressShow 现在接收图像参数,先截图再处理
img = (img != 0).any(axis=2).astype(int)
thresh = np.array(img, dtype=np.float32)
result = cv2.matchTemplate(arr, thresh, cv2.TM_CCOEFF_NORMED)
minv, maxv, minl, maxl = cv2.minMaxLoc(result)
# print(minv, maxv, minl, maxl)
w, h = arr.shape
if maxv < thd:
return -1, -1
else:
return int(maxl[0] + h / 2 + x1), int(maxl[1] + w / 2 + y1)
def Ocr(self, x1, y1, x2, y2, des, thd, DIict):
dess = des.split('|')
des = [i[0:6] for i in dess]
des = [np.array(self.Hex_to_Rgb(d)) for d in des]
pds = [i[-6:] for i in dess]
pds = tuple(tuple(int(item[i:i + 2]) for i in range(0, len(item), 2)) for item in pds)
img = self.GetCapture(x1, y1, x2, y2)
mask = np.zeros(img.shape[:2], dtype=np.bool_)
for i, color in enumerate(des):
mask += np.all(np.abs(img - color) <= pds[i], axis=-1)
# OCR 这里直接二值化:匹配到的颜色置为白色,其余置黑
new_img = np.where(mask[..., None], [255, 255, 255], [0, 0, 0])
img_converted = cv2.convertScaleAbs(new_img)
img_converted = cv2.cvtColor(np.array(img_converted), cv2.COLOR_BGR2RGB)
img = (img_converted != 0).any(axis=2).astype(int)
img = np.array(img, dtype=np.float32)
res = {}
for key, value in DIict.items():
w, h = value.shape
result = cv2.matchTemplate(value, img, cv2.TM_CCOEFF_NORMED)
loc = np.argwhere(result >= thd)
if len(loc):
resc = self._FilterRec(result, loc)
resc.astype(np.int64)
resc += np.array((h / 2 + x1, w / 2 + y1)).astype(np.int64)
resc = [(i[0], i[1]) for i in resc]
res.setdefault(key, resc)
else:
res.setdefault(key, [(-1, -1)])
return res
def getstr(self, original_data):
sorted_data = sorted(original_data.items(), key=lambda item: item[1][0][1])
grouped_data = []
for char, coord_list in sorted_data:
if not grouped_data:
grouped_data.append([(char, coord) for coord in coord_list])
else:
added = False
for sublist in grouped_data:
if coord_list[0][1] == sublist[0][1][1]:
sublist.extend([(char, coord) for coord in coord_list])
added = True
break
if not added:
grouped_data.append([(char, coord) for coord in coord_list])
return grouped_data
def OcrFix(self, input_dict, size=20):
items = sorted(input_dict, key=lambda x: x[1][0])
merged_dict = {}
i = 0
while i < len(items):
curr_key = items[i][0]
curr_value = items[i][1]
i += 1
if curr_value == (-1, -1):
continue
while i < len(items) and items[i][1][0] - curr_value[0] <= size:
curr_key += items[i][0]
curr_value = items[i][1]
if curr_value == (-1, -1):
merged_dict.pop(curr_key, None)
break
i += 1
merged_dict[curr_key] = curr_value
return merged_dict
def GetOcr(self, data):
res = {}
dat = self.getstr(data)
for i in dat:
x = self.OcrFix(i)
res.update(x)
return res
# a = TuSe()
#
# node = {
# '此电脑': [0, 94, 209, 284, './image/cdn.bmp', 0.8],
# '回收站': [0, 313,200, 539, './image/hsz.bmp', 0.8],
# '百度网盘': [39, 696,267, 897, './image/bdwp.bmp', 0.8]
# }
# b = time.time()
# c = a.FindPics(node)
# print(time.time() - b)
# print(c)
# # {'此电脑': (51.0, 174.0), '回收站': (46.0, 418.5), '百度网盘': (142.0, 787.0)}
# notice.py
# 相当于一个远程公告吧
import requests
import threading
from bs4 import BeautifulSoup
# 公告操作
class Notice:
def __init__(self):
self.compare_result = ''
def Get_url(self):
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
url = 'https://tibiji.com/share/iB3ed0aBDf'
res = requests.get(url, headers=headers)
res.encoding = res.apparent_encoding
if res.status_code == 200:
soup = BeautifulSoup(res.text, 'html.parser')
div_contents = soup.find('div', {'id': 'contents'})
print(div_contents.text.strip())
self.compare_result = div_contents.text.strip()
else:
print('请求失败')
self.compare_result = '公告获取失败'
def compare(self):
t = threading.Thread(target=self.Get_url)
t.daemon = True  # setDaemon() 已弃用
t.start()
t.join() # 等待线程执行完毕
# pmjt.py
# 全屏截图操作
import cv2
import numpy as np
import pyautogui
import wx
class MaskPanel(wx.Dialog):
def __init__(self, parent):
super().__init__(parent, title='Mask Panel', style=wx.NO_BORDER | wx.FRAME_SHAPED | wx.STAY_ON_TOP)
x, y = pyautogui.size()
self.SetSize(x, y) # 设置窗口大小为屏幕大小
self.SetPosition(wx.Point(0, 0))
self.SetTransparent(128)
self.img = pyautogui.screenshot()
self.img = cv2.cvtColor(np.array(self.img), cv2.COLOR_BGR2RGB)
self.pos = None
self.rect_start = None
self.rect_end = None
self.dragging = False
self.result = None # 存储返回值的变量
self.Bind(wx.EVT_PAINT, self.on_paint)
self.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.Bind(wx.EVT_LEFT_UP, self.on_mouse_up)
self.Bind(wx.EVT_MOTION, self.on_mouse_motion)
self.Bind(wx.EVT_CHAR_HOOK, self.on_key_press)
def on_mouse_down(self, event):
self.rect_start = event.GetPosition()
self.dragging = True
def on_mouse_up(self, event):
self.rect_end = event.GetPosition()
self.dragging = False
self.Refresh()
def on_mouse_motion(self, event):
if self.dragging:
self.rect_end = event.GetPosition()
self.Refresh()
def on_paint(self, event):
dc = wx.PaintDC(self) # 使用 wx.PaintDC 而不是 wx.BufferedPaintDC
dc.SetPen(wx.Pen(wx.RED, 2))
if self.rect_start and self.rect_end:
rect = wx.Rect(self.rect_start, self.rect_end)
region = wx.Region(rect)
region.Subtract(wx.Region(rect))
dc.SetBrush(wx.Brush(wx.Colour(0, 0, 0, 0)))
dc.SetClippingRegion(rect) # 设置矩形框作为剪辑区域
dc.DrawRectangle(rect)
def on_key_press(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_RETURN or keycode == wx.WXK_NUMPAD_ENTER:
self.img = self.img[self.rect_start[1]:self.rect_end[1], self.rect_start[0]:self.rect_end[0]]
self.Destroy()
self.result = self.img # 将返回值存储在 result 变量中
self.pos = (self.rect_start[0],self.rect_start[1],self.rect_end[0]-self.rect_start[0],self.rect_end[1]-self.rect_start[1])
self.EndModal(wx.ID_OK)
# app = wx.App()
# frame = wx.Frame(None)
# dialog = MaskPanel(frame)
# if dialog.ShowModal() == wx.ID_OK:
# img = dialog.result # 获取返回值
# # 在这里可以使用返回的img
# print(img)
# dialog.Destroy()
# tsgj.py
# 仿大漠取色取点的操作,但是还没写完
import cv2
import numpy as np
import pyautogui
import wx
from PIL import Image
from . import pmjt, zhuazhua, mxbx_ts
class MyFrame(wx.Frame):
def __init__(self):
super().__init__(None, title="FlexGrid 布局示例", size=(780, 620))
self.panel = wx.Panel(self)
self.sf = mxbx_ts.TuSe()
self.mainImg = None
# 创建水平方向的 BoxSizer
hbox = wx.BoxSizer(wx.HORIZONTAL)
# 创建垂直方向的 BoxSizer
vbox = wx.BoxSizer(wx.VERTICAL)
# 创建 FlexGrid 布局
flex_grid_sizer = wx.FlexGridSizer(rows=11, cols=5, vgap=10, hgap=10)
# 创建标签文本
label1 = wx.StaticText(self.panel, label="坐标")
label2 = wx.StaticText(self.panel, label="颜色")
label3 = wx.StaticText(self.panel, label="RGB")
label4 = wx.StaticText(self.panel, label="偏色")
label5 = wx.StaticText(self.panel, label="选中")
# 将标签文本添加到布局中
flex_grid_sizer.Add(label1, 0, wx.ALIGN_CENTER)
flex_grid_sizer.Add(label2, 0, wx.ALIGN_CENTER)
flex_grid_sizer.Add(label3, 0, wx.ALIGN_CENTER)
flex_grid_sizer.Add(label4, 0, wx.ALIGN_CENTER)
flex_grid_sizer.Add(label5, 0, wx.ALIGN_CENTER)
# 创建并添加基础数据
self.data = []
for i in range(10):
coord_text_ctrl = wx.TextCtrl(self.panel, value="0, 0", style=wx.TE_READONLY)
color_panel = wx.Panel(self.panel, size=(40, 20))
rgb_text_ctrl = wx.TextCtrl(self.panel, value="255, 255, 255", style=wx.TE_READONLY)
bias_text_ctrl = wx.TextCtrl(self.panel, value="000000")
checkbox = wx.CheckBox(self.panel, label="")
# 给复选框绑定事件处理函数
checkbox.Bind(wx.EVT_CHECKBOX, lambda event, row=i: self.on_checkbox_change(event, row))
# 将颜色图片设置为纯色
color_panel.SetBackgroundColour((255, 255, 255))
# 将控件添加到布局中
flex_grid_sizer.Add(coord_text_ctrl, 0, wx.ALIGN_CENTER_VERTICAL)
flex_grid_sizer.Add(color_panel, 0, wx.EXPAND)
flex_grid_sizer.Add(rgb_text_ctrl, 0, wx.ALIGN_CENTER_VERTICAL)
flex_grid_sizer.Add(bias_text_ctrl, 0, wx.ALIGN_CENTER_VERTICAL)
flex_grid_sizer.Add(checkbox, 0, wx.ALIGN_CENTER_VERTICAL)
# 将每行的数据保存起来
self.data.append((coord_text_ctrl, color_panel, rgb_text_ctrl, bias_text_ctrl, checkbox))
# 给颜色图片绑定点击事件处理函数
color_panel.Bind(wx.EVT_LEFT_DOWN, lambda event, row=i: self.on_color_click(event, row))
# 设置布局的列宽和行高
flex_grid_sizer.AddGrowableCol(3, 1)
flex_grid_sizer.SetFlexibleDirection(wx.HORIZONTAL)
flex_grid_sizer.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_NONE)
# 将 FlexGrid 布局添加到水平 BoxSizer 中
hbox.Add(flex_grid_sizer, 1, wx.EXPAND | wx.ALL, 10)
# 添加图片到右侧
self.sw = wx.ScrolledWindow(self.panel, style=wx.VSCROLL | wx.HSCROLL, size=(200, 400))
self.sw.SetScrollbars(1, 1, 1, 1)
image = pyautogui.screenshot()
image = np.array(image)
height, width, _ = image.shape
self.maps = wx.Bitmap.FromBuffer(width, height, image) # 将Opencv图像转换为wxPython图像对象
self.bitmap = wx.StaticBitmap(self.sw, -1, self.maps)
self.sw.SetVirtualSize(width, height)
self.sw.SetScrollRate(20, 20)
hbox.Add(self.sw, 0, wx.EXPAND | wx.ALL, 10)
basedata = wx.FlexGridSizer(rows=0, cols=2, vgap=5, hgap=5)
self.xzfw_button = wx.Button(self.panel, label="选择范围")
self.xzfw_button.Bind(wx.EVT_BUTTON, self.on_xzfw_button_click)
self.xzfw_text = wx.TextCtrl(self.panel, value="0,0,0,0", style=wx.TE_READONLY)
self.scms_label = wx.StaticText(self.panel, label=" 色彩描述")
self.scms_value = ''
self.scms_text = wx.TextCtrl(self.panel, value=self.scms_value)
ddzs_label = wx.StaticText(self.panel, label=" 多点找色")
ddzs_text = wx.TextCtrl(self.panel, value="0,0,0,0", style=wx.TE_READONLY)
# 将控件添加到布局中
basedata.Add(self.xzfw_button, 0, wx.EXPAND | wx.ALL)
basedata.Add(self.xzfw_text, 0, wx.EXPAND | wx.ALL)
basedata.Add(self.scms_label, 0, wx.EXPAND | wx.ALL)
basedata.Add(self.scms_text, 0, wx.EXPAND | wx.ALL)
basedata.Add(ddzs_label, 0, wx.EXPAND | wx.ALL)
basedata.Add(ddzs_text, 0, wx.EXPAND | wx.ALL)
basedata.AddGrowableCol(1, 1)
basedata.SetFlexibleDirection(wx.HORIZONTAL)
basedata.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_NONE)
vbox.Add(hbox, 0, wx.EXPAND | wx.ALL, 10)
vbox.Add(basedata, 0, wx.EXPAND | wx.ALL, 10)
# 将水平 BoxSizer 设置为面板的 sizer
self.panel.SetSizer(vbox)
self.Show()
def on_xzfw_button_click(self, event):
jt = pmjt.MaskPanel(self)
if jt.ShowModal() == wx.ID_OK:
pos = jt.pos # 获取返回值
img = jt.result
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height, width, _ = img.shape
self.maps = wx.Bitmap.FromBuffer(width, height, img) # 将Opencv图像转换为wxPython图像对象
self.bitmap.SetBitmap(self.maps)
self.sw.SetVirtualSize(width, height)
self.xzfw_text.SetValue(str(pos))
jt.Destroy()
def on_color_click(self, event, row):
zz = zhuazhua.ColorFrame()
if zz.ShowModal() == wx.ID_OK:
data = zz.data # 获取返回值
coord, color, rgb, bias, checkbox = self.data[row]
coord.SetValue(str(data.pos))
rgb.SetValue(str(data.rgb))
color.SetBackgroundColour(wx.Colour(data.color))
self.Refresh()
zz.Destroy()
def on_checkbox_change(self, event, row):
_, _, _, _, checkbox = self.data[row]
if checkbox.GetValue():
coord, _, rgb, bias, _ = self.data[row]
coord_data = coord.GetValue()
rgb_data = rgb.GetValue()
bias_data = bias.GetValue()
self.scms_value = self.scms_text.GetValue()
self.scms_text.SetValue(self.scms_value + f"位置:{coord_data}\nRGB:{rgb_data}\n偏色:{bias_data}")
bitmap = self.maps.ConvertToImage()
width, height = bitmap.GetSize()
rgb_data = bitmap.GetData()
# 创建 PIL.Image 对象
image = Image.frombytes("RGB", (width, height), rgb_data)
# 将 PIL.Image 对象转换为 numpy 数组
img = np.array(image)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = self.sf.StressShow(img, f'4CA079-{bias_data}', 1)
self.maps = wx.Bitmap.FromBuffer(width, height, img) # 将Opencv图像转换为wxPython图像对象
self.bitmap.SetBitmap(self.maps)
self.sw.SetVirtualSize(width, height)
else:
self.scms_text.SetValue("色彩描述")
# if __name__ == "__main__":
# app = wx.App(False)
# frame = MyFrame()
# app.MainLoop()
# zhuazhua.py
# 之前开源的抓抓的代码
from ctypes import windll
import cv2
from numpy import array as arr
from win32api import GetCursorPos, SetCursorPos
import wx
from PIL import ImageGrab
class colorData:
def __init__(self, pos=None, color=None, rgb=None):
self.pos = pos
self.color = color
self.rgb = rgb
class ColorFrame(wx.Dialog):
def __init__(self):
windll.user32.SetProcessDPIAware()
super().__init__(None, title='Desktop Color', size=(200, 300))
self.panel = wx.Panel(self)
self.zb = wx.StaticText(self.panel, label='坐标:(0, 0, 0)', style=wx.ALIGN_CENTER)
self.ys = wx.StaticText(self.panel, label='颜色:(0, 0, 0)', style=wx.ALIGN_CENTER)
self.RGB = wx.StaticText(self.panel, label='RGB:(0, 0, 0)', style=wx.ALIGN_CENTER)
self.bitmap = wx.StaticBitmap(self.panel, size=(200, 200))
self.data = colorData()
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.zb, proportion=1, flag=wx.EXPAND)
sizer.Add(self.ys, proportion=1, flag=wx.EXPAND)
sizer.Add(self.RGB, proportion=1, flag=wx.EXPAND)
sizer.Add(self.bitmap, proportion=1, flag=wx.EXPAND | wx.ALL)
self.panel.SetSizer(sizer)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.Bind(wx.EVT_CHAR_HOOK, self.on_key_press)
# 创建一个定时器来定期获取桌面颜色并更新标签
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)
self.timer.Start(1)  # 定时器间隔单位为毫秒,这里约每毫秒刷新一次
def on_timer(self, event):
point = GetCursorPos()
screenshot = ImageGrab.grab()
color = screenshot.getpixel(point)
img = arr(ImageGrab.grab((point[0] - 10, point[1] - 10, point[0] + 10, point[1] + 10)))
img = cv2.resize(img, None, None, fx=10, fy=10, interpolation=cv2.INTER_AREA)
cv2.rectangle(img, (100, 100), (110, 110), (255, 0, 0), 1)
self.update_label(point, color, img)
def update_label(self, point, color, img):
self.zb.SetLabel(f'坐标:({point[0]}, {point[1]})')
self.ys.SetLabel(f'颜色:({color[0]}, {color[1]}, {color[2]})')
self.RGB.SetLabel(f'RGB:({color[0]:02X}{color[1]:02X}{color[2]:02X})')
height, width, _ = img.shape
self.maps = wx.Bitmap.FromBuffer(width, height, img) # 将Opencv图像转换为wxPython图像对象
self.bitmap.SetBitmap(self.maps)
def on_close(self, event):
self.timer.Stop()
self.Destroy()
def on_key_press(self, event):
keycode = event.GetKeyCode()
point = GetCursorPos()
if keycode == wx.WXK_RETURN or keycode == wx.WXK_NUMPAD_ENTER:
screenshot = ImageGrab.grab()
color = screenshot.getpixel(point)
self.data.pos = point
self.data.color = color
self.data.rgb = f'{color[0]:02X}{color[1]:02X}{color[2]:02X}'
self.on_close(event)
self.EndModal(wx.ID_OK)
elif keycode == wx.WXK_LEFT:
SetCursorPos((point[0] - 1, point[1]))
elif keycode == wx.WXK_RIGHT:
SetCursorPos((point[0] + 1, point[1]))
elif keycode == wx.WXK_UP:
SetCursorPos((point[0], point[1] - 1))
elif keycode == wx.WXK_DOWN:
SetCursorPos((point[0], point[1] + 1))
def get_data(self):
return self.data
# app = wx.App()
# frame = ColorFrame()
# frame.Show()
# app.MainLoop()
# print(frame.data.pos, frame.data.color, frame.data.rgb)