AI打游戏-伍(游戏,启动!)
目标
- 使用代码调用yolo模型,并解析预测结果
- 读取游戏视频预测结果,并可视化
- 读取游戏窗口预测结果,并可视化
- 根据预测结果,模拟鼠标操作
步骤
官方文档
代码预测静态图片
- 读取游戏截图,送入yolo网络预测
- 解析预测结果
import cv2
from ultralytics import YOLO
model = YOLO('runs/detect/train/weights/best.pt')
image = cv2.imread('E:\\ai-play-game\\project-1-at-2023-09-13-17-05-6275bec0\\images\\0d332b6a-8100.jpg')
# Prediction result (a list is returned because a batch of images can be passed in;
# we submit one image, so take element 0)
result = model(image)[0]
print(f"result.names: {result.names}")
print(f"result.boxes: {result.boxes}")
# Inspecting the printed output reveals the different box formats: xywh, xywhn, xyxy, xyxyn
# If the model ran on a GPU the tensors live in GPU memory; call .cpu() first to move
# them to host memory before converting
# OpenCV's rectangle drawing uses xyxy coordinates, so result.boxes.data fits directly
# Row format: [[x1, y1, x2, y2, confidence, class_id]]
print(f"坐标信息:{result.boxes.data.cpu().numpy().tolist()}")
可视化预测结果
- 打印坐标不够直观,使用opencv显示出结果
- 读取图片代码
import cv2


def main():
    """Load one game screenshot from disk and display it in an OpenCV window."""
    image = cv2.imread('E:\\ai-play-game\\project-1-at-2023-09-13-17-05-6275bec0\\images\\0d332b6a-8100.jpg')
    # Show the picture.
    cv2.imshow("image", image)
    # Keep the window up for 5 seconds; pressing Esc (keycode 27) closes it early.
    pressed = cv2.waitKey(5000) & 0xFF
    if pressed == 27:
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
- 加上yolo后
import cv2
from ultralytics import YOLO

# Load the trained detector once at module level.
model = YOLO('runs/detect/train/weights/best.pt')

# Maps class id -> (label text, colour tuple) used when drawing detections.
clazz_dict = {
    0: ('hellhound', (255, 0, 0)),
    1: ('samurais', (0, 255, 0)),
    2: ('player', (0, 0, 255)),
    3: ('fireflies', (255, 255, 0)),
}


def ai_boxes(image):
    """
    Draw the model's detections onto the image.

    :param image: pixel array of the picture
    :return: the annotated pixel array
    """
    prediction = model(image)[0]
    detections = prediction.boxes.data.cpu().numpy().tolist()
    for left, top, right, bottom, conf, cls in detections:
        left, top, right, bottom = int(left), int(top), int(right), int(bottom)
        label, colour = clazz_dict[int(cls)]
        # Bounding rectangle for this detection.
        cv2.rectangle(image, (left, top), (right, bottom), colour, 1)
        # Class label above the rectangle's top-left corner.
        cv2.putText(image, label, (left, top), cv2.FONT_HERSHEY_COMPLEX, 0.5, colour, 1)
    return image


def main():
    frame = cv2.imread('E:\\ai-play-game\\project-1-at-2023-09-13-17-05-6275bec0\\images\\0d332b6a-8100.jpg')
    frame = ai_boxes(frame)
    # Show the annotated picture.
    cv2.imshow("image", frame)
    # Display for 5 seconds or until Esc (keycode 27) is pressed.
    if cv2.waitKey(5000) & 0xFF == 27:
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
可视化解析视频
- 将来源替换成视频
- 读取视频代码
import cv2


def main():
    """Play the recorded gameplay video frame by frame in an OpenCV window."""
    cap = cv2.VideoCapture("E:\\ai-play-game\\2023-09-12 23-27-51.mp4")
    while cap.isOpened():
        ret, frame = cap.read()
        # FIX: at end of stream read() returns ret=False while isOpened()
        # stays True, so the original loop spun forever on the last frame.
        if not ret:
            break
        cv2.imshow("video", frame)
        # Press Esc (keycode 27) to quit early.
        if cv2.waitKey(1) & 0xFF == 27:
            break
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
- 加上YOLO后
import cv2
from ultralytics import YOLO

# Load the trained detector once at module level.
model = YOLO('runs/detect/train/weights/best.pt')

# Maps class id -> (label text, colour tuple) used when drawing detections.
clazz_dict = {
    0: ('hellhound', (255, 0, 0)),
    1: ('samurais', (0, 255, 0)),
    2: ('player', (0, 0, 255)),
    3: ('fireflies', (255, 255, 0)),
}


def ai_boxes(image):
    """
    Draw the model's detections onto the image.

    :param image: pixel array of the frame
    :return: the annotated pixel array
    """
    result = model(image)[0]
    # Rows are [x1, y1, x2, y2, confidence, class_id]; move off the GPU first.
    boxes = result.boxes.data.cpu().numpy().tolist()
    for x1, y1, x2, y2, conf, cls in boxes:
        x1, y1, x2, y2, cls = int(x1), int(y1), int(x2), int(y2), int(cls)
        cls_name, cls_rgb = clazz_dict[cls]
        # Bounding rectangle for this detection.
        cv2.rectangle(image, (x1, y1), (x2, y2), cls_rgb, 2)
        # Class label at the rectangle's top-left corner.
        cv2.putText(image, cls_name, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 1, cls_rgb, 2)
    return image


def main():
    """Run the detector over each video frame and display the annotated stream."""
    cap = cv2.VideoCapture("E:\\ai-play-game\\2023-09-12 23-27-51.mp4")
    while cap.isOpened():
        ret, frame = cap.read()
        # FIX: at end of stream read() returns ret=False while isOpened()
        # stays True, so the original loop spun forever on the last frame.
        if not ret:
            break
        frame = ai_boxes(frame)
        cv2.imshow("video", frame)
        # Press Esc (keycode 27) to quit early.
        if cv2.waitKey(1) & 0xFF == 27:
            break
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
读取游戏窗口
- pip安装依赖
pip install pywin32
- 读取窗口代码
import cv2
from ultralytics import YOLO
import win32gui
import numpy as np
from PIL import ImageGrab


def main():
    """Continuously capture the game window and mirror it in an OpenCV window."""
    # The game must already be running. FIX: FindWindow returns 0 when the
    # title is not found; without this guard GetWindowRect fails opaquely.
    win_id = win32gui.FindWindow(None, 'Tap Ninja')
    if not win_id:
        raise RuntimeError("Game window 'Tap Ninja' not found - start the game first")
    while True:
        # Window rectangle (left, top, right, bottom), re-read every frame
        # so the capture follows the window if it is moved.
        win_bbox = win32gui.GetWindowRect(win_id)
        # Grab that screen region as an RGB pixel array.
        game_window = np.array(ImageGrab.grab(bbox=win_bbox))
        # PIL delivers RGB while OpenCV expects BGR - swap the channels.
        # (Same constant value as the original COLOR_BGR2RGB, clearer intent.)
        image = cv2.cvtColor(game_window, cv2.COLOR_RGB2BGR)
        cv2.imshow("video", image)
        # Press Esc (keycode 27) to quit.
        if cv2.waitKey(1) & 0xFF == 27:
            break
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
- 加上YOLO后
import cv2
from ultralytics import YOLO
import win32gui
import numpy as np
from PIL import ImageGrab

# Load the trained detector once at module level.
model = YOLO('runs/detect/train/weights/best.pt')

# Maps class id -> (label text, colour tuple) used when drawing detections.
clazz_dict = {
    0: ('hellhound', (255, 0, 0)),
    1: ('samurais', (0, 255, 0)),
    2: ('player', (0, 0, 255)),
    3: ('fireflies', (255, 255, 0)),
}


def ai_boxes(image):
    """
    Draw the model's detections onto the image.

    :param image: pixel array of the frame
    :return: the annotated pixel array
    """
    result = model(image)[0]
    # Rows are [x1, y1, x2, y2, confidence, class_id]; move off the GPU first.
    boxes = result.boxes.data.cpu().numpy().tolist()
    for x1, y1, x2, y2, conf, cls in boxes:
        x1, y1, x2, y2, cls = int(x1), int(y1), int(x2), int(y2), int(cls)
        cls_name, cls_rgb = clazz_dict[cls]
        # Bounding rectangle for this detection.
        cv2.rectangle(image, (x1, y1), (x2, y2), cls_rgb, 2)
        # Class label at the rectangle's top-left corner.
        cv2.putText(image, cls_name, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 1, cls_rgb, 2)
    return image


def main():
    """Capture the live game window, run detection, and display the result."""
    # The game must already be running. FIX: FindWindow returns 0 when the
    # title is not found; without this guard GetWindowRect fails opaquely.
    win_id = win32gui.FindWindow(None, 'Tap Ninja')
    if not win_id:
        raise RuntimeError("Game window 'Tap Ninja' not found - start the game first")
    while True:
        # Window rectangle, re-read every frame so the capture follows the window.
        win_bbox = win32gui.GetWindowRect(win_id)
        # Grab that screen region as an RGB pixel array.
        game_window = np.array(ImageGrab.grab(bbox=win_bbox))
        # PIL delivers RGB while OpenCV expects BGR - swap the channels.
        # (Same constant value as the original COLOR_BGR2RGB, clearer intent.)
        image = cv2.cvtColor(game_window, cv2.COLOR_RGB2BGR)
        image = ai_boxes(image)
        cv2.imshow("video", image)
        # Press Esc (keycode 27) to quit.
        if cv2.waitKey(1) & 0xFF == 27:
            break
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
模拟鼠标操作
- pip安装依赖
pip install pyautogui
- 由于游戏使用DirectX渲染,pyautogui需要管理员权限才能在游戏中模拟鼠标操作
- 一定要重新使用"管理员"身份打开IDE!!!
import cv2
from ultralytics import YOLO
import win32gui
import numpy as np
from PIL import ImageGrab
import pyautogui

# Load the trained detector once at module level.
model = YOLO('runs/detect/train/weights/best.pt')

# Maps class id -> (label text, colour tuple) used when drawing detections.
clazz_dict = {
    0: ('hellhound', (255, 0, 0)),
    1: ('samurais', (0, 255, 0)),
    2: ('player', (0, 0, 255)),
    3: ('fireflies', (255, 255, 0)),
}


def controller(boxes):
    """
    Simulate the player's action from the detection results.

    Clicks the left mouse button once for every non-player object whose left
    edge has crossed the player's "fire line" (one body-width ahead of the
    player box).

    :param boxes: YOLO detections as [x1, y1, x2, y2, confidence, class_id] rows
    """
    player_fire_x = None
    other_x1_list = []
    for x1, y1, x2, y2, conf, cls in boxes:
        if int(cls) == 2:
            # Class 2 is the player; attack point is one body-width ahead of it.
            player_fire_x = x2 + (x2 - x1)
        else:
            other_x1_list.append(x1)
    # Only act when both the player and at least one target are visible.
    if player_fire_x is not None and len(other_x1_list) > 0:
        for x1 in other_x1_list:
            if x1 < player_fire_x:
                # Target has reached the fire line - click.
                pyautogui.click(button='left')
                print("**************************** 点击 ****************************")


def ai_boxes(image):
    """
    Draw the model's detections onto the image and trigger the controller.

    :param image: pixel array of the frame
    :return: the annotated pixel array
    """
    result = model(image)[0]
    # Rows are [x1, y1, x2, y2, confidence, class_id]; move off the GPU first.
    boxes = result.boxes.data.cpu().numpy().tolist()
    for x1, y1, x2, y2, conf, cls in boxes:
        x1, y1, x2, y2, cls = int(x1), int(y1), int(x2), int(y2), int(cls)
        cls_name, cls_rgb = clazz_dict[cls]
        # Bounding rectangle for this detection.
        cv2.rectangle(image, (x1, y1), (x2, y2), cls_rgb, 2)
        # Class label at the rectangle's top-left corner.
        cv2.putText(image, cls_name, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 1, cls_rgb, 2)
    # Decide whether to click based on the detected positions.
    controller(boxes)
    return image


def main():
    """Capture the game window, detect objects, draw them, and play automatically."""
    # The game must already be running. FIX: FindWindow returns 0 when the
    # title is not found; without this guard GetWindowRect fails opaquely.
    win_id = win32gui.FindWindow(None, 'Tap Ninja')
    if not win_id:
        raise RuntimeError("Game window 'Tap Ninja' not found - start the game first")
    while True:
        # Window rectangle, re-read every frame so the capture follows the window.
        win_bbox = win32gui.GetWindowRect(win_id)
        # Grab that screen region as an RGB pixel array.
        game_window = np.array(ImageGrab.grab(bbox=win_bbox))
        # PIL delivers RGB while OpenCV expects BGR - swap the channels.
        # (Same constant value as the original COLOR_BGR2RGB, clearer intent.)
        image = cv2.cvtColor(game_window, cv2.COLOR_RGB2BGR)
        image = ai_boxes(image)
        cv2.imshow("video", image)
        # Press Esc (keycode 27) to quit.
        if cv2.waitKey(1) & 0xFF == 27:
            break
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
小结
- 可以看出识别结果不够准确
提升准确率
- 调整训练参数
- 增加训练集样本数量
- 区分训练集和验证集
总结
- AI算法不仅是模型训练,还包含了很多上下游工作
通常是以下几个步骤
- 发现一个问题或者需求,分析解决方案
- 收集数据
- 标注数据
- 模型训练
- 部署使用
- 效果监控,收集数据,迭代算法