代码46:
'''
合并单元格
setSpan(row,col,要合并的行数,要合并的列数)
'''
import sys
from PyQt5.QtWidgets import *
from PyQt5 import QtCore
from PyQt5.QtGui import QColor,QBrush,QFont
from PyQt5.QtCore import Qt
class Span(QWidget):
    """Demo: merge table cells with QTableWidget.setSpan(row, col, rowSpan, colSpan)."""

    def __init__(self):
        super(Span, self).__init__()
        self.initUI()

    def initUI(self):
        """Build a 4x3 table and merge a run of rows in each column."""
        self.setWindowTitle("设置单元格的文本对齐方式")
        self.resize(430, 230)
        box = QHBoxLayout()
        table = QTableWidget()
        table.setRowCount(4)
        table.setColumnCount(3)
        box.addWidget(table)
        table.setHorizontalHeaderLabels(['姓名', '性别', '体重(kg)'])
        table.setItem(0, 0, QTableWidgetItem('雷神'))
        table.setSpan(0, 0, 3, 1)   # merge 3 rows in column 0
        table.setItem(0, 1, QTableWidgetItem('男'))
        table.setSpan(0, 1, 2, 1)   # merge 2 rows in column 1
        table.setItem(0, 2, QTableWidgetItem('160'))
        table.setSpan(0, 2, 4, 1)   # merge all 4 rows in column 2
        self.setLayout(box)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = Span()
    win.show()
    sys.exit(app.exec_())
代码47:
'''
在单元格中同时实现图文混排的效果
'''
import sys
from PyQt5.QtWidgets import *
from PyQt5 import QtCore
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt
class CellImageText(QWidget):
    """Demo: place an icon together with text inside a QTableWidget cell."""

    def __init__(self):
        super(CellImageText, self).__init__()
        self.initUI()

    def initUI(self):
        """Build a 5x4 table; the last cell carries both an image and text."""
        self.setWindowTitle("在单元格中实现图文混排的效果")
        self.resize(1000, 600)
        box = QHBoxLayout()
        self.tableWidget = QTableWidget()
        self.tableWidget.setRowCount(5)
        self.tableWidget.setColumnCount(4)
        box.addWidget(self.tableWidget)
        self.tableWidget.setHorizontalHeaderLabels(['姓名', '性别', '体重(kg)'])
        cells = [
            (0, 0, QTableWidgetItem('李宁')),
            (0, 1, QTableWidgetItem('男')),
            (0, 2, QTableWidgetItem('160')),
            # QTableWidgetItem(icon, text) draws the image next to the text.
            (0, 3, QTableWidgetItem(QIcon('E:/555/555/yolov5-master/data/images/zidane.jpg'), '勇士')),
        ]
        for row, col, item in cells:
            self.tableWidget.setItem(row, col, item)
        self.setLayout(box)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = CellImageText()
    win.show()
    sys.exit(app.exec_())
代码48:
'''
改变单元格图片尺寸大小
setIconSize(QSize(图片的的宽度,图片的高度))
'''
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class CellImageSize(QWidget):
    """Demo: control the rendered size of cell icons via setIconSize(QSize(w, h))."""

    def __init__(self):
        super(CellImageSize, self).__init__()
        self.initUI()

    def initUI(self):
        """Build a 5x3 table and fill all 15 cells with sized images."""
        self.setWindowTitle("在单元格中实现图文混排的效果")
        self.resize(10000, 10000)
        layout = QHBoxLayout()
        tablewidget = QTableWidget()
        tablewidget.setIconSize(QSize(800, 400))
        tablewidget.setColumnCount(3)
        tablewidget.setRowCount(5)  # 5x3 = 15 cells, one image each
        tablewidget.setHorizontalHeaderLabels(['图片1', '图片2', '图片3'])
        # Make each column wide enough for the image.
        for col in range(3):
            tablewidget.setColumnWidth(col, 300)
        # Make each row tall enough for the image. FIX: the table has only
        # 5 rows; the original looped to 15 (a no-op past row 4).
        for row in range(5):
            tablewidget.setRowHeight(row, 200)
        for k in range(15):
            # BUG FIX: integer division is required -- in Python 3, k / 3
            # yields a float and QTableWidget.setItem() rejects float rows
            # with a TypeError.
            row = k // 3
            col = k % 3
            item = QTableWidgetItem()
            item.setIcon(QIcon('../shiguan/guan%d.jpg' % k))
            tablewidget.setItem(row, col, item)
        layout.addWidget(tablewidget)
        self.setLayout(layout)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    main = CellImageSize()
    main.show()
    sys.exit(app.exec_())
代码49:
'''
为树节点添加相应事件,触发相应的反应
'''
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import sys
class TreeEvent(QMainWindow):
    """Demo: react to click events on QTreeWidget nodes."""

    def __init__(self, parent=None):
        super(TreeEvent, self).__init__(parent)
        self.setWindowTitle('为树节点添加响应事件')
        self.tree = QTreeWidget()
        self.tree.setColumnCount(2)
        self.tree.setHeaderLabels(['Key', 'Value'])
        # Build a small tree: root -> (child1, child2 -> child3).
        root = self._addItem(self.tree, 'root', '0')
        self._addItem(root, 'root', '1')
        child2 = self._addItem(root, 'child2', '2')
        self._addItem(child2, 'child3', '3')
        self.tree.clicked.connect(self.onTreeClicked)
        self.setCentralWidget(self.tree)

    def _addItem(self, parent, key, value):
        """Create one two-column tree item under *parent* and return it."""
        node = QTreeWidgetItem(parent)
        node.setText(0, key)
        node.setText(1, value)
        return node

    def onTreeClicked(self, index):
        """Print the clicked row index plus the current item's key/value."""
        item = self.tree.currentItem()
        print(index.row())
        print('key =%s,value =%s' % (item.text(0), item.text(1)))


if __name__ == "__main__":
    app = QApplication(sys.argv)
    main = TreeEvent()
    main.show()
    sys.exit(app.exec_())
代码50:
'''
添加、修改和删除树控件中的节点
'''
import sys
from PyQt5.QtWidgets import *
class ModifyTree(QWidget):
    """Demo: add, rename and delete QTreeWidget nodes via toolbar buttons."""

    def __init__(self, parent=None):
        super(ModifyTree, self).__init__(parent)
        self.setWindowTitle('TreeWidget例子')
        operatorLayout = QHBoxLayout()
        addBtn = QPushButton('添加节点')
        updataBtn = QPushButton('修改节点')
        deleteBtn = QPushButton('删除节点')
        operatorLayout.addWidget(addBtn)
        operatorLayout.addWidget(updataBtn)
        operatorLayout.addWidget(deleteBtn)
        addBtn.clicked.connect(self.addNode)
        updataBtn.clicked.connect(self.updateNode)
        deleteBtn.clicked.connect(self.deleteNode)
        self.tree = QTreeWidget()
        self.tree.setColumnCount(2)
        self.tree.setHeaderLabels(['Key', 'Value'])
        # Initial tree: root -> (child1, child2 -> child3).
        root = QTreeWidgetItem(self.tree)
        root.setText(0, 'root')
        root.setText(1, '0')
        child1 = QTreeWidgetItem(root)
        child1.setText(0, 'root')
        child1.setText(1, '1')
        child2 = QTreeWidgetItem(root)
        child2.setText(0, 'child2')
        child2.setText(1, '2')
        child3 = QTreeWidgetItem(child2)
        child3.setText(0, 'child3')
        child3.setText(1, '3')
        self.tree.clicked.connect(self.onTreeClicked)
        mainLayout = QVBoxLayout(self)
        mainLayout.addLayout(operatorLayout)
        mainLayout.addWidget(self.tree)
        self.setLayout(mainLayout)

    def onTreeClicked(self, index):
        """Print the clicked row plus the current item's key/value pair."""
        item = self.tree.currentItem()
        print(index.row())
        print('key =%s,value =%s' % (item.text(0), item.text(1)))

    def addNode(self):
        """Append a child node under the currently selected item."""
        print('添加节点')
        item = self.tree.currentItem()
        print(item)
        # ROBUSTNESS: with no selection the original created a detached item
        # that never appeared in the tree; fall back to a top-level node.
        node = QTreeWidgetItem(item if item is not None else self.tree)
        node.setText(0, '新节点')
        node.setText(1, '新值')

    def updateNode(self):
        """Rename the currently selected item (no-op if nothing is selected)."""
        print('修改节点')
        item = self.tree.currentItem()
        # ROBUSTNESS: original raised AttributeError when nothing was selected.
        if item is not None:
            item.setText(0, '修改节点')
            item.setText(1, '值已经被修改')

    def deleteNode(self):
        """Delete every selected item, including top-level ones."""
        print('删除节点')
        for item in self.tree.selectedItems():
            parent = item.parent()
            if parent is None:
                # BUG FIX: top-level items have no parent, so the original
                # item.parent().removeChild(item) crashed on the root node.
                self.tree.takeTopLevelItem(self.tree.indexOfTopLevelItem(item))
            else:
                parent.removeChild(item)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    main = ModifyTree()
    main.show()
    sys.exit(app.exec_())
代码51:
'''
选项卡控件:QTabWidget,例如多页面下显示页面
'''
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class TabWidgetDemo(QTabWidget):
    """Demo: a three-page QTabWidget (contact info / personal details / education)."""

    def __init__(self, parent=None):
        super(TabWidgetDemo, self).__init__(parent)
        self.setWindowTitle("选项卡控件:QTabWidget")
        # One plain QWidget per page; each page's layout is built by a helper.
        self.tab1 = QWidget()
        self.tab2 = QWidget()
        self.tab3 = QWidget()
        self.addTab(self.tab1, '选项卡1')
        self.addTab(self.tab2, '选项卡2')
        self.addTab(self.tab3, '选项卡3')
        self.tab1UI()
        self.tab2UI()
        self.tab3UI()

    def tab1UI(self):
        """First page: name/address form; retitles its tab."""
        form = QFormLayout()
        form.addRow('姓名', QLineEdit())
        form.addRow('地址', QLineEdit())
        self.setTabText(0, '联系方式')
        self.tab1.setLayout(form)

    def tab2UI(self):
        """Second page: gender radio buttons plus a birthday field."""
        form = QFormLayout()
        genderRow = QHBoxLayout()
        genderRow.addWidget(QRadioButton('男'))
        genderRow.addWidget(QRadioButton('女'))
        form.addRow(QLabel('性别'), genderRow)
        form.addRow('生日', QLineEdit())
        self.setTabText(1, '个人详细信息')
        self.tab2.setLayout(form)

    def tab3UI(self):
        """Third page: subject checkboxes."""
        row = QHBoxLayout()
        for widget in (QLabel('科目'), QCheckBox('物理'), QCheckBox('高数')):
            row.addWidget(widget)
        self.setTabText(2, '教育程度')
        self.tab3.setLayout(row)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    main = TabWidgetDemo()
    main.show()
    sys.exit(app.exec_())
代码52:
'''
容纳多文档的窗口
QMdiArea
QMdiSubWindows
'''
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class MultiWindows(QMainWindow):
    """Demo: an MDI main window whose File menu creates/cascades/tiles subwindows."""

    count = 0  # number of subwindows created so far (shared, class-wide)

    def __init__(self, parent=None):
        super(MultiWindows, self).__init__(parent)
        self.setWindowTitle("容纳多文档的窗口")
        self.mdi = QMdiArea()
        self.setCentralWidget(self.mdi)
        bar = self.menuBar()
        file = bar.addMenu("File")
        file.addAction("new")
        file.addAction("cascade")
        file.addAction("Tiled")
        file.triggered.connect(self.windowaction)

    def windowaction(self, q):
        """Dispatch a triggered File-menu action by its text.

        q: the QAction that fired.
        """
        print(q.text())
        # BUG FIX: the menu action is labelled "new" (lowercase) but the
        # original compared against "New", so this branch could never match
        # and no subwindow was ever created.
        if q.text() == "new":
            MultiWindows.count = MultiWindows.count + 1
            sub = QMdiSubWindow()
            sub.setWidget(QTextEdit())
            sub.setWindowTitle("子窗口" + str(MultiWindows.count))
            self.mdi.addSubWindow(sub)
            sub.show()
        elif q.text() == "cascade":
            self.mdi.cascadeSubWindows()
        elif q.text() == "Tiled":
            self.mdi.tileSubWindows()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    demo = MultiWindows()
    demo.show()
    sys.exit(app.exec_())
代码53:
'''
动态显示当前时间
QTimer(周期性任务,定时器,每间隔一段时间调用一次,例如每隔一秒重新显示时间)
QThread(单个任务)
多线程:用于同时完成多个任务
'''
from PyQt5.QtWidgets import QWidget,QPushButton,QApplication,QListWidget,QGridLayout,QLabel
from PyQt5.QtCore import QTimer,QDateTime
import sys
class ShowTime(QWidget):
    """Demo: a label showing the current time, refreshed by a 1-second QTimer."""

    def __init__(self, parent=None):
        super(ShowTime, self).__init__(parent)
        self.setWindowTitle("动态显示当前时间")
        self.label = QLabel('显示当前时间')
        self.startBtn = QPushButton('开始')
        self.endBtn = QPushButton('结束')
        # The timer calls showTime() once per interval after start().
        self.timer = QTimer()
        self.timer.timeout.connect(self.showTime)
        grid = QGridLayout()
        grid.addWidget(self.label, 0, 0, 1, 2)  # label spans both columns
        grid.addWidget(self.startBtn, 1, 0)
        grid.addWidget(self.endBtn, 1, 1)
        self.startBtn.clicked.connect(self.startTimer)
        self.endBtn.clicked.connect(self.endTimer)
        self.setLayout(grid)

    def showTime(self):
        """Render the current date/time into the label."""
        now = QDateTime.currentDateTime()
        self.label.setText(now.toString("yyyy-MM-dd hh:mm:ss dddd"))

    def startTimer(self):
        """Start ticking every 1000 ms and flip the button states."""
        self.timer.start(1000)
        self.startBtn.setEnabled(False)
        self.endBtn.setEnabled(True)

    def endTimer(self):
        """Stop the clock and flip the button states back."""
        self.timer.stop()
        self.startBtn.setEnabled(True)
        self.endBtn.setEnabled(False)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    demo = ShowTime()
    demo.show()
    sys.exit(app.exec_())
代码54:
'''
让程序定时关闭(QTimer.singleShot)
'''
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
if __name__ == '__main__':
    # Demo: auto-close a splash-style window after five seconds.
    app = QApplication(sys.argv)
    splash = QLabel('Hello Word,窗口在5秒后自动关闭!')
    # Borderless, splash-screen style window.
    splash.setWindowFlags(Qt.SplashScreen | Qt.FramelessWindowHint)
    splash.show()
    # One-shot timer: quit the whole application after 5000 ms.
    QTimer.singleShot(5000, app.quit)
    sys.exit(app.exec_())
代码55:
'''
使用线程类(QThread)编写计数器
'''
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
sec = 0  # shared counter, incremented once per timer signal


class WorkThread(QThread):
    """Background thread: emits `timer` once per second, `end` when sec hits 5."""

    timer = pyqtSignal()  # fired every second
    end = pyqtSignal()    # fired once when counting finishes

    def run(self):
        while True:
            self.sleep(1)  # one-second tick
            if sec == 5:
                self.end.emit()
                break
            self.timer.emit()


class Counter(QWidget):
    """LCD counter driven by WorkThread; counting starts on a button click."""

    def __init__(self, parent=None):
        super(Counter, self).__init__(parent)
        self.setWindowTitle("使用线性类(QThread)编写计数器")
        self.resize(300, 120)
        vbox = QVBoxLayout()
        self.lcdNumber = QLCDNumber()
        vbox.addWidget(self.lcdNumber)
        button = QPushButton('开始计数')
        vbox.addWidget(button)
        # Wire the worker's signals to the GUI-thread slots.
        self.workThread = WorkThread()
        self.workThread.timer.connect(self.countTime)
        self.workThread.end.connect(self.end)
        button.clicked.connect(self.work)
        self.setLayout(vbox)

    def countTime(self):
        """Advance the shared counter and show it on the LCD."""
        global sec
        sec += 1
        self.lcdNumber.display(sec)

    def end(self):
        """Notify the user that counting has finished."""
        QMessageBox.information(self, '消息', '计数结束', QMessageBox.Ok)

    def work(self):
        """Kick off the background counting thread."""
        self.workThread.start()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    form = Counter()
    form.show()
    sys.exit(app.exec_())
代码56:
'''
用web浏览器控件(QWebEngineView)显示网页
PyQt5和Web的交互技术(同时使用Python和Web开发程序,混合开发)
Python +JavaScript +HTML5 +CSS
QWebEngineView
'''
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtWebEngineWidgets import *
import sys
class WebEngineView(QMainWindow):
    """Demo: embed an external web page with QWebEngineView."""

    def __init__(self):
        super(WebEngineView, self).__init__()
        self.setWindowTitle('打开外部网页例子')
        self.setGeometry(5, 30, 1355, 730)
        self.browser = QWebEngineView()
        self.browser.load(QUrl('http://geekori.com'))
        # BUG FIX: the browser must be passed to setCentralWidget(); the
        # original called it with no argument, which raises a TypeError.
        self.setCentralWidget(self.browser)


# BUG FIX: the guard was misspelled '__main' so the demo never ran.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = WebEngineView()
    win.show()
    sys.exit(app.exec_())
代码57:
#!/usr/bin/python3
# -*- coding:utf-8 -*-
###############################################
# Author : SangUn
# EMail : [email protected]
# Created Time :
# File Name : traversal_txt_xls.py
# Description :
###############################################
'''
当前代码实现一个文件夹内所有文本内容写入同一个 excel
每个文本都有各自独立的 sheet 页
文本单行用制表符分隔,代表多列数据
'''
# 导入模块
import os
import xlwt
def getline(filepath, xlspath):
    """Export every text file in *filepath* into one xls workbook at *xlspath*.

    Each file becomes its own sheet (named after the file); tab-separated
    fields within a line become columns, one text line per spreadsheet row.

    Assumes filenames look like '<number>.txt' (sorted numerically by stem);
    a non-numeric stem raises ValueError.
    """
    # Sort numerically by stem so '2.txt' comes before '10.txt'.
    file_names = os.listdir(filepath)
    file_names.sort(key=lambda x: int(x[:-4]))
    file_ob_list = [filepath + "/" + name for name in file_names]
    print(file_ob_list)
    xls = xlwt.Workbook()
    for file_ob in file_ob_list:
        # basename() is safe here: entries never end with a path separator.
        sheet = xls.add_sheet(os.path.basename(file_ob), cell_overwrite_ok=True)
        # FIX: 'with' guarantees the handle is closed even if a write fails
        # (the original leaked the handle on error).
        with open(file_ob) as f:
            for x, line in enumerate(f):
                # FIX: split each line once instead of once per column.
                for i, data in enumerate(line.split('\t')):
                    # Replace embedded newlines so cells stay single-line.
                    sheet.write(x, i, str(data).replace('\n', ' '))
    xls.save(xlspath)


if __name__ == "__main__":
    filepath = "E:/YoloV5/yolov5-master/runs/img/out/text/labels"  # source directory
    xlspath = "E:/YoloV5/yolov5-master/runs/img/out/text/labels/txt_xls.xls"  # output workbook
    getline(filepath, xlspath)
代码58:
import time
import cv2
import numpy as np
from PIL import Image
import detect
import os
import xlwt
# from detect import parse_opt #这里的detect是detect.py(yolov5的检测)文件,parse_opt表示检测参数的设置
# detect = parse_opt
# Open the default webcam (device 0); a video file path also works.
capture = cv2.VideoCapture(0)
# capture=cv2.VideoCapture("D:/1.mp4")
if capture.isOpened():
    ref, frame = capture.read()
else:
    ref = False
fps = 0.0
timeF = 100  # sample every 100th frame (comment said ~140 FPS for YOLOv5)
c = 1        # frame counter
while ref:
    t1 = time.time()
    # Grab the next frame.
    ref, frame = capture.read()
    # Every timeF-th frame: stamp the FPS, save the frame, run detection.
    if (c % timeF == 0):
        # Rolling average of the instantaneous FPS estimate.
        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %.2f" % (fps))
        frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        # Save the sampled frame to ./runs/img/in/.
        cv2.imwrite("./runs/img/in/" + str(c) + '.jpg', frame)
        # Feed the saved image to YOLOv5; results (incl. label .txt files)
        # land in ../img/out/photo<c>.
        detect.run(source="./runs/img/in/" + str(c) + '.jpg', name='../img/out/photo' + str(c), view_img=True, save_txt=True)
    c += 1
    # (a large commented-out alternative pipeline from the original is
    # omitted here: BGR<->RGB conversion, in-memory detect.run on the frame,
    # and saving annotated frames with/without the FPS overlay)
    # Live preview window.
    cv2.imshow("video", frame)
    k = cv2.waitKey(1)
    # 'q' quits.
    if k == ord('q'):
        capture.release()
        break
    # ESC quits too.
    k = cv2.waitKey(1)
    if k == 27:
        capture.release()
        break
代码59:
import time
import cv2
import numpy as np
from PIL import Image
import detect
import os
import xlwt
# from detect import parse_opt #这里的detect是detect.py(yolov5的检测)文件,parse_opt表示检测参数的设置
# detect = parse_opt
# Open the default webcam (device 0); a video file path also works.
capture = cv2.VideoCapture(0)
# capture=cv2.VideoCapture("D:/1.mp4")
if capture.isOpened():
    ref, frame = capture.read()
else:
    ref = False
fps = 0.0
timeF = 100  # sample every 100th frame
c = 1        # frame counter
while ref:
    t1 = time.time()
    # Grab the next frame.
    ref, frame = capture.read()
    # Every timeF-th frame: stamp FPS, save it, detect, export labels to xls.
    if (c % timeF == 0):
        fps = (fps + (1. / (time.time() - t1))) / 2  # rolling FPS estimate
        print("fps= %.2f" % (fps))
        frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        # Save the sampled frame, then run YOLOv5 on the saved image; results
        # (incl. label .txt files) land in ../img/out/photo<c>.
        cv2.imwrite("./runs/img/in/" + str(c) + '.jpg', frame)
        detect.run(source="./runs/img/in/" + str(c) + '.jpg', name='../img/out/photo' + str(c), view_img=True, save_txt=True)

        def getline(filepath, xlspath):
            """Write every label .txt in *filepath* into one xls workbook:
            one sheet per file, tab-separated fields as columns.
            (Re-defined on every sampled frame -- works, but could be
            hoisted to module level.)"""
            file_names = os.listdir(filepath)
            # Sort numerically by filename stem, e.g. '2.txt' < '10.txt'.
            file_names.sort(key=lambda x: int(x[:-4]))
            file_ob_list = []
            try:
                # Build the full path of every label file.
                for file_name in file_names:
                    file_ob = filepath + "/" + file_name
                    file_ob_list.append(file_ob)
                print(file_ob_list)
                xls = xlwt.Workbook()
                for file_ob in file_ob_list:
                    # basename() would be empty for a trailing '/'; paths here never have one.
                    sheet_name = os.path.basename(file_ob)
                    sheet = xls.add_sheet(sheet_name, cell_overwrite_ok=True)
                    f = open(file_ob)
                    x = 0  # spreadsheet row index
                    while True:
                        line = f.readline()
                        if not line:
                            break
                        for i in range(len(line.split('\t'))):
                            data = line.split('\t')[i]
                            data = str(data)
                            data = data.replace('\n', ' ')  # keep cells single-line
                            sheet.write(x, i, data)  # row x, column i
                        x += 1
                    f.close()
                xls.save(xlspath)
            except:
                raise  # re-raise unchanged; kept only to mirror original flow

        # NOTE(review): this guard sits inside the capture loop, so the export
        # runs once per sampled frame when the script is executed directly.
        if __name__ == "__main__":
            filepath = "E:/YoloV5/yolov5-master/runs/img/out/photo" + str(c) + "/labels"  # label directory for frame c
            xlspath = "E:/YoloV5/yolov5-master/runs/img/out/excel/txt_xls.xls"  # output workbook
            getline(filepath, xlspath)
    c += 1
    # (a large commented-out alternative pipeline from the original is
    # omitted here: BGR<->RGB conversion, in-memory detect.run on the frame,
    # and saving annotated frames with/without the FPS overlay)
    # Live preview window.
    cv2.imshow("video", frame)
    k = cv2.waitKey(1)
    # 'q' quits.
    if k == ord('q'):
        capture.release()
        break
    # ESC quits too.
    k = cv2.waitKey(1)
    if k == 27:
        capture.release()
        break
代码61:
import time
import cv2
import numpy as np
from PIL import Image
import detect
import os
import xlwt
# from detect import parse_opt #这里的detect是detect.py(yolov5的检测)文件,parse_opt表示检测参数的设置
# detect = parse_opt
# Open the default webcam (device 0); a video file path also works.
# NOTE(review): this snippet appears identical to 代码59 above -- confirm
# whether it was meant to differ.
capture = cv2.VideoCapture(0)
# capture=cv2.VideoCapture("D:/1.mp4")
if capture.isOpened():
    ref, frame = capture.read()
else:
    ref = False
fps = 0.0
timeF = 100  # sample every 100th frame
c = 1        # frame counter
while ref:
    t1 = time.time()
    # Grab the next frame.
    ref, frame = capture.read()
    # Every timeF-th frame: stamp FPS, save it, detect, export labels to xls.
    if (c % timeF == 0):
        fps = (fps + (1. / (time.time() - t1))) / 2  # rolling FPS estimate
        print("fps= %.2f" % (fps))
        frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        # Save the sampled frame, then run YOLOv5 on the saved image.
        cv2.imwrite("./runs/img/in/" + str(c) + '.jpg', frame)
        detect.run(source="./runs/img/in/" + str(c) + '.jpg', name='../img/out/photo' + str(c), view_img=True, save_txt=True)

        def getline(filepath, xlspath):
            """Write every label .txt in *filepath* into one xls workbook:
            one sheet per file, tab-separated fields as columns."""
            file_names = os.listdir(filepath)
            # Sort numerically by filename stem, e.g. '2.txt' < '10.txt'.
            file_names.sort(key=lambda x: int(x[:-4]))
            file_ob_list = []
            try:
                # Build the full path of every label file.
                for file_name in file_names:
                    file_ob = filepath + "/" + file_name
                    file_ob_list.append(file_ob)
                print(file_ob_list)
                xls = xlwt.Workbook()
                for file_ob in file_ob_list:
                    sheet_name = os.path.basename(file_ob)
                    sheet = xls.add_sheet(sheet_name, cell_overwrite_ok=True)
                    f = open(file_ob)
                    x = 0  # spreadsheet row index
                    while True:
                        line = f.readline()
                        if not line:
                            break
                        for i in range(len(line.split('\t'))):
                            data = line.split('\t')[i]
                            data = str(data)
                            data = data.replace('\n', ' ')  # keep cells single-line
                            sheet.write(x, i, data)  # row x, column i
                        x += 1
                    f.close()
                xls.save(xlspath)
            except:
                raise  # re-raise unchanged; kept only to mirror original flow

        # NOTE(review): guard nested in the loop -- export runs per sampled frame.
        if __name__ == "__main__":
            filepath = "E:/YoloV5/yolov5-master/runs/img/out/photo" + str(c) + "/labels"  # label directory for frame c
            xlspath = "E:/YoloV5/yolov5-master/runs/img/out/excel/txt_xls.xls"  # output workbook
            getline(filepath, xlspath)
    c += 1
    # (commented-out alternative pipeline from the original omitted:
    # BGR<->RGB conversion, in-memory detect.run, annotated-frame saving)
    # Live preview window.
    cv2.imshow("video", frame)
    k = cv2.waitKey(1)
    # 'q' quits.
    if k == ord('q'):
        capture.release()
        break
    # ESC quits too.
    k = cv2.waitKey(1)
    if k == 27:
        capture.release()
        break
代码88:成功Video导出excel数据(30元)
import time
import cv2
import numpy as np
from PIL import Image
import detect
import os
import xlwt
# from detect import parse_opt #这里的detect是detect.py(yolov5的检测)文件,parse_opt表示检测参数的设置
# detect = parse_opt
# Open the default webcam (device 0); a video file path also works.
capture = cv2.VideoCapture(0)
# capture=cv2.VideoCapture("D:/1.mp4")
if capture.isOpened():
    ref, frame = capture.read()
else:
    ref = False
fps = 0.0
timeF = 50  # sample every 50th frame
c = 1       # frame counter
while ref:
    t1 = time.time()
    # Grab the next frame.
    ref, frame = capture.read()
    # Every timeF-th frame: stamp FPS, save it, detect, export labels to xls.
    if (c % timeF == 0):
        fps = (fps + (1. / (time.time() - t1))) / 2  # rolling FPS estimate
        print("fps= %.2f" % (fps))
        frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        # Save the sampled frame, then run YOLOv5 on the saved image.
        cv2.imwrite("./runs/img/in/" + str(c) + '.jpg', frame)
        # NOTE(review): 'photo+str(c)' is a literal string here (the
        # concatenation is inside the quotes), so every frame writes to the
        # same output name -- likely intended as 'photo' + str(c); confirm.
        detect.run(source="./runs/img/in/" + str(c) + '.jpg', name='../img/out/photo+str(c)' + '.jpg', save_txt=True)

        def getline(filepath, xlspath):
            """Write every label .txt in *filepath* into one xls workbook:
            one sheet per file, tab-separated fields as columns."""
            file_names = os.listdir(filepath)
            # Sort numerically by filename stem, e.g. '2.txt' < '10.txt'.
            file_names.sort(key=lambda x: int(x[:-4]))
            file_ob_list = []
            try:
                # Build the full path of every label file.
                for file_name in file_names:
                    file_ob = filepath + "/" + file_name
                    file_ob_list.append(file_ob)
                print(file_ob_list)
                xls = xlwt.Workbook()
                for file_ob in file_ob_list:
                    sheet_name = os.path.basename(file_ob)
                    sheet = xls.add_sheet(sheet_name, cell_overwrite_ok=True)
                    f = open(file_ob)
                    x = 0  # spreadsheet row index
                    while True:
                        line = f.readline()
                        if not line:
                            break
                        for i in range(len(line.split('\t'))):
                            data = line.split('\t')[i]
                            data = str(data)
                            data = data.replace('\n', ' ')  # keep cells single-line
                            sheet.write(x, i, data)  # row x, column i
                        x += 1
                    f.close()
                xls.save(xlspath)
            except:
                raise  # re-raise unchanged; kept only to mirror original flow

        # NOTE(review): guard nested in the loop -- export runs per sampled
        # frame; the 'photo+str(c)2.jpg' path below also looks like a
        # mis-quoted concatenation -- confirm against detect.run's output dir.
        if __name__ == "__main__":
            filepath = "E:/YoloV5/yolov5-master/runs/img/out/photo+str(c)2.jpg/labels"  # label directory
            xlspath = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"  # output workbook
            getline(filepath, xlspath)
    c += 1
    # (commented-out alternative pipeline from the original omitted:
    # BGR<->RGB conversion, in-memory detect.run, annotated-frame saving)
    # Live preview window.
    cv2.imshow("video", frame)
    k = cv2.waitKey(1)
    # 'q' quits.
    if k == ord('q'):
        capture.release()
        break
    # ESC quits too.
    k = cv2.waitKey(1)
    if k == 27:
        capture.release()
        break
代码89:调用Yolov5摄像头实时检测
import time
import cv2
import numpy as np
from PIL import Image
import detect
import os
import xlwt
# Open the default webcam. NOTE(review): this handle is never used below --
# detect.run(source="0") opens the camera itself; confirm it is needed.
capture = cv2.VideoCapture(0)
# capture=cv2.VideoCapture("D:/1.mp4")
# Run YOLOv5 live detection directly on webcam stream 0 (blocks until done).
detect.run(source="0", name='../img/out/photo+str(c)' + '.jpg', save_txt=False)
# NOTE(review): `frame` is never defined in this snippet, so this line raises
# NameError if reached -- confirm whether a capture.read() was intended.
cv2.imshow("video", frame)
k = cv2.waitKey(1)
代码90:UI界面设计
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\project.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
import sys
import cv2
import argparse
import random
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from PyQt5 import QtCore, QtGui, QtWidgets
from utils.torch_utils import select_device
from models.experimental import attempt_load
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.datasets import letterbox
from utils.plots import plot_one_box
class Ui_MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
    """Build the UI, parse YOLOv5 detection options, and load the model."""
    super(Ui_MainWindow, self).__init__(parent)
    self.timer_video = QtCore.QTimer()  # drives per-frame video/camera updates
    self.setupUi(self)
    self.init_logo()
    self.init_slots()
    self.cap = cv2.VideoCapture()  # opened later by the video/camera buttons
    self.out = None  # VideoWriter, created when a video is opened
    # self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'XVID'), 20.0, (640, 480))
    # Detection options -- same flags as YOLOv5's detect.py.
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='E:/555/555/yolov5-master/runs/train/exp29/weights/best.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='0', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    self.opt = parser.parse_args()
    print(self.opt)
    source, weights, view_img, save_txt, imgsz = self.opt.source, self.opt.weights, self.opt.view_img, self.opt.save_txt, self.opt.img_size
    self.device = select_device(self.opt.device)
    self.half = self.device.type != 'cpu'  # half precision only supported on CUDA
    cudnn.benchmark = True
    # Load model
    self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
    stride = int(self.model.stride.max())  # model stride
    self.imgsz = check_img_size(imgsz, s=stride)  # check img_size
    if self.half:
        self.model.half()  # to FP16
    # Class names, plus one random BGR color per class for box drawing.
    self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
    self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]
def setupUi(self, MainWindow):
    """Create the widget tree: a column of three buttons on the left and the
    image/video display label on the right (pyuic-generated style code)."""
    MainWindow.setObjectName("MainWindow")
    MainWindow.resize(800, 600)
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget)
    self.horizontalLayout_2.setObjectName("horizontalLayout_2")
    self.horizontalLayout = QtWidgets.QHBoxLayout()
    self.horizontalLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
    self.horizontalLayout.setObjectName("horizontalLayout")
    # Left column holding the three action buttons, vertically spaced.
    self.verticalLayout = QtWidgets.QVBoxLayout()
    self.verticalLayout.setContentsMargins(-1, -1, 0, -1)
    self.verticalLayout.setSpacing(80)
    self.verticalLayout.setObjectName("verticalLayout")
    # "图片检测" button: fixed 150x100, centered.
    self.pushButton_img = QtWidgets.QPushButton(self.centralwidget)
    sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.pushButton_img.sizePolicy().hasHeightForWidth())
    self.pushButton_img.setSizePolicy(sizePolicy)
    self.pushButton_img.setMinimumSize(QtCore.QSize(150, 100))
    self.pushButton_img.setMaximumSize(QtCore.QSize(150, 100))
    font = QtGui.QFont()
    font.setFamily("Agency FB")
    font.setPointSize(12)
    self.pushButton_img.setFont(font)
    self.pushButton_img.setObjectName("pushButton_img")
    self.verticalLayout.addWidget(self.pushButton_img, 0, QtCore.Qt.AlignHCenter)
    # "摄像头检测" button: same fixed size and font.
    self.pushButton_camera = QtWidgets.QPushButton(self.centralwidget)
    sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.pushButton_camera.sizePolicy().hasHeightForWidth())
    self.pushButton_camera.setSizePolicy(sizePolicy)
    self.pushButton_camera.setMinimumSize(QtCore.QSize(150, 100))
    self.pushButton_camera.setMaximumSize(QtCore.QSize(150, 100))
    font = QtGui.QFont()
    font.setFamily("Agency FB")
    font.setPointSize(12)
    self.pushButton_camera.setFont(font)
    self.pushButton_camera.setObjectName("pushButton_camera")
    self.verticalLayout.addWidget(self.pushButton_camera, 0, QtCore.Qt.AlignHCenter)
    # "视频检测" button: same fixed size and font.
    self.pushButton_video = QtWidgets.QPushButton(self.centralwidget)
    sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.pushButton_video.sizePolicy().hasHeightForWidth())
    self.pushButton_video.setSizePolicy(sizePolicy)
    self.pushButton_video.setMinimumSize(QtCore.QSize(150, 100))
    self.pushButton_video.setMaximumSize(QtCore.QSize(150, 100))
    font = QtGui.QFont()
    font.setFamily("Agency FB")
    font.setPointSize(12)
    self.pushButton_video.setFont(font)
    self.pushButton_video.setObjectName("pushButton_video")
    self.verticalLayout.addWidget(self.pushButton_video, 0, QtCore.Qt.AlignHCenter)
    self.verticalLayout.setStretch(2, 1)
    self.horizontalLayout.addLayout(self.verticalLayout)
    # Right side: the label where detection results are displayed.
    self.label = QtWidgets.QLabel(self.centralwidget)
    self.label.setObjectName("label")
    self.horizontalLayout.addWidget(self.label)
    # Buttons column : display label = 1 : 3.
    self.horizontalLayout.setStretch(0, 1)
    self.horizontalLayout.setStretch(1, 3)
    self.horizontalLayout_2.addLayout(self.horizontalLayout)
    MainWindow.setCentralWidget(self.centralwidget)
    self.menubar = QtWidgets.QMenuBar(MainWindow)
    self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23))
    self.menubar.setObjectName("menubar")
    MainWindow.setMenuBar(self.menubar)
    self.statusbar = QtWidgets.QStatusBar(MainWindow)
    self.statusbar.setObjectName("statusbar")
    MainWindow.setStatusBar(self.statusbar)
    self.retranslateUi(MainWindow)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Apply the translatable user-visible texts to the window and widgets."""
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "PyQt5+YOLOv5示例"))
    self.pushButton_img.setText(_translate("MainWindow", "图片检测"))
    self.pushButton_camera.setText(_translate("MainWindow", "摄像头检测"))
    self.pushButton_video.setText(_translate("MainWindow", "视频检测"))
    self.label.setText(_translate("MainWindow", "TextLabel"))
def init_slots(self):
    """Wire the three buttons and the video timer to their handlers."""
    self.pushButton_img.clicked.connect(self.button_image_open)
    self.pushButton_video.clicked.connect(self.button_video_open)
    self.pushButton_camera.clicked.connect(self.button_camera_open)
    # Fires once per tick to render the next video/camera frame.
    self.timer_video.timeout.connect(self.show_video_frame)
def init_logo(self):
    """Show a placeholder image in the display label until detection starts."""
    pix = QtGui.QPixmap('wechat.jpg')  # a missing file just leaves the label blank
    self.label.setScaledContents(True)  # scale the pixmap to fill the label
    self.label.setPixmap(pix)
def button_image_open(self):
    """Pick an image from disk, run YOLOv5 detection on it, and display the result.

    Saves the annotated image to 'prediction.jpg' and paints it into
    self.label. Returns early (no-op) when the dialog is cancelled or
    the chosen file cannot be decoded by OpenCV — previously this
    crashed inside letterbox() on a None image.
    """
    print('button_image_open')
    name_list = []
    img_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "打开图片", "", "*.jpg;;*.png;;All Files(*)")
    if not img_name:
        return  # user cancelled the dialog
    img = cv2.imread(img_name)
    print(img_name)
    if img is None:
        return  # unreadable / non-image file: cv2.imread signals failure with None
    showimg = img
    with torch.no_grad():
        img = letterbox(img, new_shape=self.opt.img_size)[0]
        # Convert BGR -> RGB, HWC -> CHW for the model
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(self.device)
        img = img.half() if self.half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension
        # Inference
        pred = self.model(img, augment=self.opt.augment)[0]
        # Apply NMS
        pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                   agnostic=self.opt.agnostic_nms)
        print(pred)
        # Process detections: draw every surviving box back onto the original-size image
        for i, det in enumerate(pred):
            if det is not None and len(det):
                # Rescale boxes from img_size to the original image size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                for *xyxy, conf, cls in reversed(det):
                    label = '%s %.2f' % (self.names[int(cls)], conf)
                    name_list.append(self.names[int(cls)])
                    plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
    cv2.imwrite('prediction.jpg', showimg)
    # BGRA byte order matches QImage.Format_RGB32 on little-endian hosts
    self.result = cv2.cvtColor(showimg, cv2.COLOR_BGR2BGRA)
    self.result = cv2.resize(self.result, (640, 480), interpolation=cv2.INTER_AREA)
    self.QtImg = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0], QtGui.QImage.Format_RGB32)
    self.label.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
def button_video_open(self):
    """Pick a video file and start timer-driven detection on it.

    Opens the file with self.cap, prepares an MJPG writer for the
    annotated output ('prediction.avi') and disables the other buttons
    while detection runs. Shows a warning only when a real file fails
    to open; cancelling the dialog is now a silent no-op (previously
    it popped a spurious failure warning).
    """
    video_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "打开视频", "", "*.mp4;;*.avi;;All Files(*)")
    if not video_name:
        return  # dialog cancelled
    flag = self.cap.open(video_name)
    if not flag:  # idiomatic truth test instead of '== False'
        QtWidgets.QMessageBox.warning(self, u"Warning", u"打开视频失败", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
    else:
        # Writer matches the capture's native frame size (props 3/4 = width/height).
        self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20, (int(self.cap.get(3)), int(self.cap.get(4))))
        self.timer_video.start(30)  # ~33 ticks/s; each tick processes one frame
        self.pushButton_video.setDisabled(True)
        self.pushButton_img.setDisabled(True)
        self.pushButton_camera.setDisabled(True)
def button_camera_open(self):
    """Toggle webcam detection.

    First press: open local camera 0, start the frame timer, lock the
    other buttons and relabel this one. Second press: stop the timer,
    release capture and writer, and restore the idle UI.
    """
    if not self.timer_video.isActive():
        # Open the first local camera by default.
        flag = self.cap.open(0)
        if not flag:  # idiomatic truth test instead of '== False'
            QtWidgets.QMessageBox.warning(self, u"Warning", u"打开摄像头失败", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
        else:
            self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20, (int(self.cap.get(3)), int(self.cap.get(4))))
            self.timer_video.start(30)
            self.pushButton_video.setDisabled(True)
            self.pushButton_img.setDisabled(True)
            self.pushButton_camera.setText(u"关闭摄像头")
    else:
        self.timer_video.stop()
        self.cap.release()
        if self.out is not None:  # guard: writer may never have been created
            self.out.release()
        self.label.clear()
        self.init_logo()
        self.pushButton_video.setDisabled(False)
        self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setText(u"摄像头检测")
def show_video_frame(self):
    """Timer slot: read one frame from self.cap, run detection, display and record it.

    When the stream is exhausted (read() yields None) the timer is
    stopped, capture/writer are released, and the idle UI is restored.
    """
    name_list = []
    flag, img = self.cap.read()
    if img is not None:
        showimg = img  # keep a reference at original size for drawing
        with torch.no_grad():
            img = letterbox(img, new_shape=self.opt.img_size)[0]
            # Convert
            img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
            img = np.ascontiguousarray(img)
            img = torch.from_numpy(img).to(self.device)
            img = img.half() if self.half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)
            # Inference
            pred = self.model(img, augment=self.opt.augment)[0]
            # Apply NMS
            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                       agnostic=self.opt.agnostic_nms)
            # Process detections
            for i, det in enumerate(pred):  # detections per image
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                    # Write results
                    for *xyxy, conf, cls in reversed(det):
                        label = '%s %.2f' % (self.names[int(cls)], conf)
                        name_list.append(self.names[int(cls)])
                        print(label)
                        plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
        self.out.write(showimg)  # record the annotated frame
        show = cv2.resize(showimg, (640, 480))
        self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
        showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
    else:
        # End of stream / camera closed: tear down and restore the idle UI.
        self.timer_video.stop()
        self.cap.release()
        self.out.release()
        self.label.clear()
        self.pushButton_video.setDisabled(False)
        self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setDisabled(False)
        self.init_logo()
if __name__ == '__main__':
    # Script entry point: spin up Qt and block until the window closes.
    application = QtWidgets.QApplication(sys.argv)
    main_window = Ui_MainWindow()
    main_window.show()
    sys.exit(application.exec_())
代码91:
import xlrd
import xlwt
# Requirement: copy the contents of one Excel workbook into another.
# Source workbook (.xls legacy format, readable by xlrd).
file_name = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"  # fixed typo: was 'flile_name'
xlsx = xlrd.open_workbook(file_name)
# Number of sheets to copy
sheets = len(xlsx.sheets())
# Destination workbook (xlwt writes .xls)
new_workbook = xlwt.Workbook()
for sheet in range(sheets):
    table = xlsx.sheet_by_index(sheet)
    rows = table.nrows
    cols = table.ncols
    worksheet = new_workbook.add_sheet("sheet" + str(sheet))
    # Cell-by-cell copy preserves values only, not formatting.
    for i in range(0, rows):
        for j in range(0, cols):
            worksheet.write(i, j, table.cell_value(i, j))
new_workbook.save('E:/YoloV5/yolov5-master/runs/pass_xls/Data.xls')
代码92:**********项目检测界面*********(2021年8月28日)单界面显示
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI_interface.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import sys
import cv2
import argparse
import random
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from PyQt5 import QtCore, QtGui, QtWidgets
from utils.torch_utils import select_device
from models.experimental import attempt_load
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.datasets import letterbox
from utils.plots import plot_one_box
from openpyxl import load_workbook
import xlrd
import xlwt
class Ui_MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
    """Build the detection window, parse YOLOv5 options and load the model once."""
    super(Ui_MainWindow, self).__init__(parent)
    # Timer that drives frame-by-frame video/camera detection.
    self.timer_video = QtCore.QTimer()
    self.setupUi(self)
    self.init_logo()
    self.init_slots()
    self.cap = cv2.VideoCapture()  # opened later by the video/camera buttons
    self.out = None  # cv2.VideoWriter, created when a stream starts
    # Detection options mirror YOLOv5 detect.py defaults (weights path is machine-specific).
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='E:/555/555/yolov5-master/runs/train/exp29/weights/best.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='0', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    self.opt = parser.parse_args()
    print(self.opt)
    source, weights, view_img, save_txt, imgsz = self.opt.source, self.opt.weights, self.opt.view_img, self.opt.save_txt, self.opt.img_size
    self.device = select_device(self.opt.device)
    self.half = self.device.type != 'cpu'  # half precision only supported on CUDA
    cudnn.benchmark = True  # input size is fixed, so let cuDNN autotune kernels
    # Load model
    self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
    stride = int(self.model.stride.max())  # model stride
    self.imgsz = check_img_size(imgsz, s=stride)  # check img_size
    if self.half:
        self.model.half()  # to FP16
    # Get names and colors
    self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
    self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]
def setupUi(self, MainWindow):
    """Construct all widgets and layouts (pyuic-generated UI, hand-tidied).

    The four action buttons shared ~15 duplicated setup lines each;
    that boilerplate is factored into _make_button. Widget names,
    object names, geometry and settings are unchanged.
    """
    MainWindow.setObjectName("MainWindow")
    MainWindow.resize(1507, 881)
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
    self.layoutWidget.setGeometry(QtCore.QRect(30, 40, 152, 642))
    self.layoutWidget.setObjectName("layoutWidget")
    self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
    self.verticalLayout.setContentsMargins(0, 0, 0, 0)
    self.verticalLayout.setSpacing(80)
    self.verticalLayout.setObjectName("verticalLayout")
    # Action buttons: identical size/font; only vertical size policy and alignment differ.
    self.pushButton_img = self._make_button("pushButton_img", QtWidgets.QSizePolicy.MinimumExpanding)
    self.verticalLayout.addWidget(self.pushButton_img)
    self.pushButton_camera = self._make_button("pushButton_camera", QtWidgets.QSizePolicy.Expanding)
    self.verticalLayout.addWidget(self.pushButton_camera, 0, QtCore.Qt.AlignHCenter)
    self.pushButton_video = self._make_button("pushButton_video", QtWidgets.QSizePolicy.Expanding)
    self.verticalLayout.addWidget(self.pushButton_video, 0, QtCore.Qt.AlignHCenter)
    self.pushButton_Transfer_data = self._make_button("pushButton_Transfer_data", QtWidgets.QSizePolicy.Expanding)
    self.verticalLayout.addWidget(self.pushButton_Transfer_data)
    self.verticalLayout.setStretch(2, 1)
    # Left display label (detection output) and right label.
    self.label = QtWidgets.QLabel(self.centralwidget)
    self.label.setGeometry(QtCore.QRect(190, 40, 541, 641))
    self.label.setObjectName("label")
    self.label_2 = QtWidgets.QLabel(self.centralwidget)
    self.label_2.setGeometry(QtCore.QRect(750, 30, 371, 651))
    self.label_2.setLineWidth(2)
    self.label_2.setObjectName("label_2")
    MainWindow.setCentralWidget(self.centralwidget)
    self.menubar = QtWidgets.QMenuBar(MainWindow)
    self.menubar.setGeometry(QtCore.QRect(0, 0, 1507, 26))
    self.menubar.setObjectName("menubar")
    MainWindow.setMenuBar(self.menubar)
    self.statusbar = QtWidgets.QStatusBar(MainWindow)
    self.statusbar.setObjectName("statusbar")
    MainWindow.setStatusBar(self.statusbar)
    self.retranslateUi(MainWindow)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)

def _make_button(self, object_name, v_policy):
    """Create one fixed-size (150x100) 12pt 'Agency FB' button parented to layoutWidget."""
    button = QtWidgets.QPushButton(self.layoutWidget)
    size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, v_policy)
    size_policy.setHorizontalStretch(0)
    size_policy.setVerticalStretch(0)
    size_policy.setHeightForWidth(button.sizePolicy().hasHeightForWidth())
    button.setSizePolicy(size_policy)
    button.setMinimumSize(QtCore.QSize(150, 100))
    button.setMaximumSize(QtCore.QSize(150, 100))
    font = QtGui.QFont()
    font.setFamily("Agency FB")
    font.setPointSize(12)
    button.setFont(font)
    button.setObjectName(object_name)
    return button
def retranslateUi(self, MainWindow):
    """Assign every user-visible (translatable) string; invoked once from setupUi."""
    tr = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(tr("MainWindow", "MainWindow"))
    # Button captions and placeholder label texts.
    for widget, caption in (
        (self.pushButton_img, "图片检测"),
        (self.pushButton_camera, "摄像头检测"),
        (self.pushButton_video, "视频检测"),
        (self.pushButton_Transfer_data, "传递数据"),
        (self.label, "TextLabel"),
        (self.label_2, "TextLabel"),
    ):
        widget.setText(tr("MainWindow", caption))
def init_slots(self):
    """Wire button clicks and the video timer to their handler methods."""
    self.pushButton_img.clicked.connect(self.button_image_open)
    self.pushButton_video.clicked.connect(self.button_video_open)
    self.pushButton_camera.clicked.connect(self.button_camera_open)
    self.pushButton_Transfer_data.clicked.connect(self.pushButton_Transfer_data_open)
    # One frame is read from self.cap and processed per timer tick.
    self.timer_video.timeout.connect(self.show_video_frame)
def init_logo(self):
    """Show the placeholder image 'wechat.jpg' in both display labels."""
    logo = QtGui.QPixmap('wechat.jpg')
    for display in (self.label, self.label_2):
        display.setScaledContents(True)  # stretch the pixmap to fill the label
        display.setPixmap(logo)
def button_image_open(self):
    """Pick an image from disk, run YOLOv5 detection on it, and display the result.

    Saves the annotated image to 'prediction.jpg' and paints it into
    self.label. Returns early (no-op) when the dialog is cancelled or
    the chosen file cannot be decoded by OpenCV — previously this
    crashed inside letterbox() on a None image.
    """
    print('button_image_open')
    name_list = []
    img_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "打开图片", "", "*.jpg;;*.png;;All Files(*)")
    if not img_name:
        return  # user cancelled the dialog
    img = cv2.imread(img_name)
    print(img_name)
    if img is None:
        return  # unreadable / non-image file: cv2.imread signals failure with None
    showimg = img
    with torch.no_grad():
        img = letterbox(img, new_shape=self.opt.img_size)[0]
        # Convert BGR -> RGB, HWC -> CHW for the model
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(self.device)
        img = img.half() if self.half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension
        # Inference
        pred = self.model(img, augment=self.opt.augment)[0]
        # Apply NMS
        pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                   agnostic=self.opt.agnostic_nms)
        print(pred)
        # Process detections: draw every surviving box back onto the original-size image
        for i, det in enumerate(pred):
            if det is not None and len(det):
                # Rescale boxes from img_size to the original image size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                for *xyxy, conf, cls in reversed(det):
                    label = '%s %.2f' % (self.names[int(cls)], conf)
                    name_list.append(self.names[int(cls)])
                    plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
    cv2.imwrite('prediction.jpg', showimg)
    # BGRA byte order matches QImage.Format_RGB32 on little-endian hosts
    self.result = cv2.cvtColor(showimg, cv2.COLOR_BGR2BGRA)
    self.result = cv2.resize(self.result, (640, 480), interpolation=cv2.INTER_AREA)
    self.QtImg = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0], QtGui.QImage.Format_RGB32)
    self.label.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
def button_video_open(self):
    """Pick a video file and start timer-driven detection on it.

    Opens the file with self.cap, prepares an MJPG writer for the
    annotated output ('prediction.avi') and disables the other buttons
    while detection runs. Shows a warning only when a real file fails
    to open; cancelling the dialog is now a silent no-op (previously
    it popped a spurious failure warning).
    """
    video_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "打开视频", "", "*.mp4;;*.avi;;All Files(*)")
    if not video_name:
        return  # dialog cancelled
    flag = self.cap.open(video_name)
    if not flag:  # idiomatic truth test instead of '== False'
        QtWidgets.QMessageBox.warning(self, u"Warning", u"打开视频失败", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
    else:
        # Writer matches the capture's native frame size (props 3/4 = width/height).
        self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20, (int(self.cap.get(3)), int(self.cap.get(4))))
        self.timer_video.start(30)  # ~33 ticks/s; each tick processes one frame
        self.pushButton_video.setDisabled(True)
        self.pushButton_img.setDisabled(True)
        self.pushButton_camera.setDisabled(True)
        QtWidgets.QMessageBox.information(self, u"Tips", u"视频检测进行中!", buttons=QtWidgets.QMessageBox.Ok,
                                          defaultButton=QtWidgets.QMessageBox.Ok)
def button_camera_open(self):
    """Toggle webcam detection.

    First press: open local camera 0, start the frame timer, lock the
    other buttons and relabel this one. Second press: stop the timer,
    release capture and writer, and restore the idle UI.
    """
    if not self.timer_video.isActive():
        # Open the first local camera by default.
        flag = self.cap.open(0)
        if not flag:  # idiomatic truth test instead of '== False'
            QtWidgets.QMessageBox.warning(self, u"Warning", u"打开摄像头失败", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
        else:
            self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20, (int(self.cap.get(3)), int(self.cap.get(4))))
            self.timer_video.start(30)
            self.pushButton_video.setDisabled(True)
            self.pushButton_img.setDisabled(True)
            self.pushButton_camera.setText(u"关闭摄像头")
    else:
        self.timer_video.stop()
        self.cap.release()
        if self.out is not None:  # guard: writer may never have been created
            self.out.release()
        self.label.clear()
        self.init_logo()
        self.pushButton_video.setDisabled(False)
        self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setText(u"摄像头检测")
def pushButton_Transfer_data_open(self):
    """Copy every sheet and cell of the results workbook into a fresh .xls, then confirm via dialog.

    Source and destination paths are hard-coded; values are copied
    cell by cell (formatting is not preserved).
    """
    file_name = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"  # fixed typo: was 'flile_name'
    # Read the source workbook
    xlsx = xlrd.open_workbook(file_name)
    # Number of sheets to copy
    sheets = len(xlsx.sheets())
    # Destination workbook (xlwt writes .xls)
    new_workbook = xlwt.Workbook()
    for sheet in range(sheets):
        table = xlsx.sheet_by_index(sheet)
        rows = table.nrows
        cols = table.ncols
        worksheet = new_workbook.add_sheet("sheet" + str(sheet))
        for i in range(0, rows):
            for j in range(0, cols):
                worksheet.write(i, j, table.cell_value(i, j))
    new_workbook.save('E:/YoloV5/yolov5-master/runs/pass_xls/Data.xls')
    QtWidgets.QMessageBox.information(self, u"Tips", u"数据传递成功!", buttons=QtWidgets.QMessageBox.Ok,
                                      defaultButton=QtWidgets.QMessageBox.Ok)
def show_video_frame(self):
    """Timer slot: read one frame from self.cap, run detection, display and record it.

    When the stream is exhausted (read() yields None) the timer is
    stopped, capture/writer are released, and the idle UI is restored.
    """
    name_list = []
    flag, img = self.cap.read()
    if img is not None:
        showimg = img  # keep a reference at original size for drawing
        with torch.no_grad():
            img = letterbox(img, new_shape=self.opt.img_size)[0]
            # Convert
            img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
            img = np.ascontiguousarray(img)
            img = torch.from_numpy(img).to(self.device)
            img = img.half() if self.half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)
            # Inference
            pred = self.model(img, augment=self.opt.augment)[0]
            # Apply NMS
            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                       agnostic=self.opt.agnostic_nms)
            # Process detections
            for i, det in enumerate(pred):  # detections per image
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                    # Write results
                    for *xyxy, conf, cls in reversed(det):
                        label = '%s %.2f' % (self.names[int(cls)], conf)
                        name_list.append(self.names[int(cls)])
                        print(label)
                        plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
        self.out.write(showimg)  # record the annotated frame
        show = cv2.resize(showimg, (640, 480))
        self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
        showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
    else:
        # End of stream / camera closed: tear down and restore the idle UI.
        self.timer_video.stop()
        self.cap.release()
        self.out.release()
        self.label.clear()
        self.pushButton_video.setDisabled(False)
        self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setDisabled(False)
        self.init_logo()
if __name__ == '__main__':
    # Script entry point: spin up Qt and block until the window closes.
    application = QtWidgets.QApplication(sys.argv)
    main_window = Ui_MainWindow()
    main_window.show()
    sys.exit(application.exec_())
代码93:**********项目检测界面*********(2021年8月29日)双界面显示
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI_interface.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import sys
import cv2
import argparse
import random
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from PyQt5 import QtCore, QtGui, QtWidgets
from utils.torch_utils import select_device
from models.experimental import attempt_load
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.datasets import letterbox
from utils.plots import plot_one_box
from openpyxl import load_workbook
import xlrd
import xlwt
class Ui_MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
    """Build the detection window, parse YOLOv5 options and load the model once."""
    super(Ui_MainWindow, self).__init__(parent)
    # Timer that drives frame-by-frame video/camera detection.
    self.timer_video = QtCore.QTimer()
    self.setupUi(self)
    self.init_logo()
    self.init_slots()
    self.cap = cv2.VideoCapture()  # opened later by the video/camera buttons
    self.out = None  # cv2.VideoWriter, created when a stream starts
    # Detection options mirror YOLOv5 detect.py defaults (weights path is machine-specific).
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='E:/555/555/yolov5-master/runs/train/exp29/weights/best.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='0', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    self.opt = parser.parse_args()
    print(self.opt)
    source, weights, view_img, save_txt, imgsz = self.opt.source, self.opt.weights, self.opt.view_img, self.opt.save_txt, self.opt.img_size
    self.device = select_device(self.opt.device)
    self.half = self.device.type != 'cpu'  # half precision only supported on CUDA
    cudnn.benchmark = True  # input size is fixed, so let cuDNN autotune kernels
    # Load model
    self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
    stride = int(self.model.stride.max())  # model stride
    self.imgsz = check_img_size(imgsz, s=stride)  # check img_size
    if self.half:
        self.model.half()  # to FP16
    # Get names and colors
    self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
    self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]
def setupUi(self, MainWindow):
    """Construct all widgets and layouts (pyuic-generated UI, hand-tidied).

    The four action buttons shared ~15 duplicated setup lines each;
    that boilerplate is factored into _make_button. Widget names,
    object names, geometry and settings are unchanged; dead
    commented-out layout calls were removed.
    """
    MainWindow.setObjectName("MainWindow")
    MainWindow.resize(1507, 881)
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
    self.layoutWidget.setGeometry(QtCore.QRect(30, 40, 152, 642))
    self.layoutWidget.setObjectName("layoutWidget")
    self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
    self.verticalLayout.setContentsMargins(0, 0, 0, 0)
    self.verticalLayout.setSpacing(80)
    self.verticalLayout.setObjectName("verticalLayout")
    # Action buttons: identical size/font; only vertical size policy and alignment differ.
    self.pushButton_img = self._make_button("pushButton_img", QtWidgets.QSizePolicy.MinimumExpanding)
    self.verticalLayout.addWidget(self.pushButton_img)
    self.pushButton_camera = self._make_button("pushButton_camera", QtWidgets.QSizePolicy.Expanding)
    self.verticalLayout.addWidget(self.pushButton_camera, 0, QtCore.Qt.AlignHCenter)
    self.pushButton_video = self._make_button("pushButton_video", QtWidgets.QSizePolicy.Expanding)
    self.verticalLayout.addWidget(self.pushButton_video, 0, QtCore.Qt.AlignHCenter)
    self.pushButton_Transfer_data = self._make_button("pushButton_Transfer_data", QtWidgets.QSizePolicy.Expanding)
    self.verticalLayout.addWidget(self.pushButton_Transfer_data)
    self.verticalLayout.setStretch(2, 1)
    # Left display label (detection output) and right label.
    self.label = QtWidgets.QLabel(self.centralwidget)
    self.label.setGeometry(QtCore.QRect(190, 40, 541, 641))
    self.label.setObjectName("label")
    self.label_2 = QtWidgets.QLabel(self.centralwidget)
    self.label_2.setGeometry(QtCore.QRect(750, 30, 371, 651))
    self.label_2.setLineWidth(2)
    self.label_2.setObjectName("label_2")
    MainWindow.setCentralWidget(self.centralwidget)
    self.menubar = QtWidgets.QMenuBar(MainWindow)
    self.menubar.setGeometry(QtCore.QRect(0, 0, 1507, 26))
    self.menubar.setObjectName("menubar")
    MainWindow.setMenuBar(self.menubar)
    self.statusbar = QtWidgets.QStatusBar(MainWindow)
    self.statusbar.setObjectName("statusbar")
    MainWindow.setStatusBar(self.statusbar)
    self.retranslateUi(MainWindow)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)

def _make_button(self, object_name, v_policy):
    """Create one fixed-size (150x100) 12pt 'Agency FB' button parented to layoutWidget."""
    button = QtWidgets.QPushButton(self.layoutWidget)
    size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, v_policy)
    size_policy.setHorizontalStretch(0)
    size_policy.setVerticalStretch(0)
    size_policy.setHeightForWidth(button.sizePolicy().hasHeightForWidth())
    button.setSizePolicy(size_policy)
    button.setMinimumSize(QtCore.QSize(150, 100))
    button.setMaximumSize(QtCore.QSize(150, 100))
    font = QtGui.QFont()
    font.setFamily("Agency FB")
    font.setPointSize(12)
    button.setFont(font)
    button.setObjectName(object_name)
    return button
def retranslateUi(self, MainWindow):
    """Assign every user-visible (translatable) string; invoked once from setupUi."""
    tr = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(tr("MainWindow", "MainWindow"))
    # Button captions and placeholder label texts.
    for widget, caption in (
        (self.pushButton_img, "图片检测"),
        (self.pushButton_camera, "摄像头检测"),
        (self.pushButton_video, "视频检测"),
        (self.pushButton_Transfer_data, "传递数据"),
        (self.label, "TextLabel"),
        (self.label_2, "TextLabel"),
    ):
        widget.setText(tr("MainWindow", caption))
def init_slots(self):
    """Wire button clicks and the video timer to their handler methods."""
    self.pushButton_img.clicked.connect(self.button_image_open)
    self.pushButton_video.clicked.connect(self.button_video_open)
    self.pushButton_camera.clicked.connect(self.button_camera_open)
    self.pushButton_Transfer_data.clicked.connect(self.pushButton_Transfer_data_open)
    self.timer_video.timeout.connect(self.show_video_frame)
    # NOTE(review): the same timeout also drives show_video_frame_2 (defined
    # outside this view). If both slots call self.cap.read(), every tick would
    # consume two frames — confirm show_video_frame_2's implementation.
    self.timer_video.timeout.connect(self.show_video_frame_2)
def init_logo(self):
    """Show the placeholder image 'wechat.jpg' in both display labels."""
    logo = QtGui.QPixmap('wechat.jpg')
    for display in (self.label, self.label_2):
        display.setScaledContents(True)  # stretch the pixmap to fill the label
        display.setPixmap(logo)
def button_image_open(self):
    """Pick an image from disk, run YOLOv5 detection, and show the result in both panes.

    Saves the annotated image to 'prediction.jpg', paints it into
    self.label and self.label_2, and colors their borders. Returns
    early (no-op) when the dialog is cancelled or the chosen file
    cannot be decoded by OpenCV — previously this crashed inside
    letterbox() on a None image.
    """
    print('button_image_open')
    name_list = []
    img_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "打开图片", "", "*.jpg;;*.png;;All Files(*)")
    if not img_name:
        return  # user cancelled the dialog
    img = cv2.imread(img_name)
    print(img_name)
    if img is None:
        return  # unreadable / non-image file: cv2.imread signals failure with None
    showimg = img
    with torch.no_grad():
        img = letterbox(img, new_shape=self.opt.img_size)[0]
        # Convert BGR -> RGB, HWC -> CHW for the model
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(self.device)
        img = img.half() if self.half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension
        # Inference
        pred = self.model(img, augment=self.opt.augment)[0]
        # Apply NMS
        pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                   agnostic=self.opt.agnostic_nms)
        print(pred)
        # Process detections: draw every surviving box back onto the original-size image
        for i, det in enumerate(pred):
            if det is not None and len(det):
                # Rescale boxes from img_size to the original image size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                for *xyxy, conf, cls in reversed(det):
                    label = '%s %.2f' % (self.names[int(cls)], conf)
                    name_list.append(self.names[int(cls)])
                    plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
    cv2.imwrite('prediction.jpg', showimg)
    # BGRA byte order matches QImage.Format_RGB32 on little-endian hosts
    self.result = cv2.cvtColor(showimg, cv2.COLOR_BGR2BGRA)
    self.result = cv2.resize(self.result, (640, 480), interpolation=cv2.INTER_AREA)
    self.QtImg = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0], QtGui.QImage.Format_RGB32)
    self.label.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
    self.label.setStyleSheet("border: 2px solid red")  # mark the left pane border
    self.label_2.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
    self.label_2.setStyleSheet("border: 2px solid blue")  # mark the right pane border
def button_video_open(self):
    """Pick a video file and start timer-driven detection on it.

    Opens the file with self.cap, prepares an MJPG writer for the
    annotated output ('prediction.avi'), disables the other buttons
    and colors the pane borders. Shows a warning only when a real
    file fails to open; cancelling the dialog is now a silent no-op
    (previously it popped a spurious failure warning).
    """
    video_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "打开视频", "", "*.mp4;;*.avi;;All Files(*)")
    if not video_name:
        return  # dialog cancelled
    flag = self.cap.open(video_name)
    if not flag:  # idiomatic truth test instead of '== False'
        QtWidgets.QMessageBox.warning(self, u"Warning", u"打开视频失败", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
    else:
        # Writer matches the capture's native frame size (props 3/4 = width/height).
        self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20, (int(self.cap.get(3)), int(self.cap.get(4))))
        self.timer_video.start(30)  # ~33 ticks/s; each tick processes one frame
        self.pushButton_video.setDisabled(True)
        self.pushButton_img.setDisabled(True)
        self.pushButton_camera.setDisabled(True)
        self.label.setStyleSheet("border: 2px solid red")  # mark the left pane border
        self.label_2.setStyleSheet("border: 2px solid blue")  # mark the right pane border
        QtWidgets.QMessageBox.information(self, u"Tips", u"视频检测进行中!", buttons=QtWidgets.QMessageBox.Ok,
                                          defaultButton=QtWidgets.QMessageBox.Ok)
def button_camera_open(self):
    """Toggle camera detection.

    When the video timer is idle, open local camera 0, create the MJPG
    writer and start detection; otherwise stop the timer, release the
    capture/writer and restore the idle UI.
    """
    if not self.timer_video.isActive():
        # Use the first local camera by default.
        flag = self.cap.open(0)
        if not flag:  # idiomatic truth test instead of `flag == False`
            QtWidgets.QMessageBox.warning(self, u"Warning", u"打开摄像头失败", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
        else:
            # Writer matches the source resolution (props 3/4 = width/height).
            self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20, (int(self.cap.get(3)), int(self.cap.get(4))))
            self.timer_video.start(30)
            self.pushButton_video.setDisabled(True)
            self.pushButton_img.setDisabled(True)
            self.pushButton_camera.setText(u"关闭摄像头")
    else:
        # Second press: shut everything down and reset the interface.
        self.timer_video.stop()
        self.cap.release()
        self.out.release()
        self.label.clear()
        self.init_logo()
        self.pushButton_video.setDisabled(False)
        self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setText(u"摄像头检测")
def pushButton_Transfer_data_open(self):
    """Copy every sheet of the detection-result workbook to Data.xls.

    Reads the source .xls with xlrd, rewrites each sheet cell-by-cell
    with xlwt, saves the copy, then shows a confirmation box.
    """
    src_path = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"  # fixed typo 'flile_name'
    dst_path = 'E:/YoloV5/yolov5-master/runs/pass_xls/Data.xls'
    workbook = xlrd.open_workbook(src_path)
    new_workbook = xlwt.Workbook()
    for sheet_idx in range(len(workbook.sheets())):
        table = workbook.sheet_by_index(sheet_idx)
        worksheet = new_workbook.add_sheet("sheet" + str(sheet_idx))
        for i in range(table.nrows):
            for j in range(table.ncols):
                worksheet.write(i, j, table.cell_value(i, j))
    new_workbook.save(dst_path)
    QtWidgets.QMessageBox.information(self, u"Tips", u"数据传递成功!", buttons=QtWidgets.QMessageBox.Ok,
                                      defaultButton=QtWidgets.QMessageBox.Ok)
def show_video_frame(self):
    """Timer slot: read one frame, run detection, paint the LEFT pane.

    Reads from ``self.cap``; on a valid frame runs YOLOv5 inference,
    draws boxes, appends the annotated frame to ``self.out`` and shows
    it on ``self.label``.  When the stream ends the timer is stopped,
    capture/writer are released and the idle UI is restored.
    """
    name_list = []
    flag, img = self.cap.read()
    if img is not None:
        showimg = img  # full-resolution alias kept for drawing
        with torch.no_grad():
            img = letterbox(img, new_shape=self.opt.img_size)[0]
            # Convert
            img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
            img = np.ascontiguousarray(img)
            img = torch.from_numpy(img).to(self.device)
            img = img.half() if self.half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)  # add batch dimension
            # Inference
            pred = self.model(img, augment=self.opt.augment)[0]
            # Apply NMS
            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                       agnostic=self.opt.agnostic_nms)
            # Process detections
            for i, det in enumerate(pred):  # detections per image
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                    # Write results
                    for *xyxy, conf, cls in reversed(det):
                        label = '%s %.2f' % (self.names[int(cls)], conf)
                        name_list.append(self.names[int(cls)])
                        print(label)
                        plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
        self.out.write(showimg)
        show = cv2.resize(showimg, (640, 480))
        self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
        showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
    else:
        # Stream exhausted: stop playback and reset the interface.
        self.timer_video.stop()
        self.cap.release()
        self.out.release()
        self.label.clear()
        self.pushButton_video.setDisabled(False)
        self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setDisabled(False)
        self.init_logo()
def show_video_frame_2(self):
    """Timer slot: read one frame, run detection, paint BOTH panes.

    Same pipeline as ``show_video_frame`` but also mirrors the result
    onto ``self.label_2`` (the right-hand display).  NOTE(review): both
    slots are connected to the same timer, so each tick consumes two
    frames from ``self.cap`` — confirm this is intentional.
    """
    name_list = []
    flag, img = self.cap.read()
    if img is not None:
        showimg = img  # full-resolution alias kept for drawing
        with torch.no_grad():
            img = letterbox(img, new_shape=self.opt.img_size)[0]
            # Convert
            img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
            img = np.ascontiguousarray(img)
            img = torch.from_numpy(img).to(self.device)
            img = img.half() if self.half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)  # add batch dimension
            # Inference
            pred = self.model(img, augment=self.opt.augment)[0]
            # Apply NMS
            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                       agnostic=self.opt.agnostic_nms)
            # Process detections
            for i, det in enumerate(pred):  # detections per image
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                    # Write results
                    for *xyxy, conf, cls in reversed(det):
                        label_2 = '%s %.2f' % (self.names[int(cls)], conf)
                        name_list.append(self.names[int(cls)])
                        print(label_2)
                        plot_one_box(xyxy, showimg, label=label_2, color=self.colors[int(cls)], line_thickness=2)
        self.out.write(showimg)
        show = cv2.resize(showimg, (640, 480))
        self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
        showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
        self.label_2.setPixmap(QtGui.QPixmap.fromImage(showImage))  # required to paint the right-hand label
    else:
        # Stream exhausted: stop playback and reset the interface.
        self.timer_video.stop()
        self.cap.release()
        self.out.release()
        self.label.clear()
        self.pushButton_video.setDisabled(False)
        self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setDisabled(False)
        self.init_logo()
if __name__ == '__main__':
    # Launch the Qt application and show the detection window.
    app = QtWidgets.QApplication(sys.argv)
    ui = Ui_MainWindow()
    ui.show()
    sys.exit(app.exec_())
代码94:********英文显示界面*******两个界面
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI_interface.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import sys
import cv2
import argparse
import random
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from PyQt5 import QtCore, QtGui, QtWidgets
from utils.torch_utils import select_device
from models.experimental import attempt_load
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.datasets import letterbox
from utils.plots import plot_one_box
from openpyxl import load_workbook
import xlrd
import xlwt
class Ui_MainWindow(QtWidgets.QMainWindow):
    """YOLOv5 detection main window (English-labelled UI).

    Buttons trigger image / camera / video detection and an Excel data
    transfer; two QLabels (left and right) display annotated results.
    """

    def __init__(self, parent=None):
        """Build the UI, wire signals, parse detect options, load the model."""
        super(Ui_MainWindow, self).__init__(parent)
        self.timer_video = QtCore.QTimer()  # drives periodic frame refresh
        self.setupUi(self)
        self.init_logo()
        self.init_slots()
        self.cap = cv2.VideoCapture()  # opened later by the video/camera buttons
        self.out = None  # cv2.VideoWriter, created once a stream starts
        # self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'XVID'), 20.0, (640, 480))
        # Detection options, mirroring detect.py's command line.
        parser = argparse.ArgumentParser()
        parser.add_argument('--weights', nargs='+', type=str, default='E:/555/555/yolov5-master/runs/train/exp29/weights/best.pt', help='model.pt path(s)')
        parser.add_argument('--source', type=str, default='0', help='source')  # file/folder, 0 for webcam
        parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
        parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
        parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
        parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
        parser.add_argument('--view-img', action='store_true', help='display results')
        parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
        parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
        parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
        parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
        parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
        parser.add_argument('--augment', action='store_true', help='augmented inference')
        parser.add_argument('--update', action='store_true', help='update all models')
        parser.add_argument('--project', default='runs/detect', help='save results to project/name')
        parser.add_argument('--name', default='exp', help='save results to project/name')
        parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
        self.opt = parser.parse_args()
        print(self.opt)
        source, weights, view_img, save_txt, imgsz = self.opt.source, self.opt.weights, self.opt.view_img, self.opt.save_txt, self.opt.img_size
        self.device = select_device(self.opt.device)
        self.half = self.device.type != 'cpu'  # half precision only supported on CUDA
        cudnn.benchmark = True
        # Load model
        self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
        stride = int(self.model.stride.max())  # model stride
        self.imgsz = check_img_size(imgsz, s=stride)  # check img_size
        if self.half:
            self.model.half()  # to FP16
        # Get names and colors
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]

    def setupUi(self, MainWindow):
        """Create all widgets and geometry (pyuic5-generated layout code)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1507, 881)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.layoutWidget.setGeometry(QtCore.QRect(30, 40, 152, 642))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setSpacing(80)
        self.verticalLayout.setObjectName("verticalLayout")
        # "Image detection" button.
        self.pushButton_img = QtWidgets.QPushButton(self.layoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_img.sizePolicy().hasHeightForWidth())
        self.pushButton_img.setSizePolicy(sizePolicy)
        self.pushButton_img.setMinimumSize(QtCore.QSize(150, 100))
        self.pushButton_img.setMaximumSize(QtCore.QSize(150, 100))
        font = QtGui.QFont()
        font.setFamily("Agency FB")
        font.setPointSize(12)
        self.pushButton_img.setFont(font)
        self.pushButton_img.setObjectName("pushButton_img")
        self.verticalLayout.addWidget(self.pushButton_img)
        # "Camera detection" button.
        self.pushButton_camera = QtWidgets.QPushButton(self.layoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_camera.sizePolicy().hasHeightForWidth())
        self.pushButton_camera.setSizePolicy(sizePolicy)
        self.pushButton_camera.setMinimumSize(QtCore.QSize(150, 100))
        self.pushButton_camera.setMaximumSize(QtCore.QSize(150, 100))
        font = QtGui.QFont()
        font.setFamily("Agency FB")
        font.setPointSize(12)
        self.pushButton_camera.setFont(font)
        self.pushButton_camera.setObjectName("pushButton_camera")
        self.verticalLayout.addWidget(self.pushButton_camera, 0, QtCore.Qt.AlignHCenter)
        # "Video detection" button.
        self.pushButton_video = QtWidgets.QPushButton(self.layoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_video.sizePolicy().hasHeightForWidth())
        self.pushButton_video.setSizePolicy(sizePolicy)
        self.pushButton_video.setMinimumSize(QtCore.QSize(150, 100))
        self.pushButton_video.setMaximumSize(QtCore.QSize(150, 100))
        font = QtGui.QFont()
        font.setFamily("Agency FB")
        font.setPointSize(12)
        self.pushButton_video.setFont(font)
        self.pushButton_video.setObjectName("pushButton_video")
        self.verticalLayout.addWidget(self.pushButton_video, 0, QtCore.Qt.AlignHCenter)
        # "data transmission" button.
        self.pushButton_Transfer_data = QtWidgets.QPushButton(self.layoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_Transfer_data.sizePolicy().hasHeightForWidth())
        self.pushButton_Transfer_data.setSizePolicy(sizePolicy)
        self.pushButton_Transfer_data.setMinimumSize(QtCore.QSize(150, 100))
        self.pushButton_Transfer_data.setMaximumSize(QtCore.QSize(150, 100))
        font = QtGui.QFont()
        font.setFamily("Agency FB")
        font.setPointSize(12)
        self.pushButton_Transfer_data.setFont(font)
        self.pushButton_Transfer_data.setObjectName("pushButton_Transfer_data")
        self.verticalLayout.addWidget(self.pushButton_Transfer_data)
        self.verticalLayout.setStretch(2, 1)
        # Left display label.
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(190, 40, 541, 641))
        self.label.setObjectName("label")
        #self.horizontalLayout.addWidget(self.label)
        # Right display label.
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(750, 30, 371, 651))
        self.label_2.setLineWidth(2)
        self.label_2.setObjectName("label_2")
        #self.horizontalLayout.addWidget(self.label_2)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1507, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply the (English) display texts to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton_img.setText(_translate("MainWindow", "Image detection"))
        self.pushButton_camera.setText(_translate("MainWindow", "Camera detection"))
        self.pushButton_video.setText(_translate("MainWindow", "Video detection"))
        self.pushButton_Transfer_data.setText(_translate("MainWindow", "data transmission"))
        self.label.setText(_translate("MainWindow", "TextLabel"))
        self.label_2.setText(_translate("MainWindow", "TextLabel"))

    def init_slots(self):
        """Connect buttons and the video timer to their handlers."""
        self.pushButton_img.clicked.connect(self.button_image_open)
        self.pushButton_video.clicked.connect(self.button_video_open)
        self.pushButton_camera.clicked.connect(self.button_camera_open)
        self.pushButton_Transfer_data.clicked.connect(self.pushButton_Transfer_data_open)
        # Both slots fire on the same tick: left pane then right pane.
        self.timer_video.timeout.connect(self.show_video_frame)
        self.timer_video.timeout.connect(self.show_video_frame_2)

    def init_logo(self):
        """Show the placeholder image in both display labels."""
        pix = QtGui.QPixmap('wechat.jpg')
        self.label.setScaledContents(True)
        self.label_2.setScaledContents(True)
        self.label.setPixmap(pix)
        self.label_2.setPixmap(pix)

    def button_image_open(self):
        """Pick an image, run detection, show the result in both panes."""
        print('button_image_open')
        name_list = []
        img_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open image", "", "*.jpg;;*.png;;All Files(*)")
        img = cv2.imread(img_name)
        print(img_name)
        showimg = img  # full-resolution alias kept for drawing
        with torch.no_grad():
            img = letterbox(img, new_shape=self.opt.img_size)[0]
            # Convert
            img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
            img = np.ascontiguousarray(img)
            img = torch.from_numpy(img).to(self.device)
            img = img.half() if self.half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)  # add batch dimension
            # Inference
            pred = self.model(img, augment=self.opt.augment)[0]
            # Apply NMS
            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                       agnostic=self.opt.agnostic_nms)
            print(pred)
            # Process detections
            for i, det in enumerate(pred):
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                    for *xyxy, conf, cls in reversed(det):
                        label = '%s %.2f' % (self.names[int(cls)], conf)
                        name_list.append(self.names[int(cls)])
                        plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
        cv2.imwrite('prediction.jpg', showimg)
        self.result = cv2.cvtColor(showimg, cv2.COLOR_BGR2BGRA)
        self.result = cv2.resize(self.result, (640, 480), interpolation=cv2.INTER_AREA)
        self.QtImg = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0], QtGui.QImage.Format_RGB32)
        self.label.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
        self.label.setStyleSheet("border: 2px solid red")  # left pane border colour
        self.label_2.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))  # mirror on the right pane
        self.label_2.setStyleSheet("border: 2px solid blue")  # right pane border colour

    def button_video_open(self):
        """Pick a video file, open it and start the detection timer."""
        video_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open video", "", "*.mp4;;*.avi;;All Files(*)")
        flag = self.cap.open(video_name)
        if flag == False:
            QtWidgets.QMessageBox.warning(self, u"Warning", u"Video opening failed", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
        else:
            # Writer matches the source resolution (props 3/4 = width/height).
            self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20, (int(self.cap.get(3)), int(self.cap.get(4))))
            self.timer_video.start(30)
            self.pushButton_video.setDisabled(True)
            self.pushButton_img.setDisabled(True)
            self.pushButton_camera.setDisabled(True)
            self.label.setStyleSheet("border: 2px solid red")  # left pane border colour
            #self.label_2.setPixmap(QtGui.QPixmap.fromImage(self.flag))
            self.label_2.setStyleSheet("border: 2px solid blue")  # right pane border colour
            QtWidgets.QMessageBox.information(self, u"Tips", u"Video detection in progress!", buttons=QtWidgets.QMessageBox.Ok,
                                              defaultButton=QtWidgets.QMessageBox.Ok)

    def button_camera_open(self):
        """Toggle camera detection: open camera 0, or stop and reset the UI."""
        if not self.timer_video.isActive():
            # Use the first local camera by default.
            flag = self.cap.open(0)
            if flag == False:
                QtWidgets.QMessageBox.warning(self, u"Warning", u"Camera failed to open", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
            else:
                self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20, (int(self.cap.get(3)), int(self.cap.get(4))))
                self.timer_video.start(30)
                self.pushButton_video.setDisabled(True)
                self.pushButton_img.setDisabled(True)
                self.pushButton_camera.setText(u"Turn off the camera")
        else:
            # Second press: shut everything down and restore the idle UI.
            self.timer_video.stop()
            self.cap.release()
            self.out.release()
            self.label.clear()
            self.init_logo()
            self.pushButton_video.setDisabled(False)
            self.pushButton_img.setDisabled(False)
            self.pushButton_camera.setText(u"Camera detection")

    def pushButton_Transfer_data_open(self):
        """Copy every sheet of the result workbook into Data.xls."""
        flile_name = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"
        # Open the source workbook.
        xlsx = xlrd.open_workbook(flile_name)
        # xlsx = xlrd.open_workbook("readexcel.xlsx")
        # Number of sheets to copy.
        sheets = len(xlsx.sheets())
        # Destination workbook.
        new_workbook = xlwt.Workbook()
        for sheet in range(sheets):
            table = xlsx.sheet_by_index(sheet)
            rows = table.nrows
            cols = table.ncols
            worksheet = new_workbook.add_sheet("sheet" + str(sheet))
            for i in range(0, rows):
                for j in range(0, cols):
                    # print(i,j,table.cell_value(i, j))
                    worksheet.write(i, j, table.cell_value(i, j))
        new_workbook.save('E:/YoloV5/yolov5-master/runs/pass_xls/Data.xls')
        QtWidgets.QMessageBox.information(self, u"Tips", u"Data transfer is successful!", buttons=QtWidgets.QMessageBox.Ok,
                                          defaultButton=QtWidgets.QMessageBox.Ok)

    def show_video_frame(self):
        """Timer slot: detect on the next frame and paint the LEFT pane."""
        name_list = []
        flag, img = self.cap.read()
        if img is not None:
            showimg = img  # full-resolution alias kept for drawing
            with torch.no_grad():
                img = letterbox(img, new_shape=self.opt.img_size)[0]
                # Convert
                img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                img = np.ascontiguousarray(img)
                img = torch.from_numpy(img).to(self.device)
                img = img.half() if self.half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)  # add batch dimension
                # Inference
                pred = self.model(img, augment=self.opt.augment)[0]
                # Apply NMS
                pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                           agnostic=self.opt.agnostic_nms)
                # Process detections
                for i, det in enumerate(pred):  # detections per image
                    if det is not None and len(det):
                        # Rescale boxes from img_size to im0 size
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                        # Write results
                        for *xyxy, conf, cls in reversed(det):
                            label = '%s %.2f' % (self.names[int(cls)], conf)
                            name_list.append(self.names[int(cls)])
                            print(label)
                            plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
            self.out.write(showimg)
            show = cv2.resize(showimg, (640, 480))
            self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
            showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                     QtGui.QImage.Format_RGB888)
            self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
        else:
            # Stream exhausted: stop playback and reset the interface.
            self.timer_video.stop()
            self.cap.release()
            self.out.release()
            self.label.clear()
            self.pushButton_video.setDisabled(False)
            self.pushButton_img.setDisabled(False)
            self.pushButton_camera.setDisabled(False)
            self.init_logo()

    def show_video_frame_2(self):
        """Timer slot: detect on the next frame and paint BOTH panes.

        NOTE(review): this slot shares the timer with ``show_video_frame``,
        so each tick reads two frames from ``self.cap`` — confirm intended.
        """
        name_list = []
        flag, img = self.cap.read()
        if img is not None:
            showimg = img  # full-resolution alias kept for drawing
            with torch.no_grad():
                img = letterbox(img, new_shape=self.opt.img_size)[0]
                # Convert
                img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                img = np.ascontiguousarray(img)
                img = torch.from_numpy(img).to(self.device)
                img = img.half() if self.half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)  # add batch dimension
                # Inference
                pred = self.model(img, augment=self.opt.augment)[0]
                # Apply NMS
                pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                           agnostic=self.opt.agnostic_nms)
                # Process detections
                for i, det in enumerate(pred):  # detections per image
                    if det is not None and len(det):
                        # Rescale boxes from img_size to im0 size
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                        # Write results
                        for *xyxy, conf, cls in reversed(det):
                            label_2 = '%s %.2f' % (self.names[int(cls)], conf)
                            name_list.append(self.names[int(cls)])
                            print(label_2)
                            plot_one_box(xyxy, showimg, label=label_2, color=self.colors[int(cls)], line_thickness=2)
            self.out.write(showimg)
            show = cv2.resize(showimg, (640, 480))
            self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
            showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                     QtGui.QImage.Format_RGB888)
            self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
            self.label_2.setPixmap(QtGui.QPixmap.fromImage(showImage))  # required to paint the right-hand label
        else:
            # Stream exhausted: stop playback and reset the interface.
            self.timer_video.stop()
            self.cap.release()
            self.out.release()
            self.label.clear()
            self.pushButton_video.setDisabled(False)
            self.pushButton_img.setDisabled(False)
            self.pushButton_camera.setDisabled(False)
            self.init_logo()
if __name__ == '__main__':
    # Launch the Qt application and show the detection window.
    app = QtWidgets.QApplication(sys.argv)
    ui = Ui_MainWindow()
    ui.show()
    sys.exit(app.exec_())
代码94:Video.py文件*********间隔帧数拍照、检测、保存txt标签,生成一个excel,传递至另一个文件夹生成excel(2021年8月29日)********
import time
import cv2
import numpy as np
from PIL import Image
import detect
import os
import xlwt
import xlrd
# from detect import parse_opt  # detect.py is YOLOv5's detection script; parse_opt holds its options
# detect = parse_opt


def getline(filepath, xlspath):
    """Collect every label .txt under *filepath* into one .xls workbook.

    Each text file becomes a sheet named after the file; tab-separated
    fields go to columns, lines to rows (newlines replaced by spaces).

    Fixes over the original: the file handle is closed even on error
    (``with``), each line is split only once instead of once per column,
    the no-op ``try/except: raise`` is gone, and the function is defined
    once at module level instead of being re-created every loop pass.
    """
    file_names = os.listdir(filepath)
    # Sort numerically by the stem — names are "<frame-number>.txt".
    file_names.sort(key=lambda x: int(x[:-4]))
    file_ob_list = [filepath + "/" + file_name for file_name in file_names]
    print(file_ob_list)
    xls = xlwt.Workbook()
    for file_ob in file_ob_list:
        # One sheet per text file, named after the file.
        sheet_name = os.path.basename(file_ob)
        sheet = xls.add_sheet(sheet_name, cell_overwrite_ok=True)
        with open(file_ob) as f:
            for x, line in enumerate(f):
                cells = line.split('\t')  # split once per line
                for i, data in enumerate(cells):
                    # Replace newlines with spaces before writing cell (x, i).
                    sheet.write(x, i, str(data).replace('\n', ' '))
    xls.save(xlspath)


capture = cv2.VideoCapture(0)  # default local camera
# capture=cv2.VideoCapture("D:/1.mp4")
if capture.isOpened():
    ref, frame = capture.read()
else:
    ref = False
fps = 0.0
timeF = 50  # sampling interval in frames (YOLOv5 reaches ~140 FPS here)
c = 1  # frame counter
while ref:
    t1 = time.time()
    # Grab the next frame.
    ref, frame = capture.read()
    # Frames saved below carry no detections; they are used to collect
    # training data and to check that the camera is sharp and stable.
    if c % timeF == 0:
        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %.2f" % (fps))
        frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        # Save every timeF-th frame to /runs/img/in/.
        cv2.imwrite("./runs/img/in/" + str(c) + '.jpg', frame)
        # Feed the saved frame to detect; results land in /runs/img/out.
        # NOTE(review): 'photo+str(c)' is a quoted literal, NOT string
        # concatenation — every run reuses the same output name. Kept
        # as-is because the label path below relies on the same literal.
        detect.run(source="./runs/img/in/" + str(c) + '.jpg', name='../img/out/photo+str(c)'+'.jpg', save_txt=True)
        # NOTE(review): oddly placed __main__ guard kept to preserve
        # the original skip-on-import behaviour of this section.
        if __name__ == "__main__":
            filepath = "E:/YoloV5/yolov5-master/runs/img/out/photo+str(c)2.jpg/labels"  # label .txt directory
            xlspath = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"  # absolute path of the .xls
            getline(filepath, xlspath)
            # Copy the freshly written workbook into the hand-over folder.
            src_book = xlrd.open_workbook(xlspath)
            new_workbook = xlwt.Workbook()
            for sheet_idx in range(len(src_book.sheets())):
                table = src_book.sheet_by_index(sheet_idx)
                worksheet = new_workbook.add_sheet("sheet" + str(sheet_idx))
                for i in range(table.nrows):
                    for j in range(table.ncols):
                        worksheet.write(i, j, table.cell_value(i, j))
            new_workbook.save('E:/YoloV5/yolov5-master/runs/pass_xls/Data.xls')
    c += 1
    # Show the live camera feed.
    cv2.imshow("video", frame)
    k = cv2.waitKey(1)
    if k == ord('q'):  # 'q' quits
        capture.release()
        break
    k = cv2.waitKey(1)
    if k == 27:  # ESC quits
        capture.release()
        break