python使用opencv+tensorflow+redis+rabbitmq 曝气池异常视频分析

   主要对视频进行监控,曝气池异常 业务场景不需要太实时!三分钟一次就可以!

思路是从redis获取所有关联企业视频地址链接信息、模型地址等等,再使用opencv解析实时rtsp流,截取当前一帧的图片并将其转换为numpy数组。这个转换过程一定要跟之前训练时的图片大小、RGB格式一致。然后加载tensorflow模型,获取之前训练时输入的tensor和概率,最后推送至mq通知报警系统。


import json
import re
import time
import uuid

import cv2
import numpy as np
import pika
import redis
import schedule
import tensorflow as tf
from skimage import io, transform

# Label map: model output class index -> tag pushed to the alarm system.
# NOTE(review): 0 -> 'no', 1 -> 'ok' -- presumably 'no' means an abnormal
# aeration tank; confirm against the training-time label order.
flower_dict = {0: 'no', 1: 'ok'}


def producer(msg):
    """Publish *msg* to the 'Clogs' fanout exchange on RabbitMQ.

    msg: str -- JSON-encoded alarm payload for the alerting system.

    NOTE(review): 'username' / 'psassword' / 'ip' look like placeholders to be
    filled from configuration -- confirm before deployment.
    """
    credentials = pika.PlainCredentials('username', 'psassword')
    connection = pika.BlockingConnection(pika.ConnectionParameters(
        'ip', 5672, '/', credentials))
    try:
        channel = connection.channel()
        channel.exchange_declare(exchange='Clogs',
                                 exchange_type='fanout')
        # fanout exchange ignores the routing key, so it is left empty
        channel.basic_publish(exchange='Clogs',
                              routing_key='',
                              body=msg)
    finally:
        # Close even when declare/publish raises, so we do not leak
        # the TCP connection on repeated scheduled runs.
        connection.close()


def read_one_image(path):
    """Load the image at *path*, resize it to 100x100 (the training-time
    input size), and return it as a numpy array."""
    resized = transform.resize(io.imread(path), (100, 100))
    return np.asarray(resized)


def job(dictinfo):
    """Grab one frame per enterprise camera, classify it with the saved
    TensorFlow model and publish the verdict to RabbitMQ.

    dictinfo: iterable of dicts with keys 'videoPat' (rtsp url),
    'storagePath' (snapshot dir, must end with a path separator),
    'metaPath' (.meta graph file) and 'modelPath' (checkpoint dir).
    Schema inferred from usage here -- confirm against the redis payload.
    """
    print("start job")
    for obj in dictinfo:
        # Don't shadow the builtin id(); unique name for the snapshot file.
        frame_id = str(uuid.uuid1()).replace('-', '')
        cap = cv2.VideoCapture(obj['videoPat'])  # open the rtsp stream
        try:
            ret, frame = cap.read()
        finally:
            cap.release()  # always release the capture handle (was leaked)
        if not ret or frame is None:
            # Dead/unreachable stream: skip instead of crashing on imwrite.
            print("failed to grab a frame from %s" % obj['videoPat'])
            continue
        img_path = obj['storagePath'] + frame_id + ".jpg"
        cv2.imwrite(img_path, frame)  # snapshot one frame to disk
        # Convert exactly as at training time (100x100 RGB, see read_one_image).
        data = [read_one_image(img_path)]
        with tf.Session() as sess:
            # Rebuild the graph stored in the .meta file, then restore the
            # latest checkpointed weights into it.
            saver = tf.train.import_meta_graph(obj['metaPath'])
            saver.restore(sess, tf.train.latest_checkpoint(obj['modelPath']))
            graph = tf.get_default_graph()
            x = graph.get_tensor_by_name("x:0")  # input tensor from training
            logits = graph.get_tensor_by_name("logits_eval:0")  # class scores
            classification_result = sess.run(logits, {x: data})
            print(classification_result)
            # Row-wise argmax -> predicted class index per image.
            output = tf.argmax(classification_result, 1).eval()
            print(output)
            print(flower_dict[output[0]])
        msg = [{'val': str(output[0]), 'epid': "****"}]
        producer(json.dumps(msg))
    print("end job")


def main():
    """Load the camera/model config from redis and run the analysis job
    every three minutes."""
    r = redis.Redis(host='127.16.1.10', password='123456', port=6379)
    json_str = r.get('test').decode('utf-8')
    # The stored value uses single quotes; convert to valid JSON first.
    dictinfo = json.loads(json_str.replace("'", '"'))
    # BUG fix: do() must receive the callable and its args separately.
    # The original do(job(dictinfo)) ran the job once immediately and
    # scheduled its None return value, so nothing ever ran on schedule.
    schedule.every(3).minutes.do(job, dictinfo)  # every three minutes
    while True:
        schedule.run_pending()
        time.sleep(1)  # yield the CPU instead of busy-waiting at 100%


# Script entry point: only start the scheduler when run directly.
if __name__ == '__main__':
    main()

你可能感兴趣的:(人工智能,视频分析)