使用 Yolov8 Flask 自定義訓練實時火災和煙霧檢測
近年來,人工智能和機器學習的進步徹底改變了包括公共安全在內的各個行業。這些技術在火災和煙霧檢測方面取得了顯著進展,這對於早期預警系統和高效的應急響應至關重要。實現這一目標的最有效方法之一是將YOLOv8強大的目標檢測能力與基於Python的輕量級Web框架Flask的靈活性相結合。它們共同構成了一個通過視頻流實現的強大實時火災和煙霧檢測解決方案。
本文開發了一個專門用於火災和煙霧檢測的自定義訓練YOLOv8模型。用於此訓練的數據集可在Kaggle上找到,如果需要重新訓練模型,訓練腳本也可供使用。
數據集:
https://www.kaggle.com/code/deepaknr/yolov8-fire-and-smoke-detection
訓練腳本:
實際示例:使用YOLOv8和Flask進行火災和煙霧檢測
假設一個實際場景,您需要監控一個有火災風險的工業場地。通過攝像頭建立實時視頻流并利用YOLOv8模型的火災檢測功能,您可以及早識別火災或煙霧,從而預防潛在的災難。以下是一個Python代碼片段,展示了如何將YOLOv8與Flask集成以實現火災和煙霧檢測。
import os
import cv2
import numpy as np
from flask import Flask, render_template, Response, request
from werkzeug.utils import secure_filename
from ultralytics import YOLO

app = Flask(__name__)

# Path to the custom-trained YOLOv8 weights — placeholder, replace with a real path.
YOLOV8_MODEL_PATH = 'path-to-yolov8-model'
# Video container extensions accepted by the /upload endpoint.
ALLOWED_EXTENSIONS = {'mp4', 'avi', 'mov'}
# Path of the most recently uploaded video; written by upload(), read by
# generate_frames(). NOTE(review): module-level mutable state — every client
# shares the same video; presumably a single-user demo, confirm before reuse.
video_path = None
def allowed_file(filename):
    """Return True when *filename* ends with an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
# Load the detector once at import time so every request reuses the same instance.
model = YOLO(YOLOV8_MODEL_PATH)
@app.route('/')
def index():
    """Serve the upload page."""
    template_name = 'index.html'
    return render_template(template_name)
@app.route('/upload', methods=['POST'])
def upload():
    """Accept a video upload, save it under uploads/, and remember its path.

    Returns the index page on success, or a 400 response when the request has
    no file part, the filename is empty, or the extension is not allowed.
    """
    global video_path
    if 'file' not in request.files:
        return 'No file part', 400
    file = request.files['file']
    # Browsers submit a part with an empty filename when no file was chosen;
    # secure_filename('') would also return '' and corrupt the save path.
    if not file or file.filename == '':
        return 'No selected file', 400
    if not allowed_file(file.filename):
        return 'Invalid file type', 400
    filename = secure_filename(file.filename)
    # Ensure the target directory exists even when the app is launched by a
    # WSGI server that never executes the __main__ block below.
    os.makedirs('uploads', exist_ok=True)
    filepath = os.path.join('uploads', filename)
    file.save(filepath)
    video_path = filepath
    return render_template('index.html')
def generate_frames():
    """Yield multipart-MJPEG chunks of the uploaded video with detections drawn.

    Runs the YOLOv8 model on every frame (conf threshold 0.35), highlights each
    detection with a semi-transparent white overlay, a colored bounding box and
    a class/confidence label, then yields the frame as a JPEG part suitable for
    a multipart/x-mixed-replace response. Yields nothing when no video has been
    uploaded yet.
    """
    global video_path
    if video_path is None:
        return
    cap = cv2.VideoCapture(video_path)
    alpha = 0.4  # opacity of the white highlight layer inside each box
    try:
        while True:
            success, frame = cap.read()
            if not success:
                break  # end of stream or unreadable file
            result = model(frame, verbose=False, conf=0.35)[0]
            bboxes = np.array(result.boxes.xyxy.cpu(), dtype="int")
            classes = np.array(result.boxes.cls.cpu(), dtype="int")
            confidence = np.array(result.boxes.conf.cpu(), dtype="float")
            for cls, bbox, conf in zip(classes, bboxes, confidence):
                x1, y1, x2, y2 = bbox
                object_name = model.names[cls]
                # BGR colors: orange-ish for fire, gray for everything else (smoke).
                color = (19, 127, 240) if object_name == 'fire' else (145, 137, 132)
                # Guard against degenerate boxes: cv2.addWeighted raises on an
                # empty crop.
                if x2 > x1 and y2 > y1:
                    cropped_image = frame[int(y1):int(y2), int(x1):int(x2)]
                    white_layer = np.ones(cropped_image.shape, dtype=np.uint8) * 255
                    cropped_image = cv2.addWeighted(cropped_image, 1 - alpha, white_layer, alpha, 0)
                    frame[int(y1):int(y2), int(x1):int(x2)] = cropped_image
                # Keep the filled label strip inside the image when the box
                # touches the top edge (y1 - 30 could be negative).
                label_top = max(y1 - 30, 0)
                cv2.rectangle(frame, (x1, label_top), (x1 + 200, y1), color, -1)
                cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                cv2.putText(frame, f"{object_name.capitalize()}: {conf * 100:.2f}%", (x1, y1 - 5),
                            cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1)
            ok, buffer = cv2.imencode('.jpg', frame)
            if not ok:
                continue  # skip frames that fail to encode instead of crashing
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
    finally:
        # Release the capture even when the client disconnects mid-stream
        # (GeneratorExit) or an exception escapes the loop — the original
        # only released on clean end-of-file.
        cap.release()
@app.route('/video_feed')
def video_feed():
    """Stream annotated frames as an MJPEG (multipart/x-mixed-replace) response."""
    mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return Response(generate_frames(), mimetype=mimetype)
if __name__ == '__main__':
    # Make sure the upload target exists before serving any requests.
    # (Original line here was garbled by a double paste: the trailing
    # "import os" fused onto app.run(...) was a syntax error; os is already
    # imported at the top of the file.)
    os.makedirs('uploads', exist_ok=True)
    # debug=True is for development only — disable before deploying.
    app.run(host='0.0.0.0', port=5000, debug=True)
# NOTE(review): everything from here to the end of the script is an exact
# duplicate of the code above — almost certainly an accidental double paste in
# the source article. It rebinds `app`, `model`, and every helper, and
# re-registers the routes on the NEW Flask instance (which is therefore the
# one that actually serves). Recommend deleting this whole section instead of
# maintaining it.
import cv2
import numpy as np
from flask import Flask, render_template, Response, request
from werkzeug.utils import secure_filename
from ultralytics import YOLO

app = Flask(__name__)
# Placeholder path to the custom-trained YOLOv8 weights.
YOLOV8_MODEL_PATH = 'path-to-yolov8-model'
# Video extensions accepted by /upload.
ALLOWED_EXTENSIONS = {'mp4', 'avi', 'mov'}
# Shared path of the most recently uploaded video.
video_path = None


def allowed_file(filename):
    # True when the filename carries an allowed video extension.
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


# Load the detector once so every request reuses the same instance.
model = YOLO(YOLOV8_MODEL_PATH)


@app.route('/')
def index():
    # Serve the upload page.
    return render_template('index.html')


@app.route('/upload', methods=['POST'])
def upload():
    # Save an uploaded video under uploads/ and remember its path.
    global video_path
    if 'file' not in request.files:
        return 'No file part', 400
    file = request.files['file']
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        filepath = os.path.join('uploads', filename)
        file.save(filepath)
        video_path = filepath
        return render_template('index.html')
    return 'Invalid file type', 400


def generate_frames():
    # Yield MJPEG parts: each frame annotated with YOLOv8 fire/smoke detections.
    global video_path
    if video_path is None:
        return None
    cap = cv2.VideoCapture(video_path)
    alpha = 0.4  # opacity of the white highlight inside each detection box
    while True:
        success, frame = cap.read()
        if not success:
            break
        result = model(frame, verbose=False, conf=0.35)[0]
        bboxes = np.array(result.boxes.xyxy.cpu(), dtype="int")
        classes = np.array(result.boxes.cls.cpu(), dtype="int")
        confidence = np.array(result.boxes.conf.cpu(), dtype="float")
        for cls, bbox, conf in zip(classes, bboxes, confidence):
            (x1, y1, x2, y2) = bbox
            object_name = model.names[cls]
            if object_name == 'fire':
                color = (19, 127, 240)  # BGR: orange-ish for fire
            else:
                color = (145, 137, 132)  # gray for smoke / other classes
            cropped_image = frame[int(y1):int(y2), int(x1):int(x2)]
            white_layer = np.ones(cropped_image.shape, dtype=np.uint8) * 255
            cropped_image = cv2.addWeighted(cropped_image, 1 - alpha, white_layer, alpha, 0)
            frame[int(y1):int(y2), int(x1):int(x2)] = cropped_image
            cv2.rectangle(frame, (x1, y1 -30), (x1 + 200, y1), color, -1)
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
            cv2.putText(frame, f"{object_name.capitalize()}: {conf * 100:.2f}%", (x1, y1 - 5), cv2.FONT_HERSHEY_DUPLEX,
                        0.8, (255, 255, 255), 1)
        ret, buffer = cv2.imencode('.jpg', frame)
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
    cap.release()


@app.route('/video_feed')
def video_feed():
    # Stream annotated frames as multipart/x-mixed-replace MJPEG.
    return Response(generate_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    # Ensure the upload directory exists, then serve on all interfaces.
    os.makedirs('uploads', exist_ok=True)
    app.run(host='0.0.0.0', port=5000, debug=True)
主要函數說明:
- def generate_frames():此函數從上傳的視頻中提取幀,并利用YOLOv8模型進行目標檢測,特別是針對火災和煙霧等元素。幀上會渲染帶有相應類別標簽(火災、煙霧)的邊界框。為了增強可見性,在檢測到物體的區域應用了半透明的白色覆蓋層。處理后的幀被轉換為JPEG格式,并持續輸出以生成視頻流。
- def video_feed():此路由使用generate_frames函數將處理后的視頻幀作為HTTP響應流式傳輸。它使用MIME類型multipart/x-mixed-replace向Web客戶端發送JPEG圖像流。
應用程序啟動:
if __name__ == '__main__':
os.makedirs('uploads', exist_ok=True)
app.run(host='0.0.0.0', port=5000, debug=True)
如果直接運行腳本,它會確保uploads目錄存在,然后在端口5000上啟動Flask應用程序,并監聽所有接口(0.0.0.0)。