import sys
import cv2
import numpy as np
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QPushButton, QVBoxLayout, QWidget
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QImage, QPixmap
class RTPPlayer(QMainWindow):
    """Qt window that plays an RTP/H.264 stream decoded by GStreamer.

    A GStreamer pipeline depayloads and decodes incoming RTP packets into raw
    RGB frames; an appsink callback (GStreamer streaming thread) stores the
    most recent frame, and a Qt timer repaints the QLabel on the GUI thread.
    """

    def __init__(self):
        super().__init__()
        # Most recent decoded frame as a BGR uint8 ndarray; None until the
        # first sample arrives.  Initializing here lets update_frame use a
        # plain None check instead of hasattr().
        self.latest_frame = None
        self.initUI()
        self.initGStreamer()
        # Timer drives UI refresh only; decoding happens on GStreamer's thread.
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(30)  # ~33 fps refresh interval

    def initUI(self):
        """Build the window: a fixed-size video area above a Play/Stop button."""
        self.setWindowTitle('RTP H.264 Player')
        self.setGeometry(100, 100, 800, 600)
        # Video display area
        self.video_label = QLabel(self)
        self.video_label.setFixedSize(800, 600)
        self.video_label.setAlignment(Qt.AlignCenter)
        # Control button
        self.play_btn = QPushButton('Play', self)
        self.play_btn.clicked.connect(self.toggle_play)
        # Layout
        layout = QVBoxLayout()
        layout.addWidget(self.video_label)
        layout.addWidget(self.play_btn)
        container = QWidget()
        container.setLayout(layout)
        self.setCentralWidget(container)

    def initGStreamer(self):
        """Create the GStreamer pipeline and hook up the appsink callback."""
        import gi
        gi.require_version('Gst', '1.0')
        from gi.repository import Gst
        # BUG FIX: Gst was previously only a function-local import here, yet
        # toggle_play / on_new_sample / closeEvent reference it too, which
        # raised NameError.  Keep a reference on the instance instead.
        self.Gst = Gst
        Gst.init(None)
        # Pipeline notes (adjust address/port for the actual RTP source):
        #  - "video/x-raw,format=RGB" pins videoconvert's output so the
        #    ndarray layout assumed in on_new_sample (H x W x 3, RGB) holds.
        #  - "emit-signals=true" is required: appsink does NOT emit
        #    "new-sample" by default, so without it no frames ever arrive.
        #  - "sync=false" renders frames as soon as they are decoded.
        self.pipeline_str = """
            rtpbin name=rtpbin latency=0
            udpsrc address=127.0.0.1 port=5000 !
            application/x-rtp,encoding-name=H264,payload=96 !
            rtpbin.recv_rtp_sink_0
            rtpbin. ! rtph264depay ! avdec_h264 ! videoconvert !
            video/x-raw,format=RGB !
            appsink name=sink emit-signals=true sync=false
        """
        self.pipeline = Gst.parse_launch(self.pipeline_str)
        self.appsink = self.pipeline.get_by_name('sink')
        # Invoked on GStreamer's streaming thread for every decoded frame.
        self.appsink.connect('new-sample', self.on_new_sample)

    def toggle_play(self):
        """Start or stop the pipeline, flipping the button label to match."""
        if self.play_btn.text() == 'Play':
            self.pipeline.set_state(self.Gst.State.PLAYING)
            self.play_btn.setText('Stop')
        else:
            self.pipeline.set_state(self.Gst.State.NULL)
            self.play_btn.setText('Play')

    def on_new_sample(self, sink):
        """appsink "new-sample" callback: stash the latest frame as BGR.

        Runs on the GStreamer streaming thread, so it only stores the frame;
        all Qt painting happens in update_frame on the GUI thread.
        Returns Gst.FlowReturn.OK so the pipeline keeps flowing.
        """
        sample = sink.emit('pull-sample')
        if sample is None:
            # Can happen at EOS / teardown; nothing to render.
            return self.Gst.FlowReturn.OK
        buf = sample.get_buffer()
        structure = sample.get_caps().get_structure(0)
        height = structure.get_value('height')
        width = structure.get_value('width')
        # extract_dup copies the buffer, so the ndarray owns stable memory
        # even after GStreamer recycles the GstBuffer.
        data = buf.extract_dup(0, buf.get_size())
        frame = np.ndarray((height, width, 3), dtype=np.uint8, buffer=data)
        # Pipeline delivers RGB (pinned by caps); convert once to BGR here.
        self.latest_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        return self.Gst.FlowReturn.OK

    def update_frame(self):
        """Timer slot: paint the most recent frame, scaled to the label."""
        if self.latest_frame is None:
            return
        h, w, ch = self.latest_frame.shape
        bytes_per_line = ch * w
        # Frame is BGR (see on_new_sample).  Format_BGR888 requires Qt >= 5.14.
        qt_image = QImage(self.latest_frame.data, w, h, bytes_per_line,
                          QImage.Format_BGR888)
        pixmap = QPixmap.fromImage(qt_image).scaled(
            self.video_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation
        )
        self.video_label.setPixmap(pixmap)

    def closeEvent(self, event):
        """Stop the pipeline cleanly before the window closes."""
        self.pipeline.set_state(self.Gst.State.NULL)
        event.accept()
def main() -> None:
    """Create the Qt application, show the player window, and run the loop."""
    app = QApplication(sys.argv)
    player = RTPPlayer()
    player.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()