Building an HLS Live/VOD Cloud Service on K8S
Related links
- Kubernetes core concepts: Pod and networking
- Kubernetes core concepts: Label, RC, HA, Deployment
- Kubernetes core concepts: StatefulSet, Service
- Kubernetes storage
- Installing a K8S test environment with MINIKUBE (installation inside China)
- Installing k8s with kubeadm and configuring the cluster
- Building an HLS live/VOD cloud service on K8S
- Building a VR live/VOD cloud service on K8S
Nginx itself is an excellent HTTP server, and FFmpeg is an excellent audio/video toolkit. Glued together by the nginx module nginx-rtmp-module, the two can be combined into a reasonably full-featured streaming media server. Adding K8S cluster management on top lets you stand up a live/VOD cloud service quickly.
For the nginx configuration, see HLS - Building an Nginx streaming VOD service (SaaS docker).
Pod module architecture diagram
Module walkthrough
To be continued
Container orchestration
The container management code is omitted here.
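All of the manifests below live in the mt-math namespace, so it has to exist before anything is applied. A minimal sketch (the namespace name is simply the one used throughout this article):
# create the namespace referenced by every resource below
kubectl create namespace mt-math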
#Nginx VOD service
#nginx-hls.conf
worker_processes auto;
#error_log logs/error.log;
events {
    worker_connections 1024;
}
http {
    sendfile off;
    tcp_nopush on;
    directio 512;
    # aio on;

    # HTTP server required to serve the player and HLS fragments
    server {
        listen 8080;

        # Serve HLS fragments
        location /hls {
            types {
                application/vnd.apple.mpegurl m3u8;
                video/mp2t ts;
            }
            root /mnt; # media root directory
            add_header Cache-Control no-cache; # Disable cache

            # CORS setup
            add_header 'Access-Control-Allow-Origin' '*' always;
            add_header 'Access-Control-Expose-Headers' 'Content-Length';

            # Allow CORS preflight requests
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' '*';
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain charset=UTF-8';
                add_header 'Content-Length' 0;
                return 204;
            }
        }

        # Serve DASH fragments
        location /dash {
            types {
                application/dash+xml mpd;
                video/mp4 mp4;
            }
            root /mnt;
            add_header Cache-Control no-cache; # Disable cache

            # CORS setup
            add_header 'Access-Control-Allow-Origin' '*' always;
            add_header 'Access-Control-Expose-Headers' 'Content-Length';

            # Allow CORS preflight requests
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' '*';
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain charset=UTF-8';
                add_header 'Content-Length' 0;
                return 204;
            }
        }
    }
}
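On the VOD side the server simply serves whatever .m3u8/.ts files sit under /mnt/hls, so content has to be segmented in advance. A minimal sketch with ffmpeg, assuming a local input.mp4 and that /mnt is the NFS-backed directory the pods mount (file and directory names here are placeholders):
# create the output directory on the shared volume
mkdir -p /mnt/hls/demo
# segment input.mp4 into an HLS playlist plus .ts fragments under /mnt/hls/demo/
ffmpeg -i input.mp4 -c:v libx264 -c:a aac \
       -hls_time 10 -hls_list_size 0 \
       -f hls /mnt/hls/demo/index.m3u8
The resulting playlist is then reachable through the VOD service as /hls/demo/index.m3u8.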
#Nginx live-streaming service
#nginx-rtmp.conf
worker_processes auto;
#error_log logs/error.log;
events {
    worker_connections 1024;
}
http {
    sendfile off;
    tcp_nopush on;
    directio 512;
    # aio on;

    # HTTP server required to serve the player and HLS fragments
    server {
        listen 8080;

        # This URL provides RTMP statistics in XML
        location /stat {
            rtmp_stat all;
            rtmp_stat_stylesheet stat.xsl; # Use stat.xsl stylesheet
        }
        location /stat.xsl {
            # XML stylesheet to view RTMP stats.
            root /usr/local/nginx/html;
        }
        location /players {
            root /usr/local/nginx/html;
        }
    }
}
rtmp {
    server {
        listen 1935; # Listen on standard RTMP port
        chunk_size 4000;
        # ping 30s;
        # notify_method get;

        # This application accepts the incoming stream
        application live {
            live on; # Allows live input
            # For each received stream, transcode for adaptive streaming.
            # This single ffmpeg command takes the input and transforms
            # the source into 4 different streams with different bitrates
            # and qualities. These settings respect the aspect ratio.
            exec_push /usr/local/bin/ffmpeg -i rtmp://localhost:1935/$app/$name -async 1 -vsync -1
                -c:v libx264 -c:a aac -b:v 256k  -b:a 64k  -vf "scale=480:trunc(ow/a/2)*2"  -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_low
                -c:v libx264 -c:a aac -b:v 768k  -b:a 128k -vf "scale=720:trunc(ow/a/2)*2"  -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_mid
                -c:v libx264 -c:a aac -b:v 1024k -b:a 128k -vf "scale=960:trunc(ow/a/2)*2"  -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_high
                -c:v libx264 -c:a aac -b:v 1920k -b:a 128k -vf "scale=1280:trunc(ow/a/2)*2" -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_hd720
                -c copy -f flv rtmp://localhost:1935/show/$name_src;
        }

        # This is the HLS application
        application show {
            live on; # Allows live input from the application above
            deny play all; # disable consuming the stream from nginx as rtmp

            hls on; # Enable HTTP Live Streaming
            hls_fragment 3;
            hls_playlist_length 20;
            hls_path /mnt/hls/; # hls fragments path

            # Instruct clients to adjust resolution according to bandwidth
            hls_variant _src   BANDWIDTH=4096000; # Source bitrate, source resolution
            hls_variant _hd720 BANDWIDTH=2048000; # High bitrate, HD 720p resolution
            hls_variant _high  BANDWIDTH=1152000; # High bitrate, higher-than-SD resolution
            hls_variant _mid   BANDWIDTH=448000;  # Medium bitrate, SD resolution
            hls_variant _low   BANDWIDTH=288000;  # Low bitrate, sub-SD resolution

            # MPEG-DASH
            dash on;
            dash_path /mnt/dash/; # dash fragments path
            dash_fragment 3;
            dash_playlist_length 20;
        }
    }
}
#Create the ConfigMap (both config files go into one ConfigMap, in the same namespace as the deployments)
kubectl create configmap nginx-rtmp-hls -n mt-math \
    --from-file=nginx-rtmp.conf=./nginx-rtmp.conf \
    --from-file=nginx-hls.conf=./nginx-hls.conf
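To confirm both configuration files made it into the ConfigMap before wiring it into the deployments, something like:
# the Data section should list nginx-rtmp.conf and nginx-hls.conf
kubectl describe configmap nginx-rtmp-hls -n mt-math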
#NFS storage module
#pv-pvc-nfs.yml
#---------------- Create the PV ---------------------
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs-pv01              # name of the PV; several can be created (PVs are cluster-scoped, so no namespace)
  labels:
    pv: pv-nfs-01                # PV label; the PVC below binds to this specific label. Without a label, binding falls back to matching access mode and storage size. (important)
spec:
  capacity:
    storage: 1Gi                 # size of pv-nfs-pv01 is 1Gi
  accessModes:
    - ReadWriteMany
  nfs:                           # backing store of the PV
    path: /NFS/pv01              # exported directory on the NFS server
    server: 192.168.0.14         # NFS server IP
---
#---------------- Create the PVC ------------------
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-data-pvc             # name of the PVC
  namespace: mt-math             # namespace it belongs to
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi               # requested size, 1Gi; must not exceed the PV capacity above or the claim will never bind
  selector:                      # label selector; k8s matches label, storage and access mode, and all three must be satisfied to bind. Without it, binding matches on storage size and access mode only.
    matchLabels:
      pv: pv-nfs-01              # request the PV labeled pv-nfs-01
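The NFS export itself is a prerequisite: 192.168.0.14 has to export /NFS/pv01 to the cluster nodes before the PV is usable. Assuming that is in place, applying the manifest and checking that the claim binds might look like:
kubectl apply -f pv-pvc-nfs.yml
# the PVC should report STATUS=Bound against pv-nfs-pv01
kubectl get pv pv-nfs-pv01
kubectl get pvc nfs-data-pvc -n mt-math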
#nginx live-streaming module
#nginx-rtm-deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-rtmp
  namespace: mt-math                       # namespace it belongs to
  labels:
    app: nginx-rtmp
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-rtmp-pod
  template:
    metadata:
      labels:
        app: nginx-rtmp-pod
    spec:
      containers:
        - name: nginx-rtmp
          image: alqutami/rtmp-hls:latest-alpine
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 8080
            - name: rtmp
              containerPort: 1935
          volumeMounts:
            - name: nfs-media-data
              mountPath: /mnt/hls
            - name: nginx-config                 # name of the config volume below
              mountPath: /etc/nginx/nginx.conf   # mount as the main nginx config file
              subPath: nginx-rtmp.conf           # must match the file name projected by the ConfigMap item
      volumes:
        - name: nfs-media-data
          persistentVolumeClaim:
            claimName: nfs-data-pvc              # the NFS-backed PVC created above
        - name: nginx-config
          configMap:
            name: nginx-rtmp-hls                 # name of the ConfigMap created above
            items:
              - key: nginx-rtmp.conf             # key is the file name in the ConfigMap
                path: nginx-rtmp.conf            # file name inside the volume
      #imagePullSecrets:
      #  - name: xxx                             # harbor
#nginx HLS VOD module
#nginx-hls-deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-hls
  namespace: mt-math                       # namespace it belongs to
  labels:
    app: nginx-hls
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-hls-pod
  template:
    metadata:
      labels:
        app: nginx-hls-pod
    spec:
      containers:
        - name: nginx-hls
          image: alqutami/rtmp-hls:latest-alpine
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 8080
          volumeMounts:
            - name: nfs-media-data
              mountPath: /mnt/hls
            - name: nginx-config                 # name of the config volume below
              mountPath: /etc/nginx/nginx.conf   # mount as the main nginx config file
              subPath: nginx-hls.conf            # must match the file name projected by the ConfigMap item
      volumes:
        - name: nfs-media-data
          persistentVolumeClaim:
            claimName: nfs-data-pvc              # the NFS-backed PVC created above
        - name: nginx-config
          configMap:
            name: nginx-rtmp-hls                 # name of the ConfigMap created above
            items:
              - key: nginx-hls.conf              # key is the file name in the ConfigMap
                path: nginx-hls.conf             # file name inside the volume
      #imagePullSecrets:
      #  - name: xxx                             # harbor
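With the ConfigMap and PVC in place, both deployments can be rolled out and checked in the usual way:
kubectl apply -f nginx-rtm-deployment.yml -f nginx-hls-deployment.yml
kubectl rollout status deployment/nginx-rtmp -n mt-math
kubectl rollout status deployment/nginx-hls -n mt-math
# 3 + 3 pods should end up Running
kubectl get pods -n mt-math -l 'app in (nginx-rtmp-pod, nginx-hls-pod)'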
#External-facing services
#service.yml
apiVersion: v1
kind: Service
metadata:
  name: nginx-rtmp-service
  namespace: mt-math                 # namespace it belongs to
spec:
  selector:
    app: nginx-rtmp-pod              # load-balance across the pods labeled app=nginx-rtmp-pod
  type: NodePort                     # expose a port on every node
  ports:
    - protocol: TCP                  # protocol of the port
      port: 1935                     # port exposed inside the cluster
      targetPort: 1935               # port on the target pod
      nodePort: 31935                # port exposed on the nodes; must fall within the default NodePort range 30000-32767
  # - protocol: TCP                  # Service protocol must be TCP/UDP, not HTTP
  #   port: 8080                     # port exposed inside the cluster
  #   targetPort: 8080               # port on the target pod
  #   nodePort: 30088                # pick an unused port within the NodePort range
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-hls-service
  namespace: mt-math                 # namespace it belongs to
spec:
  selector:
    app: nginx-hls-pod               # load-balance across the pods labeled app=nginx-hls-pod
  type: NodePort                     # expose a port on every node
  ports:
    - protocol: TCP                  # protocol of the port
      port: 8080                     # port exposed inside the cluster
      targetPort: 8080               # port on the target pod
      nodePort: 30080                # port exposed on the nodes; must fall within the default NodePort range 30000-32767
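Once both services are up, the pipeline can be exercised end to end. A rough sketch, assuming a node reachable at NODE_IP, the nodePort values above, and a stream key of test (all placeholders):
# publish a local file to the live application; nginx-rtmp transcodes it into the show application
ffmpeg -re -i input.mp4 -c:v libx264 -c:a aac -f flv rtmp://NODE_IP:31935/live/test

# play back the adaptive HLS master playlist written to the shared NFS volume
ffplay http://NODE_IP:30080/hls/test.m3u8
Because the HLS fragments land on the shared NFS volume, any of the nginx-hls pods behind the NodePort can serve the playback request, regardless of which nginx-rtmp pod received the publish.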
Drawbacks of this approach
Because playback works by reading .ts segment files, there is a noticeable delay when this setup is used for live streaming. With the segment duration set to 1 second the latency is at its lowest; in testing it came out to roughly 5-10 seconds, which is still acceptable for typical live-streaming audiences.
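The segment duration mentioned above is controlled by the HLS directives inside the show application; a hedged sketch of the lower-latency variant of that block (shorter fragments mean more HTTP requests and less buffering headroom, so treat the values as a starting point):
# inside application show { ... } in nginx-rtmp.conf
hls_fragment 1;           # 1-second .ts segments, the low-latency setting referred to above
hls_playlist_length 4;    # keep the playlist short so players start near the live edge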