【k8s多集群管理平台开发实践】二、实现k8s节点的列表,节点调度,节点排水功能
本章节主要讲解通过client-go实现读取worker节点的列表,并实现将节点进行调度设置、和排水处理功能。本章节中代码均调试通过,可以直接复制过去调试。最后会附上完整的go文件代码。
文章目录
简介
本章节主要讲解通过client-go实现读取worker节点的列表,并实现将节点进行调度设置、和排水处理功能。本章节中代码均调试通过,该文章全系都是采用beego、layui框架、layuimini模板,本章主要从控制器部分代码、模型部分代码、路由部分代码、前端html代码进行讲解,最后会附上完整的go文件代码。
一.读取k8s的节点列表功能
1.1.controllers控制器代码
beego采用mvc框架,Controller控制器,接收数据,数据处理。传输数据给M和V并接收应答,返回应答给浏览器。通过传递集群id来读取不同集群的节点列表,在controllers目录下新建node.go,代码参考如下:
// List responds with the node list of the cluster identified by the
// "clusterId" query parameter, wrapped in the layui table envelope
// ({code, msg, count, data}).
func (c *NodeController) List() {
	clusterId := c.GetString("clusterId")
	nodeList, err := m.NodeList(clusterId) // NodeList is defined in the models package
	code, msg := 0, "success"
	if err != nil {
		log.Println(err)
		code, msg = -1, err.Error()
	}
	c.Data["json"] = &map[string]interface{}{
		"code":  code,
		"msg":   msg,
		"count": len(nodeList),
		"data":  &nodeList,
	}
	c.ServeJSON()
}
1.2.models模型代码
Model模型部分,先定义一个节点的结构体,然后通过api读取节点的信息,并赋值到结构体
Kubenode
,然后追加到一个结构体列表var NodeArry = make([]Kubenode, 0)
,在models目录下新建nodeModel.go,代码参考如下:
// Kubenode is the flattened, display-ready summary of one cluster
// node, serialised as JSON for the layui table on the front end.
type Kubenode struct {
NodeName string `json:"nodeName"` // node name
NodeIp string `json:"nodeIp"` // node IP taken from status.addresses
NodeRole string `json:"nodeRole"` // role read from the node labels
NodeState string `json:"nodeState"` // condition reported as "<Type>:<Status>", e.g. "Ready:True"
NodeInfo string `json:"nodeInfo"` // kernel / OS / kubelet / container runtime versions, comma-joined
PodCIDR string `json:"podCIDR"` // pod CIDR assigned to the node
Capacity string `json:"capacity"` // total capacity: CPU/memory/ephemeral disk/pods
Allocatable string `json:"allocatable"` // allocatable resources: CPU/memory/ephemeral disk/pods
CreateTime string `json:"createTime"` // node creation timestamp ("2006-01-02 15:04:05")
}
// NodeConditions mirrors one entry of a node's status.conditions.
// NOTE(review): "Hertbeat" is a misspelling of "Heartbeat"; the field
// and its JSON key are kept as-is so existing consumers don't break.
type NodeConditions struct {
Ctype string `json:"ctype"`
Status string `json:"status"`
LastHertbeatTime string `json:"lastHertbeatTime"`
LastTransitionTime string `json:"lastTransitionTime"`
Reason string `json:"reason"`
Message string `json:"message"`
}
// NodeList returns one display-ready summary row per node in the
// cluster identified by kubeconfig. It returns a nil slice and the
// underlying error when the node list cannot be fetched.
func NodeList(kubeconfig string) ([]Kubenode, error) {
	// Build a typed clientset from the cluster's kubeconfig.
	clientset := common.ClientSet(kubeconfig)
	nodeList, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Printf("list nodes error:%v\n", err)
		// FIX: return early — the original fell through and ranged
		// over a possibly-nil nodeList, which would panic.
		return nil, err
	}
	nodes := make([]Kubenode, 0, len(nodeList.Items))
	for _, node := range nodeList.Items {
		// Displayed state: the first condition whose status is
		// True/Unknown wins (on a healthy node that is Ready:True);
		// otherwise the last condition seen is shown.
		nodeStatus := "Ready:True"
		for _, cond := range node.Status.Conditions {
			nodeStatus = fmt.Sprintf("%s:%s", cond.Type, cond.Status)
			if cond.Status == "True" || cond.Status == "Unknown" {
				break
			}
		}
		// NOTE(review): "node-role.kubernetes.io/work" looks like a
		// site-specific label key — confirm it is not meant to be the
		// standard "node-role.kubernetes.io/<role>" prefix scheme.
		nodeRole := node.Labels["node-role.kubernetes.io/work"]
		if nodeRole == "" {
			nodeRole = "Worker"
		}
		// FIX: prefer the InternalIP and tolerate nodes that report no
		// addresses (the original indexed Addresses[0] unconditionally,
		// which panics on an empty address list).
		nodeIP := ""
		for _, addr := range node.Status.Addresses {
			if nodeIP == "" {
				nodeIP = addr.Address
			}
			if addr.Type == "InternalIP" {
				nodeIP = addr.Address
				break
			}
		}
		nodes = append(nodes, Kubenode{
			NodeName:  node.Name,
			NodeIp:    nodeIP,
			NodeRole:  nodeRole,
			NodeState: nodeStatus,
			NodeInfo: fmt.Sprintf("KernelVersion:%s,OS:%s,KubeletVersion:%s,Container:%s",
				node.Status.NodeInfo.KernelVersion, node.Status.NodeInfo.OSImage,
				node.Status.NodeInfo.KubeletVersion, node.Status.NodeInfo.ContainerRuntimeVersion),
			PodCIDR: fmt.Sprintf("PodCIDR:%s", node.Spec.PodCIDR),
			Capacity: fmt.Sprintf("CPU:%d,Mem:%dMi,Disk:%dGi,Pods:%d",
				node.Status.Capacity.Cpu().Value(), node.Status.Capacity.Memory().Value()/1024/1024,
				node.Status.Capacity.StorageEphemeral().Value()/1024/1024/1024, node.Status.Capacity.Pods().Value()),
			Allocatable: fmt.Sprintf("CPU:%d,Mem:%dMi,Disk:%dGi,Pods:%d",
				node.Status.Allocatable.Cpu().Value(), node.Status.Allocatable.Memory().Value()/1024/1024,
				node.Status.Allocatable.StorageEphemeral().Value()/1024/1024/1024, node.Status.Allocatable.Pods().Value()),
			CreateTime: node.CreationTimestamp.Time.Format("2006-01-02 15:04:05"),
		})
	}
	// The per-node NodeConditions slice the original built ("bbb") was
	// never read; that dead code has been removed.
	return nodes, nil
}
二.路由配置
路由,定义URL路径,并将路径指向到控制器的函数,由于本章我们将节点的排水和节点调度功能一同实现,先将这两个功能的路由先定义好.
package routers
import (
"myk8s/controllers"
beego "github.com/beego/beego/v2/server/web"
)
// init registers the three node-management routes; the "*" verb maps
// every HTTP method onto the named NodeController action.
func init() {
beego.Router("/node/v1/List", &controllers.NodeController{}, "*:List") //node list
beego.Router("/node/v1/Unschedulable", &controllers.NodeController{}, "*:Unschedulable") //toggle scheduling
beego.Router("/node/v1/Drain", &controllers.NodeController{}, "*:Drain") //drain node
}
三.k8s节点调度功能
3.1.controllers控制器代码
调度的控制器完整代码如下:先读取集群ID、节点名称,然后通过gjson读取unschedulable的值是true还是false,再传到models函数NodeUnschedulable中去处理。
// Unschedulable toggles scheduling for one node. Cluster and node come
// from query parameters; the desired flag is read from the JSON body
// ({"unschedulable": true|false}).
func (c *NodeController) Unschedulable() {
	clusterId := c.GetString("clusterId")
	nodeName := c.GetString("nodeName")
	// Parse the raw request body with gjson instead of binding a struct.
	wanted := gjson.ParseBytes(c.Ctx.Input.RequestBody).Get("unschedulable").Bool()
	code, msg := 0, "success"
	if err := m.NodeUnschedulable(clusterId, nodeName, wanted); err != nil {
		code, msg = -1, err.Error()
		log.Printf("[ERROR] Unschedulable Fail:%s\n", err)
	}
	c.Data["json"] = &map[string]interface{}{"code": code, "msg": msg}
	c.ServeJSON()
}
3.2.models模型代码
接收控制器传入的集群ID、节点名称、是否调度的值进行处理。
// NodeUnschedulable sets spec.unschedulable on the named node:
// true cordons it, false uncordons it.
func NodeUnschedulable(kubeconfig, nodeName string, value bool) error {
	clientset := common.ClientSet(kubeconfig)
	// Read-modify-write: fetch the current Node, flip the flag, push
	// the whole object back.
	node, getErr := clientset.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if getErr != nil {
		return getErr
	}
	node.Spec.Unschedulable = value
	_, updateErr := clientset.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{})
	return updateErr
}
四.节点排水功能
4.1.controllers控制器代码
读取集群ID和节点名称传到模型函数NodeDrain去处理
// Drain evicts the running pods from the node named in the request.
// Cluster and node are taken from query parameters; the response is
// the usual {code, msg} JSON envelope.
func (c *NodeController) Drain() {
	clusterId := c.GetString("clusterId")
	nodeName := c.GetString("nodeName")
	code, msg := 0, "success"
	if err := m.NodeDrain(clusterId, nodeName); err != nil {
		code, msg = -1, err.Error()
		log.Printf("[ERROR] NodeDrain Fail:%s\n", err)
	}
	c.Data["json"] = &map[string]interface{}{"code": code, "msg": msg}
	c.ServeJSON()
}
4.2.models模型代码
根据接收的集群ID、节点名称、然后通过节点名称读取节点上的pod列表,然后将pod进行驱逐处理。
// NodeDrain drains the named node: it cordons the node and then evicts
// every running pod on it via the Eviction API (which, unlike a plain
// delete, honours PodDisruptionBudgets).
func NodeDrain(kubeconfig, nodeName string) error {
	clientset := common.ClientSet(kubeconfig)
	// FIX: cordon first so evicted pods cannot be rescheduled straight
	// back onto this node — the front-end drain dialog already tells the
	// user the node will also be made unschedulable, but the original
	// code never did it.
	if err := NodeUnschedulable(kubeconfig, nodeName, true); err != nil {
		log.Printf("[ERROR] NodeDrain cordon Fail:%s\n", err)
		return err
	}
	// PodList (models/podList.go, see the pod chapter) returns the pods
	// scheduled on the node.
	podlist, err := PodList(kubeconfig, "", "", "", "", "", nodeName)
	if err != nil {
		log.Printf("[ERROR] NodeDrain GetPodList Fail:%s\n", err)
		return err
	}
	for _, pod := range podlist {
		// Only running pods need evicting.
		if pod.PodPhase != "Running" {
			continue
		}
		// GracePeriodSeconds stays 0: pods are killed immediately.
		// NOTE(review): consider a non-zero grace period (or nil for the
		// pod's own default) if graceful shutdown matters here.
		var gracePeriod int64
		eviction := &policyv1.Eviction{
			ObjectMeta: metav1.ObjectMeta{
				Name:      pod.PodName,
				Namespace: pod.NameSpace,
			},
			DeleteOptions: &metav1.DeleteOptions{
				GracePeriodSeconds: &gracePeriod,
			},
		}
		if err := clientset.PolicyV1().Evictions(pod.NameSpace).Evict(context.Background(), eviction); err != nil {
			log.Printf("[ERROR] NodeDrain pod Evict Fail:%s\n", err)
			return err
		}
	}
	return nil
}
五.前端部分html代码
5.1.编写公共js文件
前端部分代码:在views\front\page 目录下新建目录xkube,然后创建一个nodeList.html文件,其他的文件和目录可以删掉。由于需要通过选择不同集群来访问不同的k8s的 kubeconfig配置文件,所以我们需要将设置一个cookie,通过读取目前设置的cookie来管理对应的k8s集群。如果需要切换到其他k8s集群就可以通过更改cookie值来实现。在views/js/下新建一个xkube.js文件,代码内容如下:
//查询URL中的参数值
// Read a single query-string parameter from the current URL.
// Returns null when the parameter is absent.
function getQueryString(name) {
    let reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)", "i");
    let r = window.location.search.substr(1).match(reg);
    if (r != null) {
        // FIX: unescape() is deprecated and mangles UTF-8 percent
        // escapes; decodeURIComponent decodes them correctly.
        try {
            return decodeURIComponent(r[2]);
        } catch (e) {
            return r[2]; // malformed escape sequence — return the raw value
        }
    }
    return null;
}
//设置cookie
function setCookie(cname, cvalue, exdays) {
if ( cvalue != "" && cvalue != null ) {
var d = new Date();
d.setTime(d.getTime() + (exdays*24*60*60*1000));
var expires = "expires="+d.toUTCString();
document.cookie = cname + "=" + cvalue + "; Path=/; " + expires;
}else{
console.log("cvalue is null");
}
}
//获取cookie
function getCookie(cname) {
var name = cname + "=";
//console.log(document.cookie);
var ca = document.cookie.split(';');
for(var i=0; i<ca.length; i++) {
var c = ca[i];
while (c.charAt(0)==' ') c = c.substring(1);
if (c.indexOf(name) != -1) return c.substring(name.length, c.length);
}
return "";
}
//删除cookie
function delCookie(name) {
document.cookie = name + '=; Max-Age=-99999999;';
}
//设置默认集群,当需要设置需要管理的集群时,就调用此参数设置cookie
function SetDefaultCluster(cluster_id) {
setCookie('clusterId',cluster_id,30);
}
5.2.节点列表html完整代码
nodeList.html代码如下:
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>node列表</title>
<meta name="renderer" content="webkit">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
<!-- layui + layuimini assets; xkube.js supplies the cookie/query helpers used below -->
<link rel="stylesheet" href="/lib/layui-v2.6.3/css/layui.css" media="all">
<link rel="stylesheet" href="/css/public.css" media="all">
<script type="text/javascript" src="/lib/jquery-3.4.1/jquery-3.4.1.min.js"></script>
<script src="/lib/layui-v2.6.3/layui.js" charset="utf-8"></script>
<script src="/js/lay-config.js?v=1.0.4" charset="utf-8"></script>
<script src="/js/xkube.js?v=1.0.0" charset="utf-8"></script>
<style type="text/css">
/* Let table cells wrap onto multiple lines instead of truncating. */
.layui-table-cell {
height: auto;
line-height: 22px !important;
text-overflow: inherit;
overflow: ellipsis; /* NOTE(review): "ellipsis" is not a valid overflow value and is ignored — confirm whether "hidden" or "visible" was intended */
white-space: normal;
}
.layui-table-cell .layui-table-tool-panel li {
word-break: break-word;
}
</style>
</head>
<body>
<div class="layuimini-container">
<div class="layuimini-main">
<!-- Node table; per-row action buttons render from #currentTableBar -->
<table class="layui-table" id="currentTableId" lay-filter="currentTableFilter"></table>
<script type="text/html" id="currentTableBar">
<a class="layui-btn layui-btn-sm" lay-event="nodeDrain">节点排水</a>
<a class="layui-btn layui-btn-sm" lay-event="nodeUnschedulable">调度设置</a>
</script>
</div>
</div>
</body>
<!-- laytpl templates below split the comma-joined strings from the backend into one <span> per line -->
<script type="text/html" id="nodeInfoTpl">
{{# layui.each(d.nodeInfo.split(','), function(index, item){ }}
<span>{{ item }}<br></span>
{{# }); }}
</script>
<script type="text/html" id="capacityTpl">
{{# layui.each(d.capacity.split(','), function(index, item){ }}
<span>{{ item }}<br></span>
{{# }); }}
</script>
<script type="text/html" id="allocatableTpl">
{{# layui.each(d.allocatable.split(','), function(index, item){ }}
<span>{{ item }}<br></span>
{{# }); }}
</script>
<!-- name + IP on one cell; role + colour-coded state on another -->
<script type="text/html" id="nodeNameTpl">
<span>{{ d.nodeName }}<br>{{ d.nodeIp }}</span>
</script>
<script type="text/html" id="nodeStateTpl">
<span>{{ d.nodeRole }}</span><br>
{{# if(d.nodeState == "Ready:True") { }}
<span style="color:#009688">{{ d.nodeState }}</span>
{{# }else{ }}
<span style="color:#FF5722">{{ d.nodeState }}</span>
{{# } }}
</script>
<script>
var clusterId = getQueryString("clusterId");
if (clusterId == null) {
clusterId = getCookie("clusterId")
}
//console.log(clusterId);
layui.use(['form', 'table','miniTab'], function () {
var $ = layui.jquery,
form = layui.form,
table = layui.table;
miniTab = layui.miniTab,
miniTab.listen();
table.render({
elem: '#currentTableId',
url: '/node/v1/List?clusterId='+clusterId,
toolbar: '#toolbarDemo',
defaultToolbar: ['filter', 'exports', 'print', {
title: '提示',
layEvent: 'LAYTABLE_TIPS',
icon: 'layui-icon-tips'
}],
parseData: function(res) { //实现加载全部数据后再分页
if(this.page.curr) {
result=res.data.slice(this.limit*(this.page.curr-1),this.limit*this.page.curr);
}else{
result=res.data.slice(0,this.limit);
}
return {
"code": res.code,
"msg":'',
"count":res.count,
"data":result
};
},
cols: [[
//{type: "checkbox", width: 50},
{field: 'nodeName', title: '节点名称',hide:true},
{field: 'nodeIp', title: 'IP',hide:true},
{field: '', title: '节点名称',templet: '#nodeNameTpl'},
{field: 'nodeRole', title: '角色', sort: true,hide:true},
{field: 'nodeState', title: '状态', sort: true,hide:true},
{field: '', title: '状态', sort: true,templet: '#nodeStateTpl'},
{field: 'nodeInfo',title: '节点信息', sort: true,templet: '#nodeInfoTpl'},
{field: 'podCIDR', title: 'podCIDR', sort: true},
{field: 'capacity', title: '容量', sort: true,templet: '#capacityTpl'},
{field: 'allocatable', title: '可分配', sort: true,templet: '#allocatableTpl'},
{field: 'createTime',title: '创建时间',hide:true},
{title: '操作', minWidth: 200, toolbar: '#currentTableBar', align: "center"}
]],
//size:'lg',
limits: [25, 50, 100],
limit: 25,
page: true
});
table.on('tool(currentTableFilter)', function (obj) {
var data = obj.data;
if (obj.event === 'nodeDrain') {
layer.confirm('请确认是否要将以下进行排空节点操作(同时设置为不可调度)'+nodeName, {icon: 3, title:'排水提示',yes: function(index){
var index2 = layer.load(0, {shade: false});
layer.msg('此处需运行1-2s左右');
$.ajax({
url: '/node/v1/Drain?clusterId='+clusterId+"&nodeName="+data.nodeName,
type: "get",
success: function (resp) {
layer.close(index2);
if(resp.code == 0){
layer.msg('排水成功', {icon: 1});
}else{
layer.msg(resp.msg,{icon:2});
}
}
});
},
cancel: function(index, layero){
layer.close(index);
layer.close(index2);
console.log("不操作");
}
});
return false;
}else if (obj.event === 'nodeUnschedulable') {
layer.confirm('请确认是否要将该节点设置为不可调度'+nodeName, {icon: 3, title:'提示',yes: function(index){
var index2 = layer.load(0, {shade: false});
layer.msg('此处需运行1-2s左右');
var bodystr = {"unschedulable":true};
$.ajax({
url: '/node/v1/Unschedulable?clusterId='+clusterId+"&nodeName="+data.nodeName,
headers:{'X-Requested-With':'XMLHttpRequest'},
type: "post",
data: JSON.stringify(bodystr),
dataType: "json",
success: function (resp) {
layer.close(index2);
if(resp.code == 0){
layer.msg('更改成功', {icon: 1});
}else{
layer.msg(resp.msg,{icon:2});
}
}
});
},
cancel: function(index, layero){
layer.close(index);
layer.close(index2);
console.log("不操作");
}
});
return false;
}
});
});
</script>
</html>
六.完整的控制器和模型代码
6.1.控制器node.go代码
控制器部分node.go完整代码,放controllers目录下
package controllers
import (
"log"
m "myk8s/models"
beego "github.com/beego/beego/v2/server/web"
"github.com/tidwall/gjson"
)
// NodeController handles the /node/v1/* routes (list, drain, and the
// scheduling toggle) registered in the routers package.
type NodeController struct {
beego.Controller
}
// List responds with the node list of the cluster identified by the
// "clusterId" query parameter, wrapped in the layui table envelope
// ({code, msg, count, data}).
func (c *NodeController) List() {
	clusterId := c.GetString("clusterId")
	nodeList, err := m.NodeList(clusterId) // NodeList is defined in the models package
	code, msg := 0, "success"
	if err != nil {
		log.Println(err)
		code, msg = -1, err.Error()
	}
	c.Data["json"] = &map[string]interface{}{
		"code":  code,
		"msg":   msg,
		"count": len(nodeList),
		"data":  &nodeList,
	}
	c.ServeJSON()
}
// Drain evicts the running pods from the node named in the request.
// Cluster and node are taken from query parameters; the response is
// the usual {code, msg} JSON envelope.
func (c *NodeController) Drain() {
	clusterId := c.GetString("clusterId")
	nodeName := c.GetString("nodeName")
	code, msg := 0, "success"
	if err := m.NodeDrain(clusterId, nodeName); err != nil {
		code, msg = -1, err.Error()
		log.Printf("[ERROR] NodeDrain Fail:%s\n", err)
	}
	c.Data["json"] = &map[string]interface{}{"code": code, "msg": msg}
	c.ServeJSON()
}
// Unschedulable toggles scheduling for one node. Cluster and node come
// from query parameters; the desired flag is read from the JSON body
// ({"unschedulable": true|false}).
func (c *NodeController) Unschedulable() {
	clusterId := c.GetString("clusterId")
	nodeName := c.GetString("nodeName")
	// Parse the raw request body with gjson instead of binding a struct.
	wanted := gjson.ParseBytes(c.Ctx.Input.RequestBody).Get("unschedulable").Bool()
	code, msg := 0, "success"
	if err := m.NodeUnschedulable(clusterId, nodeName, wanted); err != nil {
		code, msg = -1, err.Error()
		log.Printf("[ERROR] Unschedulable Fail:%s\n", err)
	}
	c.Data["json"] = &map[string]interface{}{"code": code, "msg": msg}
	c.ServeJSON()
}
6.2模型完整代码
nodeModel.go模型部分代码放models目录下
// nodeModel.go
package models
import (
"context"
"fmt"
"log"
"myk8s/common"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//"k8s.io/apimachinery/pkg/runtime"
//"sigs.k8s.io/yaml"
)
// Kubenode is the flattened, display-ready summary of one cluster
// node, serialised as JSON for the layui table on the front end.
type Kubenode struct {
NodeName string `json:"nodeName"` // node name
NodeIp string `json:"nodeIp"` // node IP taken from status.addresses
NodeRole string `json:"nodeRole"` // role read from the node labels
NodeState string `json:"nodeState"` // condition reported as "<Type>:<Status>", e.g. "Ready:True"
NodeInfo string `json:"nodeInfo"` // kernel / OS / kubelet / container runtime versions, comma-joined
PodCIDR string `json:"podCIDR"` // pod CIDR assigned to the node
Capacity string `json:"capacity"` // total capacity: CPU/memory/ephemeral disk/pods
Allocatable string `json:"allocatable"` // allocatable resources: CPU/memory/ephemeral disk/pods
CreateTime string `json:"createTime"` // node creation timestamp ("2006-01-02 15:04:05")
}
// NodeConditions mirrors one entry of a node's status.conditions.
// NOTE(review): "Hertbeat" is a misspelling of "Heartbeat"; the field
// and its JSON key are kept as-is so existing consumers don't break.
type NodeConditions struct {
Ctype string `json:"ctype"`
Status string `json:"status"`
LastHertbeatTime string `json:"lastHertbeatTime"`
LastTransitionTime string `json:"lastTransitionTime"`
Reason string `json:"reason"`
Message string `json:"message"`
}
// NodeList returns one display-ready summary row per node in the
// cluster identified by kubeconfig. It returns a nil slice and the
// underlying error when the node list cannot be fetched.
func NodeList(kubeconfig string) ([]Kubenode, error) {
	// Build a typed clientset from the cluster's kubeconfig.
	clientset := common.ClientSet(kubeconfig)
	nodeList, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Printf("list nodes error:%v\n", err)
		// FIX: return early — the original fell through and ranged
		// over a possibly-nil nodeList, which would panic.
		return nil, err
	}
	nodes := make([]Kubenode, 0, len(nodeList.Items))
	for _, node := range nodeList.Items {
		// Displayed state: the first condition whose status is
		// True/Unknown wins (on a healthy node that is Ready:True);
		// otherwise the last condition seen is shown.
		nodeStatus := "Ready:True"
		for _, cond := range node.Status.Conditions {
			nodeStatus = fmt.Sprintf("%s:%s", cond.Type, cond.Status)
			if cond.Status == "True" || cond.Status == "Unknown" {
				break
			}
		}
		// NOTE(review): "node-role.kubernetes.io/work" looks like a
		// site-specific label key — confirm it is not meant to be the
		// standard "node-role.kubernetes.io/<role>" prefix scheme.
		nodeRole := node.Labels["node-role.kubernetes.io/work"]
		if nodeRole == "" {
			nodeRole = "Worker"
		}
		// FIX: prefer the InternalIP and tolerate nodes that report no
		// addresses (the original indexed Addresses[0] unconditionally,
		// which panics on an empty address list).
		nodeIP := ""
		for _, addr := range node.Status.Addresses {
			if nodeIP == "" {
				nodeIP = addr.Address
			}
			if addr.Type == "InternalIP" {
				nodeIP = addr.Address
				break
			}
		}
		nodes = append(nodes, Kubenode{
			NodeName:  node.Name,
			NodeIp:    nodeIP,
			NodeRole:  nodeRole,
			NodeState: nodeStatus,
			NodeInfo: fmt.Sprintf("KernelVersion:%s,OS:%s,KubeletVersion:%s,Container:%s",
				node.Status.NodeInfo.KernelVersion, node.Status.NodeInfo.OSImage,
				node.Status.NodeInfo.KubeletVersion, node.Status.NodeInfo.ContainerRuntimeVersion),
			PodCIDR: fmt.Sprintf("PodCIDR:%s", node.Spec.PodCIDR),
			Capacity: fmt.Sprintf("CPU:%d,Mem:%dMi,Disk:%dGi,Pods:%d",
				node.Status.Capacity.Cpu().Value(), node.Status.Capacity.Memory().Value()/1024/1024,
				node.Status.Capacity.StorageEphemeral().Value()/1024/1024/1024, node.Status.Capacity.Pods().Value()),
			Allocatable: fmt.Sprintf("CPU:%d,Mem:%dMi,Disk:%dGi,Pods:%d",
				node.Status.Allocatable.Cpu().Value(), node.Status.Allocatable.Memory().Value()/1024/1024,
				node.Status.Allocatable.StorageEphemeral().Value()/1024/1024/1024, node.Status.Allocatable.Pods().Value()),
			CreateTime: node.CreationTimestamp.Time.Format("2006-01-02 15:04:05"),
		})
	}
	// The per-node NodeConditions slice the original built ("bbb") was
	// never read; that dead code has been removed.
	return nodes, nil
}
// NodeDrain drains the named node: it cordons the node and then evicts
// every running pod on it via the Eviction API (which, unlike a plain
// delete, honours PodDisruptionBudgets).
func NodeDrain(kubeconfig, nodeName string) error {
	clientset := common.ClientSet(kubeconfig)
	// FIX: cordon first so evicted pods cannot be rescheduled straight
	// back onto this node — the front-end drain dialog already tells the
	// user the node will also be made unschedulable, but the original
	// code never did it.
	if err := NodeUnschedulable(kubeconfig, nodeName, true); err != nil {
		log.Printf("[ERROR] NodeDrain cordon Fail:%s\n", err)
		return err
	}
	// PodList (models/podList.go, see the pod chapter) returns the pods
	// scheduled on the node.
	podlist, err := PodList(kubeconfig, "", "", "", "", "", nodeName)
	if err != nil {
		log.Printf("[ERROR] NodeDrain GetPodList Fail:%s\n", err)
		return err
	}
	for _, pod := range podlist {
		// Only running pods need evicting.
		if pod.PodPhase != "Running" {
			continue
		}
		// GracePeriodSeconds stays 0: pods are killed immediately.
		// NOTE(review): consider a non-zero grace period (or nil for the
		// pod's own default) if graceful shutdown matters here.
		var gracePeriod int64
		eviction := &policyv1.Eviction{
			ObjectMeta: metav1.ObjectMeta{
				Name:      pod.PodName,
				Namespace: pod.NameSpace,
			},
			DeleteOptions: &metav1.DeleteOptions{
				GracePeriodSeconds: &gracePeriod,
			},
		}
		if err := clientset.PolicyV1().Evictions(pod.NameSpace).Evict(context.Background(), eviction); err != nil {
			log.Printf("[ERROR] NodeDrain pod Evict Fail:%s\n", err)
			return err
		}
	}
	return nil
}
// NodeUnschedulable sets spec.unschedulable on the named node:
// true cordons it, false uncordons it.
func NodeUnschedulable(kubeconfig, nodeName string, value bool) error {
	clientset := common.ClientSet(kubeconfig)
	// Read-modify-write: fetch the current Node, flip the flag, push
	// the whole object back.
	node, getErr := clientset.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if getErr != nil {
		return getErr
	}
	node.Spec.Unschedulable = value
	_, updateErr := clientset.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{})
	return updateErr
}
七.效果如下图
更多推荐
所有评论(0)