Environment preparation

Use clean machines where possible and configure static IP addresses; if a node's IP address changes later, the k8s cluster built on it will break.
Prepare three virtual machines (CentOS 8.3) and run the following script on each:
centos_init_config.sh

#!/bin/bash

# Update packages
yum update -y

# Install required components and common tools
yum install -y yum-utils zlib zlib-devel openssl openssl-devel \
net-tools vim wget lsof unzip zip bind-utils lrzsz telnet

# Remove firewalld
systemctl stop firewalld
yum remove firewalld -y

# Remove NetworkManager
systemctl stop NetworkManager
yum remove NetworkManager -y

# Synchronize server time
yum install chrony -y
systemctl enable --now chronyd
chronyc sources

# Disable SELinux
setenforce 0
sed -i '/^SELINUX=/cSELINUX=disabled' /etc/selinux/config
getenforce

Make sure the br_netfilter module is loaded

lsmod | grep br_netfilter #check

modprobe br_netfilter #load

Set net.bridge.bridge-nf-call-iptables to 1

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
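
To confirm the settings took effect, the module and the two sysctl keys can be checked (a quick verification, not part of the original script):

lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables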

Install Docker

Remove old versions, install the yum-utils package, and configure a mirror repository

# Remove old versions
yum remove docker-client docker-client-latest docker-latest docker-latest-logrotate docker-logrotate
yum -y remove docker \
              docker-common \
              docker-selinux \
              docker-engine \
              docker-engine-selinux \
              container-selinux docker-ce docker-ce-cli
yum install -y yum-utils \
  device-mapper-persistent-data \
  lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache # dnf on CentOS 8 does not accept the old 'fast' argument

Install Docker and enable it at boot

Installing Docker repeatedly from the same IP address may get slower over time; this is normal.

yum list docker-ce --showduplicates | sort -r
#yum install docker-ce-19.03.12-3.el7 docker-ce-cli-19.03.12-3.el7 containerd.io -y
yum install -y docker-ce docker-ce-cli containerd.io
systemctl start docker
systemctl enable docker

Configure Docker to use systemd as the default cgroup driver, and enable tab completion for docker

Every machine in the k8s cluster must have Docker registry mirrors configured; otherwise image pulls triggered by commands run on the master will be very slow.

#Use systemd as the default cgroup driver, configure registry mirrors, and limit log size
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://hub-mirror.c.163.com",
    "https://dockerhub.azk8s.cn",
    "https://reg-mirror.qiniu.com",
    "https://registry.docker-cn.com"
  ],
  "log-driver": "json-file",
  "log-opts": {"max-size": "100m", "max-file": "1"}
}
EOF

systemctl daemon-reload

#docker tab completion
yum install -y bash-completion
source /usr/share/bash-completion/completions/docker
source /usr/share/bash-completion/bash_completion

#restart docker
systemctl restart docker
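
After the restart, it is worth checking that Docker actually picked up the systemd cgroup driver and the registry mirrors (a quick verification, not part of the original steps):

docker info | grep -i "cgroup driver"
docker info | grep -iA4 "registry mirrors"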
	

Disable the swap partition

	swapoff -a # temporary

	#recommended
	sed -i '/ swap / s/^/# /g' /etc/fstab # permanent

	##sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab # permanent (alternative)
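
To confirm swap is really off (a quick check; swapon is part of util-linux):

	swapon --show # no output means swap is disabled
	free -m       # the Swap line should show 0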

Rename the hosts and edit the hosts file

#set the hostname on each machine respectively
hostnamectl set-hostname master
hostnamectl set-hostname node1
hostnamectl set-hostname node2
	
#edit the hosts file
cat >> /etc/hosts << EOF 
192.168.139.135 master
192.168.139.141 node1
192.168.139.142 node2
EOF
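
A simple way to confirm name resolution and connectivity between the three machines (not part of the original steps):

for h in master node1 node2; do ping -c 1 $h; done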

Install kubeadm, kubelet and kubectl

#add the Kubernetes YUM repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

#install kubeadm, kubelet and kubectl
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
#enable at boot
systemctl enable --now kubelet
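
The command above installs the latest packages available in the mirror; the output later in this article shows v1.20.4, so for reproducible results the versions can be pinned instead (a sketch; adjust to whatever version the mirror actually provides):

yum install -y kubelet-1.20.4 kubeadm-1.20.4 kubectl-1.20.4 --disableexcludes=kubernetes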

Deploy Kubernetes

Common kubeadm commands

Command             Effect
kubeadm init        Bootstrap a control-plane node
kubeadm join        Bootstrap a worker node and join it to the cluster
kubeadm upgrade     Upgrade a cluster to a newer Kubernetes version
kubeadm config      If you initialized your cluster with kubeadm v1.7.x or lower, configure it for kubeadm upgrade
kubeadm token       Manage the tokens used by kubeadm join
kubeadm reset       Revert any changes made to the node by kubeadm init or kubeadm join
kubeadm certs       Manage Kubernetes certificates
kubeadm kubeconfig  Manage kubeconfig files
kubeadm version     Print the kubeadm version
kubeadm alpha       Preview a set of features made available to gather feedback from the community

Initialize Kubernetes

kubeadm init \
--apiserver-advertise-address=192.168.139.135 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16

Common options

--apiserver-advertise-address string  The IP address the API server advertises it is listening on. If not set, the default network interface is used.
--image-repository string  Default: "k8s.gcr.io". The container registry to pull control-plane images from.
--service-cidr string  Default: "10.96.0.0/12". An alternative IP address range for service virtual IPs.
--pod-network-cidr string  The IP address range for the pod network. If set, the control plane automatically allocates CIDRs to every node.

--apiserver-bind-port int32  Default: 6443. The port the API server binds to.
--apiserver-cert-extra-sans stringSlice  Optional extra Subject Alternative Names (SANs) for the API server serving certificate. Can be both IP addresses and DNS names.

--cert-dir string  Default: "/etc/kubernetes/pki". The path where certificates are saved and stored.
--certificate-key string  Key used to encrypt the control-plane certificates in the kubeadm-certs Secret.
--config string  Path to a kubeadm configuration file.
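
Since --config is listed above, the same init parameters can also be expressed as a kubeadm configuration file; a minimal sketch using the v1beta2 API (the file name is arbitrary, the values mirror the command line above):

cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.139.135
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
imageRepository: registry.aliyuncs.com/google_containers
networking:
  serviceSubnet: 10.1.0.0/16
  podSubnet: 10.244.0.0/16
EOF
#kubeadm init --config kubeadm-config.yaml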

Output

......
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.139.135:6443 --token mndba9.07ioghhw0wfbeayx \
    --discovery-token-ca-cert-hash sha256:406d4d6b6c94950e3e95173a19be5720f6e7aeeecb5b4bc9832fd29c680cfcd0 

If this step fails, check the network and the configuration files; if you need to re-initialize the cluster, run kubeadm reset first to tear down the existing setup.

Follow the instructions in the output

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
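
Once the kubeconfig is in place, kubectl should be able to reach the cluster (a quick check):

kubectl cluster-info
kubectl get nodes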

Run on the worker (node) machines

	kubeadm join 192.168.139.135:6443 --token mndba9.07ioghhw0wfbeayx \
		    --discovery-token-ca-cert-hash sha256:406d4d6b6c94950e3e95173a19be5720f6e7aeeecb5b4bc9832fd29c680cfcd0

Output

[preflight] Running pre-flight checks
	[WARNING FileExisting-tc]: tc not found in system path
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.5. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
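
If the token printed by kubeadm init has expired (tokens are valid for 24 hours by default), a fresh join command can be generated on the master:

kubeadm token create --print-join-command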

Install the network plugin (run on the master node)

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

If the download is blocked, the configuration file is as follows
kube-flannel.yml

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.2
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: quay.io/coreos/flannel:v0.15.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.15.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

Deploy flannel

	[root@master ~]# kubectl apply -f kube-flannel.yml 
	podsecuritypolicy.policy/psp.flannel.unprivileged created
	clusterrole.rbac.authorization.k8s.io/flannel created
	clusterrolebinding.rbac.authorization.k8s.io/flannel created
	serviceaccount/flannel created
	configmap/kube-flannel-cfg created
	daemonset.apps/kube-flannel-ds created
	[root@master ~]# ps aux|grep flannel
	root      320372  1.8  1.4 1265920 26824 ?       Ssl  11:12   0:00 /opt/bin/flanneld --ip-masq --kube-subnet-mgr
	root      321071  0.0  0.0  12324  1088 pts/0    S+   11:13   0:00 grep --color=auto flannel

Check cluster status

	#wait for the nodes to become Ready
	[root@master ~]# kubectl get nodes
	NAME     STATUS     ROLES                  AGE    VERSION
	master   Ready      control-plane,master   161m   v1.20.4
	node1    Ready      <none>                 135m   v1.20.4
	node2    NotReady   <none>                 39s    v1.20.4

	[root@master ~]# kubectl get nodes
	NAME     STATUS   ROLES                  AGE    VERSION
	master   Ready    control-plane,master   163m   v1.20.4
	node1    Ready    <none>                 136m   v1.20.4
	node2    Ready    <none>                 2m5s   v1.20.4


	[root@master ~]# kubectl get pod -n kube-system
	NAME                             READY   STATUS    RESTARTS   AGE
	coredns-7f89b7bc75-fjq7n         1/1     Running   0          48m
	coredns-7f89b7bc75-t5hnq         1/1     Running   0          48m
	etcd-master                      1/1     Running   0          48m
	kube-apiserver-master            1/1     Running   0          48m
	kube-controller-manager-master   1/1     Running   0          48m
	kube-flannel-ds-kz8t9            1/1     Running   0          17m
	kube-flannel-ds-r2q5s            1/1     Running   0          17m
	kube-proxy-4jlr6                 1/1     Running   0          48m
	kube-proxy-ppsr6                 1/1     Running   0          22m
	kube-scheduler-master            1/1     Running   0          48m

The k8s cluster is now set up and ready for basic use.
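
As a quick smoke test (optional; the deployment name here is arbitrary), a throwaway nginx Deployment can be created and removed:

kubectl create deployment nginx-test --image=nginx
kubectl get pods -l app=nginx-test   # wait until the pod is Running
kubectl delete deployment nginx-test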

Use PV, PVC and NFS to synchronize data and serve a custom page

Add one more NFS server, run the centos_init_config.sh script above on it, then install the NFS service on all machines; the routine steps are not all shown here.

	yum install -y nfs-utils
	systemctl start nfs-server
	systemctl enable nfs-server


	#on the master machine
	[root@master ~]# yum install -y nfs-utils
	......
	[root@master ~]# systemctl start nfs-server
	[root@master ~]# systemctl enable nfs-server
	Created symlink /etc/systemd/system/multi-user.target.wants/nfs-server.service → /usr/lib/systemd/system/nfs-server.service.
	
	[root@node1 ~]# systemctl start nfs-server
	[root@node1 ~]# systemctl enable nfs-server
	Created symlink /etc/systemd/system/multi-user.target.wants/nfs-server.service → /usr/lib/systemd/system/nfs-server.service.

Create the directory to export and adjust its permissions

	[root@k8s-nfs ~]# mkdir /data
	[root@k8s-nfs ~]# chmod 777 /data #777 is crude but simple
	[root@k8s-nfs ~]# ll -d /data
	drwxrwxrwx 2 root root 6 Mar 16 21:08 /data

Write a bit of test content for verification

	[root@k8s-nfs data]# echo "Liuyong,hello,k8s" >> index.html
	[root@k8s-nfs data]# cat index.html 
	Liuyong,hello,k8s
	[root@k8s-nfs data]# 

Edit the exports file and export the directory

	[root@k8s-nfs data]# cat /etc/exports
	/data  192.168.139.0/24(rw,no_root_squash,no_all_squash,sync)
	[root@k8s-nfs data]# exportfs -rv
	exporting 192.168.139.0/24:/data
	[root@k8s-nfs data]# systemctl restart nfs-server
	[root@k8s-nfs data]# exportfs -v
	/data         	192.168.139.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
	[root@k8s-nfs data]# 


	Note: /etc/exports is the default NFS configuration file.

	Meaning of the export options:

	rw: read-write
	ro: read-only
	no_root_squash: do not squash root; if the client writes as root, it is mapped to root on the server
	root_squash: the default (opposite) behaviour; a client operating as root is squashed to the nobody user
	all_squash: whatever user the client uses, it is squashed to the nobody user
	insecure: allow clients to connect from non-reserved (unprivileged) ports
	sync: data is written synchronously to memory and to disk
	async: data is written to memory first and not flushed to disk immediately
	anonuid: the uid anonymous users are mapped to; it must exist in /etc/passwd
	anongid: the gid anonymous users are mapped to
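
Before wiring the export into Kubernetes, it can be verified from one of the k8s nodes (a quick check; showmount ships with nfs-utils):

	showmount -e 192.168.139.143
	mount -t nfs 192.168.139.143:/data /mnt && cat /mnt/index.html && umount /mnt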

Use PV and PVC via configuration files

Check the cluster

[root@master pv-pvc-pod-nfs]# kubectl get node
NAME     STATUS   ROLES                  AGE    VERSION
master   Ready    control-plane,master   5d4h   v1.20.4
node1    Ready    <none>                 5d3h   v1.20.4
node2    Ready    <none>                 5d1h   v1.20.4

Create a working directory

mkdir pv-pvc-pod-nfs
cd pv-pvc-pod-nfs/

The configuration files are as follows

[root@master pv-pvc-pod-nfs]# cat pv-nfs.yaml 
apiVersion: v1
kind: PersistentVolume  #resource type
metadata:
  name: ly-nginx-pv
  labels:
    type: ly-nginx-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany  #access mode: read-write by multiple clients
  persistentVolumeReclaimPolicy: Recycle #reclaim policy
  storageClassName: nfs  #note: the storage class of this volume
  nfs:
    path: "/data"
    server: 192.168.139.143 #k8s-nfs
    readOnly: false  #not read-only

[root@master pv-pvc-pod-nfs]# cat pvc-nfs.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ly-nginx-pvc
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs
  
[root@master pv-pvc-pod-nfs]# cat pv-pod.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: backend-nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ly
      tier: backend-nginx
      track: stable
  template:
    metadata:
      labels:
        app: ly
        tier: backend-nginx
        track: stable
    spec:
      containers:
        - name: ly-pv-container-nfs
          image: nginx
          ports:
            - containerPort: 80
          volumeMounts:
            - mountPath: "/usr/share/nginx/html"
              name: ly-pv-storage-nfs
      volumes:
        - name: ly-pv-storage-nfs
          persistentVolumeClaim:
            claimName: ly-nginx-pvc

Start deploying

	[root@master pv-pvc-pod-nfs]# kubectl apply -f pv-nfs.yaml 
	persistentvolume/ly-nginx-pv created
	[root@master pv-pvc-pod-nfs]# kubectl apply -f pvc-nfs.yaml 
	persistentvolumeclaim/ly-nginx-pvc created
	[root@master pv-pvc-pod-nfs]# kubectl apply -f pv-pod.yaml 
	deployment.apps/backend-nginx created

	
	[root@master pv-pvc-pod-nfs]# kubectl get deployment
	NAME            READY   UP-TO-DATE   AVAILABLE   AGE
	backend-nginx   0/3     3            0           8s
	
	#wait a while for the pods to start
	[root@master pv-pvc-pod-nfs]# kubectl get deployment
	NAME            READY   UP-TO-DATE   AVAILABLE   AGE
	backend-nginx   2/3     3            2           94s
	
	[root@master pv-pvc-pod-nfs]# kubectl get pod -o wide
	NAME                             READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
	backend-nginx-556ccc868d-5khc5   1/1     Running   0          2m43s   10.244.1.26   node1   <none>           <none>
	backend-nginx-556ccc868d-97w8z   1/1     Running   0          2m43s   10.244.2.2    node2   <none>           <none>
	backend-nginx-556ccc868d-swv8m   1/1     Running   0          2m43s   10.244.1.25   node1   <none>           <none>
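
It is also worth confirming that the PV and the PVC have bound to each other (both should show a Bound status):

	kubectl get pv,pvc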

Access the pages

	[root@master pv-pvc-pod-nfs]# curl 10.244.2.2
	Liuyong,hello,k8s
	[root@master pv-pvc-pod-nfs]# curl 10.244.1.26
	Liuyong,hello,k8s
	[root@master pv-pvc-pod-nfs]# curl 10.244.1.25
	Liuyong,hello,k8s
	[root@master pv-pvc-pod-nfs]# 

Expose the Deployment with a NodePort Service, opening a port on every node

	[root@master pv-pvc-pod-nfs]# kubectl expose deployment/backend-nginx --type="NodePort" --port 80
	service/backend-nginx exposed
	[root@master pv-pvc-pod-nfs]# kubectl get service
	NAME            TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE
	backend-nginx   NodePort    10.1.153.12   <none>        80:31488/TCP   19s
	kubernetes      ClusterIP   10.1.0.1      <none>        443/TCP        5d4h
	[root@master pv-pvc-pod-nfs]# curl 192.168.139.135:31488
	Liuyong,hello,k8s
	[root@master pv-pvc-pod-nfs]# curl 192.168.139.141:31488
	Liuyong,hello,k8s
	[root@master pv-pvc-pod-nfs]# curl 192.168.139.142:31488
	Liuyong,hello,k8s
	[root@master pv-pvc-pod-nfs]# 
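
The same Service can also be written as a manifest instead of using kubectl expose; a minimal sketch (the selector reuses the Deployment labels above; the nodePort is left for Kubernetes to pick):

cat <<EOF > backend-nginx-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: backend-nginx
spec:
  type: NodePort
  selector:
    app: ly
    tier: backend-nginx
    track: stable
  ports:
  - port: 80
    targetPort: 80
EOF
#kubectl apply -f backend-nginx-svc.yaml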

Further extensions

You can add an nginx server for load balancing, add Prometheus for monitoring, use Grafana to visualize the data, and so on.
