gRPC load balancing for containerized Go services on Kubernetes
Theory:
1. gRPC is built on HTTP/2. HTTP/2 keeps long-lived connections and multiplexes I/O: many RPCs can be in flight on a single TCP connection, each carried on its own stream and distinguished by its stream ID.
2. Kubernetes Services (kube-proxy) balance at L4, i.e. per TCP connection. A gRPC client keeps one long-lived connection, so when a gRPC service is deployed on Kubernetes and reached through a ClusterIP, all requests on that connection land on the single pod the connection was routed to, and load becomes uneven.
3. Istio, a service mesh, proxies traffic at L7. Its sidecar understands HTTP/2, so it can spread individual gRPC requests across pods and restore load balancing for containerized gRPC services.
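Point 1 can be made concrete with a minimal client sketch (it reuses the article's gg/demo proto package, so the package path and method names are taken from the client shown later). All goroutines share one grpc.ClientConn, so every RPC travels as a separate HTTP/2 stream on the same TCP connection; an L4 balancer such as a ClusterIP Service only ever sees that one connection, which is why all requests end up on the same pod.

package main

import (
	"context"
	"log"
	"sync"

	pb "gg/demo" // same demo proto package used by the test client below

	"google.golang.org/grpc"
)

func main() {
	// One ClientConn corresponds (by default) to one underlying TCP connection.
	cc, err := grpc.Dial("helloworld.default.svc.cluster.local:80", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	cli := pb.NewDemoClient(cc)
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All 10 RPCs are multiplexed as separate HTTP/2 streams on the
			// same TCP connection; a connection-level (L4) balancer therefore
			// sends every one of them to a single pod.
			resp, err := cli.Hello(context.Background(), &pb.HelloReq{Req: "aa"})
			log.Printf("resp=%v err=%v", resp, err)
		}()
	}
	wg.Wait()
}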
Deploy Istio:
https://www.cnblogs.com/gdut17/p/15144052.html
Configure Istio's image pull policy:
https://blog.csdn.net/weixin_60092693/article/details/124599364
Enable automatic sidecar injection for the default namespace:
kubectl label namespace default istio-injection=enabled --overwrite
Remove the auto-injection label:
kubectl label ns default istio-injection-
Or inject the sidecar manually into a specific deployment:
istioctl kube-inject -f helloworld_deploy.yaml | kubectl apply -f -
deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: helloworld
  name: helloworld
spec:
  replicas: 1
  selector:
    matchLabels:
      app: helloworld
  template:
    metadata:
      labels:
        app: helloworld
    spec:
      containers:
      - image: helloworld:1.0
        imagePullPolicy: Never
        name: service
        command:
        - /main
        - --listen=:80
svc.yaml (the gRPC port is named grpc, which tells Istio to treat traffic on that port as gRPC/HTTP2 and route it at L7):
apiVersion: v1
kind: Service
metadata:
  labels:
    app: helloworld
  name: helloworld
spec:
  selector:
    app: helloworld
  ports:
  - name: http-admin
    port: 8078
    protocol: TCP
    targetPort: 8078
  - name: grpc
    port: 80
    protocol: TCP
    targetPort: 80
svc2 (the same Service definition for helloworld2, the deployment that does not get a sidecar):
apiVersion: v1
kind: Service
metadata:
  labels:
    app: helloworld2
  name: helloworld2
spec:
  selector:
    app: helloworld2
  ports:
  - name: http-admin
    port: 8078
    protocol: TCP
    targetPort: 8078
  - name: grpc
    port: 80
    protocol: TCP
    targetPort: 80
Verify load balancing
The test client (built as multi_cli) dials the Service address once and issues three RPCs in a row over that single connection:
package main

import (
	"context"
	"flag"
	"fmt"
	pb "gg/demo"
	"log"
	"time"

	"google.golang.org/grpc"
)

var addr = flag.String("addr", "", "gRPC server address, e.g. helloworld.default.svc.cluster.local:80")

func main() {
	flag.Parse()
	fmt.Printf("addr=%s\n", *addr)

	// Dial once; every RPC below reuses this single connection.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, *addr, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		fmt.Println(err)
		return
	}

	cli := pb.NewDemoClient(cc)
	for i := 0; i < 3; i++ {
		resp, err := cli.Hello(context.Background(), &pb.HelloReq{Req: "aa"})
		log.Printf("resp %+v %v\n", resp, err)
	}
}
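The helloworld pods behind these Services run a plain gRPC server on the port given by --listen (see the Deployment's command). The server code is not shown in the article, so the sketch below is a reconstruction from the pod logs: the handler logs the request and echoes it back, and the names HelloResp and RegisterDemoServer are assumptions about the generated gg/demo API.

package main

import (
	"context"
	"flag"
	"log"
	"net"

	pb "gg/demo"

	"google.golang.org/grpc"
)

// server implements the assumed pb.DemoServer interface.
type server struct{}

// Hello logs the request and echoes it back, matching the
// "Hello req:..." lines seen in the pod logs.
func (s *server) Hello(ctx context.Context, req *pb.HelloReq) (*pb.HelloResp, error) {
	log.Printf("Hello req:%v", req)
	return &pb.HelloResp{Msg: req.Req}, nil
}

func main() {
	listen := flag.String("listen", ":80", "gRPC listen address")
	flag.Parse()

	lis, err := net.Listen("tcp", *listen)
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	pb.RegisterDemoServer(s, &server{})
	log.Fatal(s.Serve(lis))
}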
Test results (q below is a shell alias for kubectl):
[root@localhost grpc]# q get po -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
helloworld-5ffbf6c758-lsckl 2/2 Running 0 16m 10.244.0.52 localhost.localdomain <none> <none>
helloworld-5ffbf6c758-pjgrw 2/2 Running 0 18m 10.244.0.49 localhost.localdomain <none> <none>
helloworld-5ffbf6c758-tdqsd 2/2 Running 0 16m 10.244.0.51 localhost.localdomain <none> <none>
helloworld2-567465468b-9qbxj 1/1 Running 0 16m 10.244.0.54 localhost.localdomain <none> <none>
helloworld2-567465468b-gffxk 1/1 Running 0 16m 10.244.0.53 localhost.localdomain <none> <none>
helloworld2-567465468b-jsdqb 1/1 Running 0 17m 10.244.0.50 localhost.localdomain <none> <none>
1. Calls from a pod without the sidecar injected (helloworld2): all three RPCs land on the same pod.
[root@localhost grpc]# q logs -f -lapp=helloworld2 --prefix --tail=1
[pod/helloworld2-567465468b-9qbxj/service] 2024/04/16 15:26:57 Hello req:req:"aa"
[pod/helloworld2-567465468b-9qbxj/service] 2024/04/16 15:26:57 Hello req:req:"aa"
[pod/helloworld2-567465468b-9qbxj/service] 2024/04/16 15:26:57 Hello req:req:"aa"
root@helloworld2-567465468b-9qbxj:/# ./multi_cli -addr helloworld2.default.svc.cluster.local:80
addr=helloworld2.default.svc.cluster.local:80
2024/04/16 15:26:57 resp msg:"aa" <nil>
2024/04/16 15:26:57 resp msg:"aa" <nil>
2024/04/16 15:26:57 resp msg:"aa" <nil>
2. Calls from a pod with the sidecar injected (helloworld): the three RPCs are spread across different pods.
[root@localhost grpc]# q logs -f -lapp=helloworld --prefix -c service --tail=1
[pod/helloworld-5ffbf6c758-lsckl/service] 2024/04/16 15:28:58 Hello req:req:"aa"
[pod/helloworld-5ffbf6c758-pjgrw/service] 2024/04/16 15:28:58 Hello req:req:"aa"
[pod/helloworld-5ffbf6c758-tdqsd/service] 2024/04/16 15:28:58 Hello req:req:"aa"
root@helloworld-5ffbf6c758-lsckl:/# ./multi_cli -addr helloworld.default.svc.cluster.local:80
addr=helloworld.default.svc.cluster.local:80
2024/04/16 15:28:58 resp msg:"aa" <nil>
2024/04/16 15:28:58 resp msg:"aa" <nil>
2024/04/16 15:28:58 resp msg:"aa" <nil>
[root@localhost grpc]# q logs -f helloworld-5ffbf6c758-lsckl -c istio-proxy --tail=3
[2024-04-16T15:28:58.013Z] "POST /demo.demo/hello HTTP/2" 200 - "-" "-" 9 9 1 0 "-" "grpc-go/1.26.0" "2185d68d-f1e2-97b9-b8ae-56499db339b4" "helloworld.default.svc.cluster.local:80" "10.244.0.52:80" outbound|80||helloworld.default.svc.cluster.local 10.244.0.52:57827 10.1.190.110:80 10.244.0.52:41260 - default
[2024-04-16T15:28:58.015Z] "POST /demo.demo/hello HTTP/2" 200 - "-" "-" 9 9 7 6 "-" "grpc-go/1.26.0" "a8c05f17-ee23-98f1-a339-cea010103b16" "helloworld.default.svc.cluster.local:80" "10.244.0.49:80" outbound|80||helloworld.default.svc.cluster.local 10.244.0.52:56795 10.1.190.110:80 10.244.0.52:41260 - default
[2024-04-16T15:28:58.023Z] "POST /demo.demo/hello HTTP/2" 200 - "-" "-" 9 9 28 27 "-" "grpc-go/1.26.0" "c439064b-29a9-9374-869d-8281e189dd14" "helloworld.default.svc.cluster.local:80" "10.244.0.51:80" outbound|80||helloworld.default.svc.cluster.local 10.244.0.52:36740 10.1.190.110:80 10.244.0.52:41260 - default
As the logs show, with the sidecar injected, three consecutive RPC calls land on three different pods: the Envoy proxy balances gRPC at L7, per request rather than per connection.