Design of a Web-Based K8s Scheduling Simulation Platform (5): Pod Deployment Fails, Rethinking the Approach
First, label the node so that the pod can be targeted at it later.
package main

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// getResourceList builds a ResourceList from CPU/memory strings such as "50m" and "1Gi".
func getResourceList(cpu, memory string) v1.ResourceList {
	res := v1.ResourceList{}
	if cpu != "" {
		res[v1.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[v1.ResourceMemory] = resource.MustParse(memory)
	}
	return res
}

// getResourceRequirements pairs requests and limits into a ResourceRequirements.
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
	return v1.ResourceRequirements{Requests: requests, Limits: limits}
}

func main() {
	// Uses the current context in kubeconfig.
	// path-to-kubeconfig -- for example, /root/.kube/config
	config, err := clientcmd.BuildConfigFromFlags("", "C:\\Users\\HJW\\.kube\\config")
	if err != nil {
		panic(err)
	}
	// Creates the clientset.
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Labels are plain Go key-value pairs: map[string]string.
	labels := map[string]string{"node1": "node1"}

	// Node template: a Node object carrying the label, capacity/allocatable resources and a Ready condition.
	lastHeartbeatTime, _ := time.Parse("2006-01-02 15:04:05", "2019-06-05 18:38:35")
	lastTransitionTime, _ := time.Parse("2006-01-02 15:04:05", "2019-06-05 11:41:27")
	newNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "node1",
			Labels: labels,
		},
		Spec: v1.NodeSpec{},
		Status: v1.NodeStatus{
			Capacity:    getResourceList("50m", "1Gi"),
			Allocatable: getResourceList("50m", "1Gi"),
			Phase:       v1.NodeRunning,
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					LastHeartbeatTime:  metav1.Time{Time: lastHeartbeatTime},
					LastTransitionTime: metav1.Time{Time: lastTransitionTime},
					Reason:             "KubeletReady",
					Message:            "kubelet is posting ready status",
				},
			},
		},
	}

	// Create the node (uncomment the Delete call below to clean it up again).
	node, err := clientset.CoreV1().Nodes().Create(context.Background(), newNode, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("Created node %q.\n", node.GetObjectMeta().GetName())
	//clientset.CoreV1().Nodes().Delete(context.Background(), "node1", metav1.DeleteOptions{})
}
A label set is of type map[string]string, an ordinary Go key-value map, so it has to be defined up front. After running this, you can see that the node has been labeled.
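To confirm that the label actually landed on the node, the same clientset can list nodes by label selector. Below is a minimal sketch, assuming the same kubeconfig path and the node1=node1 label from the snippet above; kubectl get nodes --show-labels gives the same information from the command line.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes the same kubeconfig path as in the snippets above.
	config, err := clientcmd.BuildConfigFromFlags("", "C:\\Users\\HJW\\.kube\\config")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// List only the nodes that carry the node1=node1 label and print their label maps.
	nodes, err := clientset.CoreV1().Nodes().List(context.Background(),
		metav1.ListOptions{LabelSelector: "node1=node1"})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		fmt.Printf("node %s labels: %v\n", n.Name, n.Labels)
	}
}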
Then create a pod whose node selector uses that same label.
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// getResourceList builds a ResourceList from CPU/memory strings such as "10m" and "1Gi".
func getResourceList(cpu, memory string) v1.ResourceList {
	res := v1.ResourceList{}
	if cpu != "" {
		res[v1.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[v1.ResourceMemory] = resource.MustParse(memory)
	}
	return res
}

// getResourceRequirements pairs requests and limits into a ResourceRequirements.
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
	return v1.ResourceRequirements{Requests: requests, Limits: limits}
}

func main() {
	// Uses the current context in kubeconfig.
	// path-to-kubeconfig -- for example, /root/.kube/config
	config, err := clientcmd.BuildConfigFromFlags("", "C:\\Users\\HJW\\.kube\\config")
	if err != nil {
		panic(err)
	}
	// Creates the clientset.
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// The same label that was put on node1, reused as the pod's node selector.
	labels := map[string]string{"node1": "node1"}

	// Pod template: a busybox container with requests/limits and the node selector.
	newPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod2",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:      "pod2",
					Image:     "busybox:latest",
					Command:   []string{"sleep", "1000"},
					Resources: getResourceRequirements(getResourceList("10m", "1Gi"), getResourceList("50m", "1Gi")),
				},
			},
			NodeSelector: labels,
		},
	}

	// Create the pod (uncomment the Delete call below to clean it up again).
	pod, err := clientset.CoreV1().Pods("kube-system").Create(context.Background(), newPod, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("Created pod %q.\n", pod.GetObjectMeta().GetName())
	//clientset.CoreV1().Pods("kube-system").Delete(context.Background(), "pod2", metav1.DeleteOptions{})
}
The pod really was created at first, but after about a minute a problem appeared:
0/2 nodes are available: 1 Too many pods, 1 node(s) didn’t match Pod’s node affinity/selector, 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/2 nodes are available: 2 Preemption is not helpful for scheduling.
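That text comes from the scheduler and is recorded as a FailedScheduling event on the pod. For reference, here is a small sketch of pulling it with client-go (assuming the pod is still named pod2 in kube-system, as above); it is the programmatic counterpart of kubectl describe pod pod2 -n kube-system.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes the same kubeconfig path as in the snippets above.
	config, err := clientcmd.BuildConfigFromFlags("", "C:\\Users\\HJW\\.kube\\config")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// Events are namespaced; filter by the involved object's name to get pod2's events,
	// including the scheduler's FailedScheduling message shown above.
	events, err := clientset.CoreV1().Events("kube-system").List(context.Background(),
		metav1.ListOptions{FieldSelector: "involvedObject.name=pod2"})
	if err != nil {
		panic(err)
	}
	for _, e := range events.Items {
		fmt.Printf("%s\t%s\t%s\n", e.Type, e.Reason, e.Message)
	}
}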
The scheduler says node1 has too many pods and also carries an untolerated taint. After digging for quite a while, the explanation is that Nodes().Create in client-go only writes a Node object (a struct of data) into the API server; it merely "fools" the scheduler, since there is no kubelet behind the fake node. Presumably because the fake node never reports a pods capacity and never sends heartbeats, the cluster soon sees it as unschedulable and taints it with node.kubernetes.io/unreachable. So the approach has to change: the scheduling simulation platform cannot actually deploy pods onto the cluster; instead, after creating node and pod records, match them in the database to simulate the scheduling decision.
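As a starting point for that database-side matching, here is a minimal in-memory sketch of the idea; SimNode, SimPod and fits are hypothetical names, not part of the platform yet. A pod "fits" a node if the node's labels satisfy the pod's node selector and the remaining allocatable CPU/memory cover the pod's requests.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// SimNode and SimPod are hypothetical records as they might be stored in the platform's database.
type SimNode struct {
	Name        string
	Labels      map[string]string
	Allocatable v1.ResourceList
	Requested   v1.ResourceList // sum of requests of pods already "scheduled" onto this node
}

type SimPod struct {
	Name         string
	NodeSelector map[string]string
	Requests     v1.ResourceList
}

// selectorMatches reports whether every key/value of the pod's node selector appears in the node's labels.
func selectorMatches(selector, labels map[string]string) bool {
	for k, v := range selector {
		if labels[k] != v {
			return false
		}
	}
	return true
}

// fits reports whether the pod's requests still fit into the node's remaining allocatable resources.
func fits(pod SimPod, node SimNode) bool {
	if !selectorMatches(pod.NodeSelector, node.Labels) {
		return false
	}
	for name, req := range pod.Requests {
		alloc := node.Allocatable[name]
		used := node.Requested[name]
		remaining := alloc.DeepCopy()
		remaining.Sub(used)
		if req.Cmp(remaining) > 0 {
			return false
		}
	}
	return true
}

func main() {
	node := SimNode{
		Name:        "node1",
		Labels:      map[string]string{"node1": "node1"},
		Allocatable: v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m"), v1.ResourceMemory: resource.MustParse("1Gi")},
		Requested:   v1.ResourceList{},
	}
	pod := SimPod{
		Name:         "pod2",
		NodeSelector: map[string]string{"node1": "node1"},
		Requests:     v1.ResourceList{v1.ResourceCPU: resource.MustParse("10m"), v1.ResourceMemory: resource.MustParse("1Gi")},
	}
	fmt.Printf("pod %s fits on node %s: %v\n", pod.Name, node.Name, fits(pod, node))
}

With records like these, the simulated "scheduler" becomes a query that picks a node for which fits returns true; in this example pod2 does fit on node1, which is exactly the outcome the simulation should report even though the real cluster refused it.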