
feat(k8s): review the resource assignment when creating a Kubernetes application EE-437 (#5254)

* feat(nodes limits) review the resource assignment when creating a Kubernetes application EE-437

* feat(nodes limits) review feedback EE-437

* feat(nodes limits) workaround for lodash cloneDeep not working in production mode EE-437

* feat(nodes limits) calculate the slider's max CPU with a floor function instead of a round function EE-437 (see the sketch below)

* feat(nodes limits) another review feedback EE-437

* feat(nodes limits) cleanup code EE-437

* feat(nodes limits) EE-437 pr feedback update

* feat(nodes limits) EE-437 rebase onto develop branch

* feat(nodes limits) EE-437 another pr feedback update

Co-authored-by: Simon Meng <simon.meng@portainer.io>
Authored by cong meng on 2021-09-01 09:08:01 +12:00; committed by GitHub.
parent 0ffbe6a42e
commit c597ae96e2
10 changed files with 453 additions and 42 deletions
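
The floor-versus-round change mentioned in the commit message lives in the frontend slider code (JavaScript), which is not part of the Go diffs shown below. As an illustration only, with hypothetical numbers and an assumed 0.1-core slider step, flooring the free CPU keeps the slider maximum within what the node can actually provide, while rounding can overshoot it:

package main

import (
	"fmt"
	"math"
)

func main() {
	// hypothetical: a node with 1960 free milli-cores (1.96 cores)
	freeMilliCPU := int64(1960)
	cores := float64(freeMilliCPU) / 1000.0

	rounded := math.Round(cores*10) / 10 // 2.0 cores: more than is actually free
	floored := math.Floor(cores*10) / 10 // 1.9 cores: always within free capacity

	fmt.Printf("round: %.1f, floor: %.1f\n", rounded, floored)
}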


@@ -0,0 +1,42 @@
package cli

import (
	portainer "github.com/portainer/portainer/api"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetNodesLimits gets the CPU and memory limits (i.e. the currently unused resources) of all nodes in the current Kubernetes endpoint
func (kcl *KubeClient) GetNodesLimits() (portainer.K8sNodesLimits, error) {
	nodesLimits := make(portainer.K8sNodesLimits)

	nodes, err := kcl.cli.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	pods, err := kcl.cli.CoreV1().Pods("").List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
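
	// Seed each node's entry with its allocatable capacity: CPU in milli-cores, memory in bytes.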
	for _, item := range nodes.Items {
		cpu := item.Status.Allocatable.Cpu().MilliValue()
		memory := item.Status.Allocatable.Memory().Value()

		nodesLimits[item.ObjectMeta.Name] = &portainer.K8sNodeLimits{
			CPU:    cpu,
			Memory: memory,
		}
	}
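
	// Subtract every scheduled pod's container requests from its node's remaining capacity.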
	for _, item := range pods.Items {
		if nodeLimits, ok := nodesLimits[item.Spec.NodeName]; ok {
			for _, container := range item.Spec.Containers {
				nodeLimits.CPU -= container.Resources.Requests.Cpu().MilliValue()
				nodeLimits.Memory -= container.Resources.Requests.Memory().Value()
			}
		}
	}

	return nodesLimits, nil
}
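
As the loops above show, K8sNodesLimits maps a node name to its remaining CPU (milli-cores) and memory (bytes) after pod requests are subtracted. A minimal consumption sketch, assuming a hypothetical helper inside the same cli package and an fmt import (neither is part of this commit):

// printNodeHeadroom is a hypothetical illustration of consuming GetNodesLimits.
func printNodeHeadroom(kcl *KubeClient) error {
	limits, err := kcl.GetNodesLimits()
	if err != nil {
		return err
	}
	for name, l := range limits {
		// CPU is remaining milli-cores (1000m = 1 core); Memory is remaining bytes
		fmt.Printf("%s: %dm CPU, %d bytes memory unreserved\n", name, l.CPU, l.Memory)
	}
	return nil
}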


@@ -0,0 +1,137 @@
package cli

import (
	"reflect"
	"testing"

	portainer "github.com/portainer/portainer/api"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	kfake "k8s.io/client-go/kubernetes/fake"
)
func newNodes() *v1.NodeList {
	return &v1.NodeList{
		Items: []v1.Node{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-node-0",
				},
				Status: v1.NodeStatus{
					Allocatable: v1.ResourceList{
						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("2"),
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("4M"),
					},
				},
			},
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-node-1",
				},
				Status: v1.NodeStatus{
					Allocatable: v1.ResourceList{
						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("3"),
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("6M"),
					},
				},
			},
		},
	}
}
func newPods() *v1.PodList {
	return &v1.PodList{
		Items: []v1.Pod{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-container-0",
					Namespace: "test-namespace-0",
				},
				Spec: v1.PodSpec{
					NodeName: "test-node-0",
					Containers: []v1.Container{
						{
							Name: "test-container-0",
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName(v1.ResourceCPU):    resource.MustParse("1"),
									v1.ResourceName(v1.ResourceMemory): resource.MustParse("2M"),
								},
							},
						},
					},
				},
			},
			{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-container-1",
					Namespace: "test-namespace-1",
				},
				Spec: v1.PodSpec{
					NodeName: "test-node-1",
					Containers: []v1.Container{
						{
							Name: "test-container-1",
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName(v1.ResourceCPU):    resource.MustParse("2"),
									v1.ResourceName(v1.ResourceMemory): resource.MustParse("3M"),
								},
							},
						},
					},
				},
			},
		},
	}
}
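
// Expected headroom from the fixtures: test-node-0 has 2000m CPU and 4M memory
// allocatable with a pod requesting 1000m/2M, leaving 1000m and 2,000,000 bytes;
// test-node-1 has 3000m/6M with a pod requesting 2000m/3M, leaving 1000m and 3,000,000 bytes.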
func TestKubeClient_GetNodesLimits(t *testing.T) {
	type fields struct {
		cli kubernetes.Interface
	}
	fieldsInstance := fields{
		cli: kfake.NewSimpleClientset(newNodes(), newPods()),
	}
	tests := []struct {
		name    string
		fields  fields
		want    portainer.K8sNodesLimits
		wantErr bool
	}{
		{
			name:   "2 nodes 2 pods",
			fields: fieldsInstance,
			want: portainer.K8sNodesLimits{
				"test-node-0": &portainer.K8sNodeLimits{
					CPU:    1000,
					Memory: 2000000,
				},
				"test-node-1": &portainer.K8sNodeLimits{
					CPU:    1000,
					Memory: 3000000,
				},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			kcl := &KubeClient{
				cli: tt.fields.cli,
			}
			got, err := kcl.GetNodesLimits()
			if (err != nil) != tt.wantErr {
				t.Errorf("GetNodesLimits() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("GetNodesLimits() got = %v, want %v", got, tt.want)
			}
		})
	}
}
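
Because the test is backed by client-go's fake clientset (kfake.NewSimpleClientset seeded with the node and pod fixtures above), it runs without a live cluster, e.g.:

go test -run TestKubeClient_GetNodesLimits ./...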