Mirror of https://github.com/portainer/portainer.git (synced 2025-07-25 08:19:40 +02:00)

Commit d191e4f9b9: resolve conflicts

50 changed files with 950 additions and 182 deletions
api/kubernetes/cli/nodes_limits.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package cli

import (
	portainer "github.com/portainer/portainer/api"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// GetNodesLimits gets the CPU and Memory limits (unused resources) of all nodes in the current k8s endpoint connection
func (kcl *KubeClient) GetNodesLimits() (portainer.K8sNodesLimits, error) {
	nodesLimits := make(portainer.K8sNodesLimits)

	nodes, err := kcl.cli.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	pods, err := kcl.cli.CoreV1().Pods("").List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	for _, item := range nodes.Items {
		cpu := item.Status.Allocatable.Cpu().MilliValue()
		memory := item.Status.Allocatable.Memory().Value()

		nodesLimits[item.ObjectMeta.Name] = &portainer.K8sNodeLimits{
			CPU:    cpu,
			Memory: memory,
		}
	}

	for _, item := range pods.Items {
		if nodeLimits, ok := nodesLimits[item.Spec.NodeName]; ok {
			for _, container := range item.Spec.Containers {
				nodeLimits.CPU -= container.Resources.Requests.Cpu().MilliValue()
				nodeLimits.Memory -= container.Resources.Requests.Memory().Value()
			}
		}
	}

	return nodesLimits, nil
}
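As a quick sanity check of the arithmetic above, the standalone sketch below (not part of the commit) reproduces what GetNodesLimits computes for a single node: allocatable capacity minus the sum of container requests, with CPU in millicores and memory in bytes. It uses the same resource quantities as the test fixtures in the next file; the package and variable names are illustrative only.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Node allocatable capacity, as reported by the kubelet.
	allocatableCPU := resource.MustParse("2")  // 2 cores  -> 2000 millicores
	allocatableMem := resource.MustParse("4M") // 4M       -> 4,000,000 bytes

	// Requests of a single container scheduled on that node.
	requestedCPU := resource.MustParse("1")  // 1 core -> 1000 millicores
	requestedMem := resource.MustParse("2M") // 2,000,000 bytes

	// Remaining (unused) resources, matching the values the test expects.
	fmt.Println(allocatableCPU.MilliValue() - requestedCPU.MilliValue()) // 1000
	fmt.Println(allocatableMem.Value() - requestedMem.Value())           // 2000000
}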
api/kubernetes/cli/nodes_limits_test.go (new file, 137 lines)
@@ -0,0 +1,137 @@
package cli

import (
	portainer "github.com/portainer/portainer/api"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	kfake "k8s.io/client-go/kubernetes/fake"
	"reflect"
	"testing"
)

func newNodes() *v1.NodeList {
	return &v1.NodeList{
		Items: []v1.Node{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-node-0",
				},
				Status: v1.NodeStatus{
					Allocatable: v1.ResourceList{
						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("2"),
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("4M"),
					},
				},
			},
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-node-1",
				},
				Status: v1.NodeStatus{
					Allocatable: v1.ResourceList{
						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("3"),
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("6M"),
					},
				},
			},
		},
	}
}

func newPods() *v1.PodList {
	return &v1.PodList{
		Items: []v1.Pod{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-container-0",
					Namespace: "test-namespace-0",
				},
				Spec: v1.PodSpec{
					NodeName: "test-node-0",
					Containers: []v1.Container{
						{
							Name: "test-container-0",
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName(v1.ResourceCPU):    resource.MustParse("1"),
									v1.ResourceName(v1.ResourceMemory): resource.MustParse("2M"),
								},
							},
						},
					},
				},
			},
			{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-container-1",
					Namespace: "test-namespace-1",
				},
				Spec: v1.PodSpec{
					NodeName: "test-node-1",
					Containers: []v1.Container{
						{
							Name: "test-container-1",
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName(v1.ResourceCPU):    resource.MustParse("2"),
									v1.ResourceName(v1.ResourceMemory): resource.MustParse("3M"),
								},
							},
						},
					},
				},
			},
		},
	}
}

func TestKubeClient_GetNodesLimits(t *testing.T) {
	type fields struct {
		cli kubernetes.Interface
	}

	fieldsInstance := fields{
		cli: kfake.NewSimpleClientset(newNodes(), newPods()),
	}

	tests := []struct {
		name    string
		fields  fields
		want    portainer.K8sNodesLimits
		wantErr bool
	}{
		{
			name:   "2 nodes 2 pods",
			fields: fieldsInstance,
			want: portainer.K8sNodesLimits{
				"test-node-0": &portainer.K8sNodeLimits{
					CPU:    1000,
					Memory: 2000000,
				},
				"test-node-1": &portainer.K8sNodeLimits{
					CPU:    1000,
					Memory: 3000000,
				},
			},
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			kcl := &KubeClient{
				cli: tt.fields.cli,
			}
			got, err := kcl.GetNodesLimits()
			if (err != nil) != tt.wantErr {
				t.Errorf("GetNodesLimits() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("GetNodesLimits() got = %v, want %v", got, tt.want)
			}
		})
	}
}
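Because the test builds its client from the fake clientset in k8s.io/client-go/kubernetes/fake, no real cluster is needed; assuming the usual Go toolchain setup for the repository, it should be runnable on its own with something like: go test ./api/kubernetes/cli -run TestKubeClient_GetNodesLimits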
@@ -18,15 +18,10 @@ func getPortainerUserDefaultPolicies() []rbacv1.PolicyRule {
 			Resources: []string{"storageclasses"},
 			APIGroups: []string{"storage.k8s.io"},
 		},
-		{
-			Verbs:     []string{"list"},
-			Resources: []string{"ingresses"},
-			APIGroups: []string{"networking.k8s.io"},
-		},
 	}
 }
 
-func (kcl *KubeClient) createPortainerUserClusterRole() error {
+func (kcl *KubeClient) upsertPortainerK8sClusterRoles() error {
 	clusterRole := &rbacv1.ClusterRole{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: portainerUserCRName,
@@ -35,8 +30,13 @@ func (kcl *KubeClient) createPortainerUserClusterRole() error {
 	}
 
 	_, err := kcl.cli.RbacV1().ClusterRoles().Create(clusterRole)
-	if err != nil && !k8serrors.IsAlreadyExists(err) {
-		return err
+	if err != nil {
+		if k8serrors.IsAlreadyExists(err) {
+			_, err = kcl.cli.RbacV1().ClusterRoles().Update(clusterRole)
+		}
+		if err != nil {
+			return err
+		}
 	}
 
 	return nil
@@ -63,7 +63,7 @@ func (kcl *KubeClient) SetupUserServiceAccount(userID int, teamIDs []int, restri
 }
 
 func (kcl *KubeClient) ensureRequiredResourcesExist() error {
-	return kcl.createPortainerUserClusterRole()
+	return kcl.upsertPortainerK8sClusterRoles()
 }
 
 func (kcl *KubeClient) createUserServiceAccount(namespace, serviceAccountName string) error {
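The error-handling change above turns a plain create into a create-or-update: when the ClusterRole already exists, it is updated in place instead of being silently left as-is. A minimal sketch of that pattern against a fake clientset is shown below; the test function and role name are illustrative only and not part of the commit.

package cli

import (
	"testing"

	rbacv1 "k8s.io/api/rbac/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kfake "k8s.io/client-go/kubernetes/fake"
)

// Sketch only: exercises the create-then-update fallback used by
// upsertPortainerK8sClusterRoles against a role that already exists.
func TestClusterRoleUpsertSketch(t *testing.T) {
	existing := &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "portainer-cr"}} // hypothetical name
	cli := kfake.NewSimpleClientset(existing)

	desired := existing.DeepCopy()
	_, err := cli.RbacV1().ClusterRoles().Create(desired)
	if err != nil {
		if k8serrors.IsAlreadyExists(err) {
			// The role is already there: fall back to updating it.
			_, err = cli.RbacV1().ClusterRoles().Update(desired)
		}
		if err != nil {
			t.Fatal(err)
		}
	}
}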