diff --git a/api/http/handler/kubernetes/handler.go b/api/http/handler/kubernetes/handler.go
index c317ed346..3efad11f4 100644
--- a/api/http/handler/kubernetes/handler.go
+++ b/api/http/handler/kubernetes/handler.go
@@ -39,6 +39,8 @@ func NewHandler(bouncer *security.RequestBouncer, authorizationService *authoriz
 
 	kubeRouter.PathPrefix("/config").Handler(
 		bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.getKubernetesConfig))).Methods(http.MethodGet)
+	kubeRouter.PathPrefix("/nodes_limits").Handler(
+		bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.getKubernetesNodesLimits))).Methods(http.MethodGet)
 
 	// namespaces
 	// in the future this piece of code might be in another package (or a few different packages - namespaces/namespace?)
diff --git a/api/http/handler/kubernetes/kubernetes_nodes_limits.go b/api/http/handler/kubernetes/kubernetes_nodes_limits.go
new file mode 100644
index 000000000..5e1f06215
--- /dev/null
+++ b/api/http/handler/kubernetes/kubernetes_nodes_limits.go
@@ -0,0 +1,53 @@
+package kubernetes
+
+import (
+	"net/http"
+
+	httperror "github.com/portainer/libhttp/error"
+	"github.com/portainer/libhttp/request"
+	"github.com/portainer/libhttp/response"
+	portainer "github.com/portainer/portainer/api"
+	bolterrors "github.com/portainer/portainer/api/bolt/errors"
+)
+
+// @id getKubernetesNodesLimits
+// @summary Get CPU and memory limits of all nodes within k8s cluster
+// @description Get CPU and memory limits of all nodes within k8s cluster
+// @description **Access policy**: authorized
+// @tags kubernetes
+// @security jwt
+// @accept json
+// @produce json
+// @param id path int true "Endpoint identifier"
+// @success 200 {object} K8sNodesLimits "Success"
+// @failure 400 "Invalid request"
+// @failure 401 "Unauthorized"
+// @failure 403 "Permission denied"
+// @failure 404 "Endpoint not found"
+// @failure 500 "Server error"
+// @router /kubernetes/{id}/nodes_limits [get]
+func (handler *Handler) getKubernetesNodesLimits(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
+	endpointID, err := request.RetrieveNumericRouteVariableValue(r, "id")
+	if err != nil {
+		return &httperror.HandlerError{http.StatusBadRequest, "Invalid endpoint identifier route variable", err}
+	}
+
+	endpoint, err := handler.dataStore.Endpoint().Endpoint(portainer.EndpointID(endpointID))
+	if err == bolterrors.ErrObjectNotFound {
+		return &httperror.HandlerError{http.StatusNotFound, "Unable to find an endpoint with the specified identifier inside the database", err}
+	} else if err != nil {
+		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find an endpoint with the specified identifier inside the database", err}
+	}
+
+	cli, err := handler.kubernetesClientFactory.GetKubeClient(endpoint)
+	if err != nil {
+		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to create Kubernetes client", err}
+	}
+
+	nodesLimits, err := cli.GetNodesLimits()
+	if err != nil {
+		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve nodes limits", err}
+	}
+
+	return response.JSON(w, nodesLimits)
+}
diff --git a/api/kubernetes/cli/nodes_limits.go b/api/kubernetes/cli/nodes_limits.go
new file mode 100644
index 000000000..9e0c044eb
--- /dev/null
+++ b/api/kubernetes/cli/nodes_limits.go
@@ -0,0 +1,44 @@
+package cli
+
+import (
+	portainer "github.com/portainer/portainer/api"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// GetNodesLimits gets the CPU and Memory limits(unused resources) of all nodes in the current k8s endpoint connection
+func (kcl *KubeClient) GetNodesLimits() (portainer.K8sNodesLimits, error) {
+	nodesLimits := make(portainer.K8sNodesLimits)
+
+	nodes, err := kcl.cli.CoreV1().Nodes().List(metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	pods, err := kcl.cli.CoreV1().Pods("").List(metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	for _, item := range nodes.Items {
+		cpu := item.Status.Allocatable.Cpu().MilliValue()
+		memory := item.Status.Allocatable.Memory().Value()
+
+		nodesLimits[item.ObjectMeta.Name] = &portainer.K8sNodeLimits{
+			CPU:    cpu,
+			Memory: memory,
+		}
+	}
+
+	// Subtract the requested resources of every scheduled container from its node's allocatable.
+	// NOTE(review): pods in Succeeded/Failed phase are still counted here — confirm whether terminated pods should be excluded.
+	for _, item := range pods.Items {
+		if nodeLimits, ok := nodesLimits[item.Spec.NodeName]; ok {
+			for _, container := range item.Spec.Containers {
+				nodeLimits.CPU -= container.Resources.Requests.Cpu().MilliValue()
+				nodeLimits.Memory -= container.Resources.Requests.Memory().Value()
+			}
+		}
+	}
+
+	return nodesLimits, nil
+}
diff --git a/api/kubernetes/cli/nodes_limits_test.go b/api/kubernetes/cli/nodes_limits_test.go
new file mode 100644
index 000000000..bf880c2ff
--- /dev/null
+++ b/api/kubernetes/cli/nodes_limits_test.go
@@ -0,0 +1,138 @@
+package cli
+
+import (
+	"reflect"
+	"testing"
+
+	portainer "github.com/portainer/portainer/api"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	kfake "k8s.io/client-go/kubernetes/fake"
+)
+
+func newNodes() *v1.NodeList {
+	return &v1.NodeList{
+		Items: []v1.Node{
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-node-0",
+				},
+				Status: v1.NodeStatus{
+					Allocatable: v1.ResourceList{
+						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("2"),
+						v1.ResourceName(v1.ResourceMemory): resource.MustParse("4M"),
+					},
+				},
+			},
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-node-1",
+				},
+				Status: v1.NodeStatus{
+					Allocatable: v1.ResourceList{
+						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("3"),
+						v1.ResourceName(v1.ResourceMemory): resource.MustParse("6M"),
+					},
+				},
+			},
+		},
+	}
+}
+
+func newPods() *v1.PodList {
+	return &v1.PodList{
+		Items: []v1.Pod{
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-container-0",
+					Namespace: "test-namespace-0",
+				},
+				Spec: v1.PodSpec{
+					NodeName: "test-node-0",
+					Containers: []v1.Container{
+						{
+							Name: "test-container-0",
+							Resources: v1.ResourceRequirements{
+								Requests: v1.ResourceList{
+									v1.ResourceName(v1.ResourceCPU):    resource.MustParse("1"),
+									v1.ResourceName(v1.ResourceMemory): resource.MustParse("2M"),
+								},
+							},
+						},
+					},
+				},
+			},
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-container-1",
+					Namespace: "test-namespace-1",
+				},
+				Spec: v1.PodSpec{
+					NodeName: "test-node-1",
+					Containers: []v1.Container{
+						{
+							Name: "test-container-1",
+							Resources: v1.ResourceRequirements{
+								Requests: v1.ResourceList{
+									v1.ResourceName(v1.ResourceCPU):    resource.MustParse("2"),
+									v1.ResourceName(v1.ResourceMemory): resource.MustParse("3M"),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func TestKubeClient_GetNodesLimits(t *testing.T) {
+	type fields struct {
+		cli kubernetes.Interface
+	}
+
+	fieldsInstance := fields{
+		cli: kfake.NewSimpleClientset(newNodes(), newPods()),
+	}
+
+	tests := []struct {
+		name    string
+		fields  fields
+		want    portainer.K8sNodesLimits
+		wantErr bool
+	}{
+		{
+			name:   "2 nodes 2 pods",
+			fields: fieldsInstance,
+			want: portainer.K8sNodesLimits{
+				"test-node-0": &portainer.K8sNodeLimits{
+					CPU:    1000,
+					Memory: 2000000,
+				},
+				"test-node-1": &portainer.K8sNodeLimits{
+					CPU:    1000,
+					Memory: 3000000,
+				},
+			},
+			wantErr: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			kcl := &KubeClient{
+				cli: tt.fields.cli,
+			}
+			got, err := kcl.GetNodesLimits()
+			if (err != nil) != tt.wantErr {
+				t.Errorf("GetNodesLimits() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("GetNodesLimits() got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/api/portainer.go b/api/portainer.go
index 2bc64277f..2d82edf3a 100644
--- a/api/portainer.go
+++ b/api/portainer.go
@@ -398,6 +398,13 @@ type (
 	// JobType represents a job type
 	JobType int
 
+	K8sNodeLimits struct {
+		CPU    int64 `json:"CPU"`
+		Memory int64 `json:"Memory"`
+	}
+
+	K8sNodesLimits map[string]*K8sNodeLimits
+
 	K8sNamespaceAccessPolicy struct {
 		UserAccessPolicies UserAccessPolicies `json:"UserAccessPolicies"`
 		TeamAccessPolicies TeamAccessPolicies `json:"TeamAccessPolicies"`
@@ -1220,6 +1227,7 @@ type (
 		CreateUserShellPod(ctx context.Context, serviceAccountName string) (*KubernetesShellPod, error)
 		StartExecProcess(token string, useAdminToken bool, namespace, podName, containerName string, command []string, stdin io.Reader, stdout io.Writer) error
 		NamespaceAccessPoliciesDeleteNamespace(namespace string) error
+		GetNodesLimits() (K8sNodesLimits, error)
 		GetNamespaceAccessPolicies() (map[string]K8sNamespaceAccessPolicy, error)
 		UpdateNamespaceAccessPolicies(accessPolicies map[string]K8sNamespaceAccessPolicy) error
 		DeleteRegistrySecret(registry *Registry, namespace string) error
diff --git a/app/kubernetes/models/nodes-limits/models.js b/app/kubernetes/models/nodes-limits/models.js
new file mode 100644
index 000000000..b2f94254b
--- /dev/null
+++ b/app/kubernetes/models/nodes-limits/models.js
@@ -0,0 +1,67 @@
+import _ from 'lodash-es';
+
+/**
+ * NodesLimits Model
+ */
+export class KubernetesNodesLimits {
+  constructor(nodesLimits) {
+    this.MaxCPU = 0;
+    this.MaxMemory = 0;
+    this.nodesLimits = this.convertCPU(nodesLimits);
+
+    this.calculateMaxCPUMemory();
+  }
+
+  convertCPU(nodesLimits) {
+    _.forEach(nodesLimits, (value) => {
+      if (value.CPU) {
+        value.CPU /= 1000.0;
+      }
+    });
+    return nodesLimits;
+  }
+
+  calculateMaxCPUMemory() {
+    const nodesLimitsArray = Object.values(this.nodesLimits);
+    // guard against an empty limits map: _.maxBy returns undefined on empty arrays
+    const maxCPU = _.maxBy(nodesLimitsArray, 'CPU');
+    const maxMemory = _.maxBy(nodesLimitsArray, 'Memory');
+    this.MaxCPU = maxCPU ? maxCPU.CPU : 0;
+    this.MaxMemory = maxMemory ? maxMemory.Memory : 0;
+  }
+
+  // check if there is enough cpu and memory to allocate containers in replica mode
+  overflowForReplica(cpu, memory, instances) {
+    _.forEach(this.nodesLimits, (value) => {
+      instances -= Math.min(Math.floor(value.CPU / cpu), Math.floor(value.Memory / memory));
+    });
+
+    return instances > 0;
+  }
+
+  // check if there is enough cpu and memory to allocate containers in global mode
+  overflowForGlobal(cpu, memory) {
+    let overflow = false;
+
+    _.forEach(this.nodesLimits, (value) => {
+      if (cpu > value.CPU || memory > value.Memory) {
+        overflow = true;
+      }
+    });
+
+    return overflow;
+  }
+
+  excludesPods(pods, cpuLimit, memoryLimit) {
+    const nodesLimits = this.nodesLimits;
+
+    _.forEach(pods, (value) => {
+      const node = value.Node;
+      if (node && nodesLimits[node]) {
+        nodesLimits[node].CPU += cpuLimit;
+        nodesLimits[node].Memory += memoryLimit;
+      }
+    });
+
+    this.calculateMaxCPUMemory();
+  }
+}
diff --git a/app/kubernetes/rest/nodesLimits.js b/app/kubernetes/rest/nodesLimits.js
new file mode 100644
index 000000000..d8eb59d93
--- /dev/null
+++ b/app/kubernetes/rest/nodesLimits.js
@@ -0,0 +1,21 @@
+import angular from 'angular';
+
+angular.module('portainer.kubernetes').factory('KubernetesNodesLimits', KubernetesNodesLimitsFactory);
+
+/* @ngInject */
+function KubernetesNodesLimitsFactory($resource, API_ENDPOINT_KUBERNETES, EndpointProvider) {
+  const url = API_ENDPOINT_KUBERNETES + '/:endpointId/nodes_limits';
+  return $resource(
+    url,
+    {
+      endpointId: EndpointProvider.endpointID,
+    },
+    {
+      get: {
+        method: 'GET',
+        ignoreLoadingBar: true,
+        transformResponse: (data) => ({ data: JSON.parse(data) }),
+      },
+    }
+  );
+}
diff --git a/app/kubernetes/services/nodesLimitsService.js b/app/kubernetes/services/nodesLimitsService.js
new file mode 100644
index 000000000..14a4f3275
--- /dev/null
+++ b/app/kubernetes/services/nodesLimitsService.js
@@ -0,0 +1,25 @@
+import angular from 'angular';
+import PortainerError from 'Portainer/error';
+import { KubernetesNodesLimits } from 'Kubernetes/models/nodes-limits/models';
+
+class KubernetesNodesLimitsService {
+  /* @ngInject */
+  constructor(KubernetesNodesLimits) {
+    this.KubernetesNodesLimits = KubernetesNodesLimits;
+  }
+
+  /**
+   * GET
+   */
+  async get() {
+    try {
+      const nodesLimits = await this.KubernetesNodesLimits.get().$promise;
+      return new KubernetesNodesLimits(nodesLimits.data);
+    } catch (err) {
+      throw new PortainerError('Unable to retrieve nodes limits', err);
+    }
+  }
+}
+
+export default KubernetesNodesLimitsService;
+angular.module('portainer.kubernetes').service('KubernetesNodesLimitsService', KubernetesNodesLimitsService);
diff --git a/app/kubernetes/views/applications/create/createApplication.html b/app/kubernetes/views/applications/create/createApplication.html
index 62ca91281..2a432fcf1 100644
--- a/app/kubernetes/views/applications/create/createApplication.html
+++ b/app/kubernetes/views/applications/create/createApplication.html
@@ -722,6 +722,13 @@
 </div>
+
+
 </div>