Mirror of https://github.com/portainer/portainer.git, synced 2025-07-24 15:59:41 +02:00
feat(namespace): migrate create ns to react [EE-2226] (#10377)
This commit is contained in:
parent 31bcba96c6, commit 7218eb0892
83 changed files with 1869 additions and 358 deletions
@@ -8,7 +8,9 @@ import (

	"github.com/pkg/errors"
	portainer "github.com/portainer/portainer/api"
	models "github.com/portainer/portainer/api/http/models/kubernetes"
	"github.com/rs/zerolog/log"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -61,14 +63,59 @@ func (kcl *KubeClient) GetNamespace(name string) (portainer.K8sNamespaceInfo, error) {

// CreateNamespace creates a new namespace in a k8s endpoint.
func (kcl *KubeClient) CreateNamespace(info models.K8sNamespaceDetails) error {
	var ns v1.Namespace
	ns.Name = info.Name
	ns.Annotations = info.Annotations

	resourceQuota := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "portainer-rq-" + info.Name,
			Namespace: info.Name,
		},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{},
		},
	}

	_, err := kcl.cli.CoreV1().Namespaces().Create(context.Background(), &ns, metav1.CreateOptions{})
	if err != nil {
		log.Error().
			Err(err).
			Str("Namespace", info.Name).
			Interface("ResourceQuota", resourceQuota).
			Msg("Failed to create the namespace due to a resource quota issue.")
		return err
	}

	if info.ResourceQuota != nil {
		log.Info().Msgf("Creating resource quota for namespace %s", info.Name)
		log.Debug().Msgf("Creating resource quota with details: %+v", info.ResourceQuota)

		if info.ResourceQuota.Enabled {
			memory := resource.MustParse(info.ResourceQuota.Memory)
			cpu := resource.MustParse(info.ResourceQuota.CPU)
			if memory.Value() > 0 {
				memQuota := memory
				resourceQuota.Spec.Hard[v1.ResourceLimitsMemory] = memQuota
				resourceQuota.Spec.Hard[v1.ResourceRequestsMemory] = memQuota
			}

			if cpu.Value() > 0 {
				cpuQuota := cpu
				resourceQuota.Spec.Hard[v1.ResourceLimitsCPU] = cpuQuota
				resourceQuota.Spec.Hard[v1.ResourceRequestsCPU] = cpuQuota
			}
		}

		_, err := kcl.cli.CoreV1().ResourceQuotas(info.Name).Create(context.Background(), resourceQuota, metav1.CreateOptions{})
		if err != nil {
			log.Error().Msgf("Failed to create resource quota for namespace %s: %s", info.Name, err)
			return err
		}
	}

	return nil
}

func isSystemNamespace(namespace v1.Namespace) bool {
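For context, a minimal sketch of how this new helper might be invoked. The quota struct's name and fields (K8sResourceQuota, Enabled, Memory, CPU) are inferred from the usage above rather than confirmed against the models package, and the caller function is hypothetical:

	// Hypothetical caller; assumes a *KubeClient constructed elsewhere in the package.
	func createDevNamespace(kcl *KubeClient) error {
		info := models.K8sNamespaceDetails{
			Name:        "dev",
			Annotations: map[string]string{"owner": "platform-team"},
			// Struct name and fields inferred from the info.ResourceQuota usage above.
			ResourceQuota: &models.K8sResourceQuota{
				Enabled: true,
				Memory:  "1Gi",  // Kubernetes resource.Quantity string format
				CPU:     "500m", // 0.5 cores
			},
		}
		return kcl.CreateNamespace(info)
	}

Note that resource.MustParse panics on a malformed quantity string, so Memory and CPU are expected to be validated before they reach this layer.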
@@ -4,6 +4,7 @@ import (

	"context"

	portainer "github.com/portainer/portainer/api"
	"github.com/rs/zerolog/log"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -42,3 +43,62 @@ func (kcl *KubeClient) GetNodesLimits() (portainer.K8sNodesLimits, error) {

	return nodesLimits, nil
}

// GetMaxResourceLimits gets the maximum CPU and memory limits (unused resources) of all nodes
// in the current k8s environment (endpoint) connection, minus the accumulated resource quotas
// of every namespace except the one being edited (skipNamespace).
// If skipNamespace is set to "", all namespaces are considered.
func (client *KubeClient) GetMaxResourceLimits(skipNamespace string, overCommitEnabled bool, resourceOverCommitPercent int) (portainer.K8sNodeLimits, error) {
	limits := portainer.K8sNodeLimits{}
	nodes, err := client.cli.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return limits, err
	}

	// accumulated node limits
	memory := int64(0)
	for _, node := range nodes.Items {
		limits.CPU += node.Status.Allocatable.Cpu().MilliValue()
		memory += node.Status.Allocatable.Memory().Value()
	}
	limits.Memory = memory / 1000000 // B to MB

	if !overCommitEnabled {
		namespaces, err := client.cli.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return limits, err
		}

		reservedPercent := float64(resourceOverCommitPercent) / 100.0

		reserved := portainer.K8sNodeLimits{}
		for _, namespace := range namespaces.Items {
			// skip the namespace we're editing
			if namespace.Name == skipNamespace {
				continue
			}

			// subtract the accumulated resource quotas of every other namespace
			resourceQuota, err := client.cli.CoreV1().ResourceQuotas(namespace.Name).List(context.TODO(), metav1.ListOptions{})
			if err != nil {
				log.Debug().Msgf("error getting resourcequota for namespace %s: %s", namespace.Name, err)
				continue // skip it
			}

			for _, rq := range resourceQuota.Items {
				hardLimits := rq.Status.Hard
				for resourceType, limit := range hardLimits {
					switch resourceType {
					case "limits.cpu":
						reserved.CPU += limit.MilliValue()
					case "limits.memory":
						reserved.Memory += limit.ScaledValue(6) // MB
					}
				}
			}
		}

		limits.CPU = limits.CPU - int64(float64(limits.CPU)*reservedPercent) - reserved.CPU
		limits.Memory = limits.Memory - int64(float64(limits.Memory)*reservedPercent) - reserved.Memory
	}

	return limits, nil
}
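To make the arithmetic at the end of GetMaxResourceLimits concrete, here is a small, self-contained illustration; the numbers are made up, not taken from the commit:

	package main

	import "fmt"

	func main() {
		// Summed allocatable capacity across nodes: 4000 millicores, 8192 MB.
		nodeCPU, nodeMemMB := int64(4000), int64(8192)
		// resourceOverCommitPercent = 20 -> reservedPercent = 0.20
		reservedPercent := 0.20
		// Hard "limits.cpu"/"limits.memory" already claimed by other namespaces' quotas.
		reservedCPU, reservedMemMB := int64(500), int64(1024)

		maxCPU := nodeCPU - int64(float64(nodeCPU)*reservedPercent) - reservedCPU
		maxMemMB := nodeMemMB - int64(float64(nodeMemMB)*reservedPercent) - reservedMemMB

		fmt.Println(maxCPU, maxMemMB) // 2700 5530
	}

The MB figures line up with the function's conventions: allocatable memory is divided by 1,000,000 (decimal megabytes), and quota limits are read with ScaledValue(6), which likewise scales to 10^6 bytes.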