
refactor(k8s): namespace core logic (#12142)

Co-authored-by: testA113 <aliharriss1995@gmail.com>
Co-authored-by: Anthony Lapenna <anthony.lapenna@portainer.io>
Co-authored-by: James Carppe <85850129+jamescarppe@users.noreply.github.com>
Co-authored-by: Ali <83188384+testA113@users.noreply.github.com>
Steven Kang 2024-10-01 14:15:51 +13:00 committed by GitHub
parent da010f3d08
commit ea228c3d6d
276 changed files with 9241 additions and 3361 deletions


@@ -2,6 +2,7 @@ package cli
import (
"context"
"fmt"
portainer "github.com/portainer/portainer/api"
@@ -21,24 +22,27 @@ func (kcl *KubeClient) NamespaceAccessPoliciesDeleteNamespace(ns string) error {
return errors.WithMessage(err, "failed to fetch access policies")
}
delete(policies, ns)
if policies != nil {
delete(policies, ns)
return kcl.UpdateNamespaceAccessPolicies(policies)
}
return kcl.UpdateNamespaceAccessPolicies(policies)
return nil
}
// GetNamespaceAccessPolicies gets the namespace access policies
// from config maps in the portainer namespace
func (kcl *KubeClient) GetNamespaceAccessPolicies() (map[string]portainer.K8sNamespaceAccessPolicy, error) {
configMap, err := kcl.cli.CoreV1().ConfigMaps(portainerNamespace).Get(context.TODO(), portainerConfigMapName, metav1.GetOptions{})
if k8serrors.IsNotFound(err) {
return nil, nil
} else if err != nil {
if err != nil {
if k8serrors.IsNotFound(err) {
return nil, nil
}
return nil, err
}
accessData := configMap.Data[portainerConfigMapAccessPoliciesKey]
var policies map[string]portainer.K8sNamespaceAccessPolicy
policies := map[string]portainer.K8sNamespaceAccessPolicy{}
err = json.Unmarshal([]byte(accessData), &policies)
if err != nil {
return nil, err
@@ -109,10 +113,6 @@ func (kcl *KubeClient) UpdateNamespaceAccessPolicies(accessPolicies map[string]p
}
configMap, err := kcl.cli.CoreV1().ConfigMaps(portainerNamespace).Get(context.TODO(), portainerConfigMapName, metav1.GetOptions{})
if k8serrors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
@@ -122,3 +122,20 @@ func (kcl *KubeClient) UpdateNamespaceAccessPolicies(accessPolicies map[string]p
return err
}
// GetNonAdminNamespaces retrieves the namespaces a non-admin user has access to; the default namespace is always included.
func (kcl *KubeClient) GetNonAdminNamespaces(userID int) ([]string, error) {
accessPolicies, err := kcl.GetNamespaceAccessPolicies()
if err != nil {
return nil, fmt.Errorf("an error occurred during the getNonAdminNamespaces operation, unable to get namespace access policies via portainer-config. check if portainer-config configMap exists in the Kubernetes cluster: %w", err)
}
nonAdminNamespaces := []string{defaultNamespace}
for namespace, accessPolicy := range accessPolicies {
if hasUserAccessToNamespace(userID, nil, accessPolicy) {
nonAdminNamespaces = append(nonAdminNamespaces, namespace)
}
}
return nonAdminNamespaces, nil
}
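Note: hasUserAccessToNamespace is an existing package helper that this diff only calls. A rough sketch of what such a check can look like, assuming the policy type exposes per-user and per-team access maps as the portainer API types do (hypothetical reconstruction, not the committed code):

func hasUserAccessToNamespace(userID int, teamIDs []int, policy portainer.K8sNamespaceAccessPolicy) bool {
	// direct user grant
	if _, ok := policy.UserAccessPolicies[portainer.UserID(userID)]; ok {
		return true
	}
	// grant through any of the user's teams
	for _, teamID := range teamIDs {
		if _, ok := policy.TeamAccessPolicies[portainer.TeamID(teamID)]; ok {
			return true
		}
	}
	return false
}

The nil second argument passed by GetNonAdminNamespaces would then mean the team check is skipped.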


@@ -2,153 +2,440 @@ package cli
import (
"context"
"strings"
"fmt"
models "github.com/portainer/portainer/api/http/models/kubernetes"
"github.com/rs/zerolog/log"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
)
// GetApplications gets a list of kubernetes workloads (or applications) by kind. If kind is not specified, gets them all.
func (kcl *KubeClient) GetApplications(namespace, kind string) ([]models.K8sApplication, error) {
applicationList := []models.K8sApplication{}
listOpts := metav1.ListOptions{}
// GetApplications gets a list of kubernetes workloads (or applications) across all namespaces in the cluster.
// if the user is an admin, all namespaces in the current k8s environment(endpoint) are fetched using the fetchApplications function.
// otherwise, namespaces the non-admin user has access to will be used to filter the applications based on the allowed namespaces.
func (kcl *KubeClient) GetApplications(namespace, nodeName string, withDependencies bool) ([]models.K8sApplication, error) {
if kcl.IsKubeAdmin {
return kcl.fetchApplications(namespace, nodeName, withDependencies)
}
if kind == "" || strings.EqualFold(kind, "deployment") {
deployments, err := kcl.cli.AppsV1().Deployments(namespace).List(context.TODO(), listOpts)
return kcl.fetchApplicationsForNonAdmin(namespace, nodeName, withDependencies)
}
// fetchApplications fetches the applications across all namespaces in the cluster.
// This function is called when the user is an admin.
func (kcl *KubeClient) fetchApplications(namespace, nodeName string, withDependencies bool) ([]models.K8sApplication, error) {
podListOptions := metav1.ListOptions{}
if nodeName != "" {
podListOptions.FieldSelector = fmt.Sprintf("spec.nodeName=%s", nodeName)
}
if !withDependencies {
// TODO: make sure not to fetch services in fetchAllApplicationsListResources from this call
pods, replicaSets, deployments, statefulSets, daemonSets, _, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions)
if err != nil {
return nil, err
}
for _, d := range deployments.Items {
applicationList = append(applicationList, models.K8sApplication{
UID: string(d.UID),
Name: d.Name,
Namespace: d.Namespace,
Kind: "Deployment",
Labels: d.Labels,
})
}
return kcl.convertPodsToApplications(pods, replicaSets, deployments, statefulSets, daemonSets, nil)
}
if kind == "" || strings.EqualFold(kind, "statefulset") {
statefulSets, err := kcl.cli.AppsV1().StatefulSets(namespace).List(context.TODO(), listOpts)
pods, replicaSets, deployments, statefulSets, daemonSets, services, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions)
if err != nil {
return nil, err
}
return kcl.convertPodsToApplications(pods, replicaSets, deployments, statefulSets, daemonSets, services)
}
// fetchApplicationsForNonAdmin fetches the applications in the namespaces the user has access to.
// This function is called when the user is not an admin.
func (kcl *KubeClient) fetchApplicationsForNonAdmin(namespace, nodeName string, withDependencies bool) ([]models.K8sApplication, error) {
log.Debug().Msgf("Fetching applications for non-admin user: %v", kcl.NonAdminNamespaces)
if len(kcl.NonAdminNamespaces) == 0 {
return nil, nil
}
podListOptions := metav1.ListOptions{}
if nodeName != "" {
podListOptions.FieldSelector = fmt.Sprintf("spec.nodeName=%s", nodeName)
}
if !withDependencies {
pods, replicaSets, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets(namespace, podListOptions)
if err != nil {
return nil, err
}
for _, s := range statefulSets.Items {
applicationList = append(applicationList, models.K8sApplication{
UID: string(s.UID),
Name: s.Name,
Namespace: s.Namespace,
Kind: "StatefulSet",
Labels: s.Labels,
})
return kcl.convertPodsToApplications(pods, replicaSets, nil, nil, nil, nil)
}
pods, replicaSets, deployments, statefulSets, daemonSets, services, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions)
if err != nil {
return nil, err
}
applications, err := kcl.convertPodsToApplications(pods, replicaSets, deployments, statefulSets, daemonSets, services)
if err != nil {
return nil, err
}
nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
results := make([]models.K8sApplication, 0)
for _, application := range applications {
if _, ok := nonAdminNamespaceSet[application.ResourcePool]; ok {
results = append(results, application)
}
}
if kind == "" || strings.EqualFold(kind, "daemonset") {
daemonSets, err := kcl.cli.AppsV1().DaemonSets(namespace).List(context.TODO(), listOpts)
if err != nil {
return nil, err
}
return results, nil
}
for _, d := range daemonSets.Items {
applicationList = append(applicationList, models.K8sApplication{
UID: string(d.UID),
Name: d.Name,
Namespace: d.Namespace,
Kind: "DaemonSet",
Labels: d.Labels,
})
}
}
// convertPodsToApplications processes pods and converts them to applications, ensuring uniqueness by owner reference.
func (kcl *KubeClient) convertPodsToApplications(pods []corev1.Pod, replicaSets []appsv1.ReplicaSet, deployments []appsv1.Deployment, statefulSets []appsv1.StatefulSet, daemonSets []appsv1.DaemonSet, services []corev1.Service) ([]models.K8sApplication, error) {
applications := []models.K8sApplication{}
processedOwners := make(map[string]struct{})
if kind == "" || strings.EqualFold(kind, "nakedpods") {
pods, _ := kcl.cli.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
for _, pod := range pods.Items {
naked := false
if len(pod.OwnerReferences) == 0 {
naked = true
} else {
managed := false
loop:
for _, ownerRef := range pod.OwnerReferences {
switch ownerRef.Kind {
case "Deployment", "DaemonSet", "ReplicaSet":
managed = true
break loop
}
}
if !managed {
naked = true
}
for _, pod := range pods {
if len(pod.OwnerReferences) > 0 {
ownerUID := string(pod.OwnerReferences[0].UID)
if _, exists := processedOwners[ownerUID]; exists {
continue
}
processedOwners[ownerUID] = struct{}{}
}
if naked {
applicationList = append(applicationList, models.K8sApplication{
UID: string(pod.UID),
Name: pod.Name,
Namespace: pod.Namespace,
Kind: "Pod",
Labels: pod.Labels,
})
application, err := kcl.ConvertPodToApplication(pod, replicaSets, deployments, statefulSets, daemonSets, services, true)
if err != nil {
return nil, err
}
if application != nil {
applications = append(applications, *application)
}
}
return applications, nil
}
// GetApplicationsResource returns the total resource requests and limits for all applications in a namespace.
// For a cluster-level result, set the namespace to "".
func (kcl *KubeClient) GetApplicationsResource(namespace, node string) (models.K8sApplicationResource, error) {
resource := models.K8sApplicationResource{}
podListOptions := metav1.ListOptions{}
if node != "" {
podListOptions.FieldSelector = fmt.Sprintf("spec.nodeName=%s", node)
}
pods, err := kcl.cli.CoreV1().Pods(namespace).List(context.Background(), podListOptions)
if err != nil {
return resource, err
}
for _, pod := range pods.Items {
for _, container := range pod.Spec.Containers {
resource.CPURequest += container.Resources.Requests.Cpu().MilliValue()
resource.CPULimit += container.Resources.Limits.Cpu().MilliValue()
resource.MemoryRequest += container.Resources.Requests.Memory().Value()
resource.MemoryLimit += container.Resources.Limits.Memory().Value()
}
}
return resource, nil
}
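The spec.nodeName filter above is a Kubernetes field selector, evaluated by the API server rather than the client. A minimal standalone illustration (the clientset variable and node name are assumptions):

// List only the pods scheduled on one node; pods on other nodes never leave the API server.
pods, err := clientset.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{
	FieldSelector: "spec.nodeName=worker-1", // hypothetical node name
})
if err != nil {
	log.Error().Err(err).Msg("listing pods by node failed")
} else {
	log.Debug().Msgf("found %d pods on worker-1", len(pods.Items))
}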
// convertApplicationResourceUnits converts the resource units from millicores to cores and from bytes to megabytes
func convertApplicationResourceUnits(resource models.K8sApplicationResource) models.K8sApplicationResource {
return models.K8sApplicationResource{
CPURequest: resource.CPURequest / 1000,
CPULimit: resource.CPULimit / 1000,
MemoryRequest: resource.MemoryRequest / 1024 / 1024,
MemoryLimit: resource.MemoryLimit / 1024 / 1024,
}
}
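A worked example of the conversion, assuming the resource fields are plain integers as the arithmetic implies; note that the division is integer division, so an aggregate CPU request below one full core truncates to zero:

// 250m CPU and 512Mi of memory, as accumulated by GetApplicationsResource.
in := models.K8sApplicationResource{
	CPURequest:    250,               // milli-cores
	MemoryRequest: 512 * 1024 * 1024, // bytes
}
out := convertApplicationResourceUnits(in)
// out.CPURequest == 0 (250/1000 truncates), out.MemoryRequest == 512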
// GetApplicationNamesFromConfigMap gets a list of application names that use a specific ConfigMap
// by checking all pods in the same namespace as the ConfigMap
func (kcl *KubeClient) GetApplicationNamesFromConfigMap(configMap models.K8sConfigMap, pods []corev1.Pod, replicaSets []appsv1.ReplicaSet) ([]string, error) {
applications := []string{}
for _, pod := range pods {
if pod.Namespace == configMap.Namespace {
if isPodUsingConfigMap(&pod, configMap.Name) {
application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, false)
if err != nil {
return nil, err
}
applications = append(applications, application.Name)
}
}
}
return applicationList, nil
return applications, nil
}
// GetApplication gets a kubernetes workload (application) by kind and name. If the kind is not recognised, an empty application is returned.
func (kcl *KubeClient) GetApplication(namespace, kind, name string) (models.K8sApplication, error) {
opts := metav1.GetOptions{}
switch strings.ToLower(kind) {
case "deployment":
d, err := kcl.cli.AppsV1().Deployments(namespace).Get(context.TODO(), name, opts)
if err != nil {
return models.K8sApplication{}, err
func (kcl *KubeClient) GetApplicationNamesFromSecret(secret models.K8sSecret, pods []corev1.Pod, replicaSets []appsv1.ReplicaSet) ([]string, error) {
applications := []string{}
for _, pod := range pods {
if pod.Namespace == secret.Namespace {
if isPodUsingSecret(&pod, secret.Name) {
application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, false)
if err != nil {
return nil, err
}
applications = append(applications, application.Name)
}
}
return models.K8sApplication{
UID: string(d.UID),
Name: d.Name,
Namespace: d.Namespace,
Kind: "Deployment",
Labels: d.Labels,
}, nil
case "statefulset":
s, err := kcl.cli.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, opts)
if err != nil {
return models.K8sApplication{}, err
}
return models.K8sApplication{
UID: string(s.UID),
Name: s.Name,
Namespace: s.Namespace,
Kind: "StatefulSet",
Labels: s.Labels,
}, nil
case "daemonset":
d, err := kcl.cli.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, opts)
if err != nil {
return models.K8sApplication{}, err
}
return models.K8sApplication{
UID: string(d.UID),
Name: d.Name,
Namespace: d.Namespace,
Kind: "DaemonSet",
Labels: d.Labels,
}, nil
}
return models.K8sApplication{}, nil
return applications, nil
}
// ConvertPodToApplication converts a pod to an application, updating owner references if necessary
func (kcl *KubeClient) ConvertPodToApplication(pod corev1.Pod, replicaSets []appsv1.ReplicaSet, deployments []appsv1.Deployment, statefulSets []appsv1.StatefulSet, daemonSets []appsv1.DaemonSet, services []corev1.Service, withResource bool) (*models.K8sApplication, error) {
if isReplicaSetOwner(pod) {
updateOwnerReferenceToDeployment(&pod, replicaSets)
}
application := createApplication(&pod, deployments, statefulSets, daemonSets, services)
if application.ID == "" && application.Name == "" {
return nil, nil
}
if withResource {
application.Resource = calculateResourceUsage(pod)
}
return &application, nil
}
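isReplicaSetOwner and updateOwnerReferenceToDeployment are pre-existing helpers that this diff relies on but does not show. A plausible sketch, assuming they inspect the pod's first owner reference (hypothetical, not the committed implementations):

func isReplicaSetOwner(pod corev1.Pod) bool {
	return len(pod.OwnerReferences) > 0 && pod.OwnerReferences[0].Kind == "ReplicaSet"
}

func updateOwnerReferenceToDeployment(pod *corev1.Pod, replicaSets []appsv1.ReplicaSet) {
	for _, rs := range replicaSets {
		// find the owning ReplicaSet and promote its own owner (the Deployment)
		if rs.Namespace == pod.Namespace && rs.Name == pod.OwnerReferences[0].Name && len(rs.OwnerReferences) > 0 {
			pod.OwnerReferences[0].Kind = rs.OwnerReferences[0].Kind
			pod.OwnerReferences[0].Name = rs.OwnerReferences[0].Name
			pod.OwnerReferences[0].UID = rs.OwnerReferences[0].UID
			return
		}
	}
}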
// createApplication creates a K8sApplication object from a pod
// it sets the application name, namespace, kind, image, stack id, stack name, and labels
func createApplication(pod *corev1.Pod, deployments []appsv1.Deployment, statefulSets []appsv1.StatefulSet, daemonSets []appsv1.DaemonSet, services []corev1.Service) models.K8sApplication {
kind := "Pod"
name := pod.Name
if len(pod.OwnerReferences) > 0 {
kind = pod.OwnerReferences[0].Kind
name = pod.OwnerReferences[0].Name
}
application := models.K8sApplication{
Services: []corev1.Service{},
Metadata: &models.Metadata{},
}
switch kind {
case "Deployment":
for _, deployment := range deployments {
if deployment.Name == name && deployment.Namespace == pod.Namespace {
application.ApplicationType = "Deployment"
application.Kind = "Deployment"
application.ID = string(deployment.UID)
application.ResourcePool = deployment.Namespace
application.Name = name
application.Image = deployment.Spec.Template.Spec.Containers[0].Image
application.ApplicationOwner = deployment.Labels["io.portainer.kubernetes.application.owner"]
application.StackID = deployment.Labels["io.portainer.kubernetes.application.stackid"]
application.StackName = deployment.Labels["io.portainer.kubernetes.application.stack"]
application.Labels = deployment.Labels
application.MatchLabels = deployment.Spec.Selector.MatchLabels
application.CreationDate = deployment.CreationTimestamp.Time
application.TotalPodsCount = int(deployment.Status.Replicas)
application.RunningPodsCount = int(deployment.Status.ReadyReplicas)
application.DeploymentType = "Replicated"
application.Metadata = &models.Metadata{
Labels: deployment.Labels,
}
break
}
}
case "StatefulSet":
for _, statefulSet := range statefulSets {
if statefulSet.Name == name && statefulSet.Namespace == pod.Namespace {
application.Kind = "StatefulSet"
application.ApplicationType = "StatefulSet"
application.ID = string(statefulSet.UID)
application.ResourcePool = statefulSet.Namespace
application.Name = name
application.Image = statefulSet.Spec.Template.Spec.Containers[0].Image
application.ApplicationOwner = statefulSet.Labels["io.portainer.kubernetes.application.owner"]
application.StackID = statefulSet.Labels["io.portainer.kubernetes.application.stackid"]
application.StackName = statefulSet.Labels["io.portainer.kubernetes.application.stack"]
application.Labels = statefulSet.Labels
application.MatchLabels = statefulSet.Spec.Selector.MatchLabels
application.CreationDate = statefulSet.CreationTimestamp.Time
application.TotalPodsCount = int(statefulSet.Status.Replicas)
application.RunningPodsCount = int(statefulSet.Status.ReadyReplicas)
application.DeploymentType = "Replicated"
application.Metadata = &models.Metadata{
Labels: statefulSet.Labels,
}
break
}
}
case "DaemonSet":
for _, daemonSet := range daemonSets {
if daemonSet.Name == name && daemonSet.Namespace == pod.Namespace {
application.Kind = "DaemonSet"
application.ApplicationType = "DaemonSet"
application.ID = string(daemonSet.UID)
application.ResourcePool = daemonSet.Namespace
application.Name = name
application.Image = daemonSet.Spec.Template.Spec.Containers[0].Image
application.ApplicationOwner = daemonSet.Labels["io.portainer.kubernetes.application.owner"]
application.StackID = daemonSet.Labels["io.portainer.kubernetes.application.stackid"]
application.StackName = daemonSet.Labels["io.portainer.kubernetes.application.stack"]
application.Labels = daemonSet.Labels
application.MatchLabels = daemonSet.Spec.Selector.MatchLabels
application.CreationDate = daemonSet.CreationTimestamp.Time
application.TotalPodsCount = int(daemonSet.Status.DesiredNumberScheduled)
application.RunningPodsCount = int(daemonSet.Status.NumberReady)
application.DeploymentType = "Global"
application.Metadata = &models.Metadata{
Labels: daemonSet.Labels,
}
break
}
}
case "Pod":
runningPodsCount := 1
if pod.Status.Phase != corev1.PodRunning {
runningPodsCount = 0
}
application.ApplicationType = "Pod"
application.Kind = "Pod"
application.ID = string(pod.UID)
application.ResourcePool = pod.Namespace
application.Name = pod.Name
application.Image = pod.Spec.Containers[0].Image
application.ApplicationOwner = pod.Labels["io.portainer.kubernetes.application.owner"]
application.StackID = pod.Labels["io.portainer.kubernetes.application.stackid"]
application.StackName = pod.Labels["io.portainer.kubernetes.application.stack"]
application.Labels = pod.Labels
application.MatchLabels = pod.Labels
application.CreationDate = pod.CreationTimestamp.Time
application.TotalPodsCount = 1
application.RunningPodsCount = runningPodsCount
application.DeploymentType = string(pod.Status.Phase)
application.Metadata = &models.Metadata{
Labels: pod.Labels,
}
}
if application.ID != "" && application.Name != "" && len(services) > 0 {
return updateApplicationWithService(application, services)
}
return application
}
// updateApplicationWithService updates the application with the services that match the application's selector match labels
// and are in the same namespace as the application
func updateApplicationWithService(application models.K8sApplication, services []corev1.Service) models.K8sApplication {
for _, service := range services {
serviceSelector := labels.SelectorFromSet(service.Spec.Selector)
if service.Namespace == application.ResourcePool && serviceSelector.Matches(labels.Set(application.MatchLabels)) {
application.ServiceType = string(service.Spec.Type)
application.Services = append(application.Services, service)
}
}
return application
}
// calculateResourceUsage calculates the resource usage for a pod
func calculateResourceUsage(pod corev1.Pod) models.K8sApplicationResource {
resource := models.K8sApplicationResource{}
for _, container := range pod.Spec.Containers {
resource.CPURequest += container.Resources.Requests.Cpu().MilliValue()
resource.CPULimit += container.Resources.Limits.Cpu().MilliValue()
resource.MemoryRequest += container.Resources.Requests.Memory().Value()
resource.MemoryLimit += container.Resources.Limits.Memory().Value()
}
return convertApplicationResourceUnits(resource)
}
// GetApplicationFromServiceSelector gets applications based on service selectors
// it matches the service selector with the pod labels
func (kcl *KubeClient) GetApplicationFromServiceSelector(pods []corev1.Pod, service models.K8sServiceInfo, replicaSets []appsv1.ReplicaSet) (*models.K8sApplication, error) {
servicesSelector := labels.SelectorFromSet(service.Selector)
if servicesSelector.Empty() {
return nil, nil
}
for _, pod := range pods {
if servicesSelector.Matches(labels.Set(pod.Labels)) {
if isReplicaSetOwner(pod) {
updateOwnerReferenceToDeployment(&pod, replicaSets)
}
return &models.K8sApplication{
Name: pod.OwnerReferences[0].Name,
Kind: pod.OwnerReferences[0].Kind,
}, nil
}
}
return nil, nil
}
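The matching above uses the apimachinery labels package: a selector built from a set matches any label set that is a superset of it. A self-contained check (assumes the labels import shown at the top of the file plus fmt):

selector := labels.SelectorFromSet(labels.Set{"app": "web"})
fmt.Println(selector.Matches(labels.Set{"app": "web", "tier": "frontend"})) // true
fmt.Println(selector.Matches(labels.Set{"app": "api"}))                     // false
fmt.Println(labels.SelectorFromSet(nil).Empty())                            // true, hence the early return above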
// GetApplicationConfigurationOwnersFromConfigMap gets a list of applications that use a specific ConfigMap
// by checking all pods in the same namespace as the ConfigMap
func (kcl *KubeClient) GetApplicationConfigurationOwnersFromConfigMap(configMap models.K8sConfigMap, pods []corev1.Pod, replicaSets []appsv1.ReplicaSet) ([]models.K8sConfigurationOwnerResource, error) {
configurationOwners := []models.K8sConfigurationOwnerResource{}
for _, pod := range pods {
if pod.Namespace == configMap.Namespace {
if isPodUsingConfigMap(&pod, configMap.Name) {
application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, false)
if err != nil {
return nil, err
}
if application != nil {
configurationOwners = append(configurationOwners, models.K8sConfigurationOwnerResource{
Name: application.Name,
ResourceKind: application.Kind,
Id: application.UID,
})
}
}
}
}
return configurationOwners, nil
}
// GetApplicationConfigurationOwnersFromSecret gets a list of applications that use a specific Secret
// by checking all pods in the same namespace as the Secret
func (kcl *KubeClient) GetApplicationConfigurationOwnersFromSecret(secret models.K8sSecret, pods []corev1.Pod, replicaSets []appsv1.ReplicaSet) ([]models.K8sConfigurationOwnerResource, error) {
configurationOwners := []models.K8sConfigurationOwnerResource{}
for _, pod := range pods {
if pod.Namespace == secret.Namespace {
if isPodUsingSecret(&pod, secret.Name) {
application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, false)
if err != nil {
return nil, err
}
if application != nil {
configurationOwners = append(configurationOwners, models.K8sConfigurationOwnerResource{
Name: application.Name,
ResourceKind: application.Kind,
Id: application.UID,
})
}
}
}
}
return configurationOwners, nil
}


@@ -21,12 +21,11 @@ import (
)
const (
DefaultKubeClientQPS = 30
DefaultKubeClientBurst = 100
defaultKubeClientQPS = 30
defaultKubeClientBurst = 100
maxConcurrency = 30
)
const maxConcurrency = 30
type (
// ClientFactory is used to create Kubernetes clients
ClientFactory struct {
@@ -34,17 +33,17 @@ type (
reverseTunnelService portainer.ReverseTunnelService
signatureService portainer.DigitalSignatureService
instanceID string
endpointClients map[string]*KubeClient
endpointProxyClients *cache.Cache
AddrHTTPS string
mu sync.Mutex
}
// KubeClient represent a service used to execute Kubernetes operations
KubeClient struct {
cli kubernetes.Interface
instanceID string
mu sync.Mutex
cli kubernetes.Interface
instanceID string
mu sync.Mutex
IsKubeAdmin bool
NonAdminNamespaces []string
}
)
@@ -70,7 +69,6 @@ func NewClientFactory(signatureService portainer.DigitalSignatureService, revers
signatureService: signatureService,
reverseTunnelService: reverseTunnelService,
instanceID: instanceID,
endpointClients: make(map[string]*KubeClient),
endpointProxyClients: cache.New(timeout, timeout),
AddrHTTPS: addrHTTPS,
}, nil
@@ -80,42 +78,33 @@ func (factory *ClientFactory) GetInstanceID() (instanceID string) {
return factory.instanceID
}
// Remove the cached kube client so a new one can be created
func (factory *ClientFactory) RemoveKubeClient(endpointID portainer.EndpointID) {
factory.mu.Lock()
delete(factory.endpointClients, strconv.Itoa(int(endpointID)))
factory.mu.Unlock()
// Clear removes all cached kube clients
func (factory *ClientFactory) ClearClientCache() {
log.Debug().Msgf("kubernetes namespace permissions have changed, clearing the client cache")
factory.endpointProxyClients.Flush()
}
// GetKubeClient checks if an existing client is already registered for the environment(endpoint) and returns it if one is found.
// If no client is registered, it will create a new client, register it, and return it.
func (factory *ClientFactory) GetKubeClient(endpoint *portainer.Endpoint) (*KubeClient, error) {
factory.mu.Lock()
key := strconv.Itoa(int(endpoint.ID))
if client, ok := factory.endpointClients[key]; ok {
factory.mu.Unlock()
return client, nil
}
factory.mu.Unlock()
// Remove the cached kube client so a new one can be created
func (factory *ClientFactory) RemoveKubeClient(endpointID portainer.EndpointID) {
factory.endpointProxyClients.Delete(strconv.Itoa(int(endpointID)))
}
// EE-6901: Do not lock
client, err := factory.createCachedAdminKubeClient(endpoint)
// GetPrivilegedKubeClient checks if an existing client is already registered for the environment(endpoint) and returns it if one is found.
// If no client is registered, it will create a new client, register it, and return it.
func (factory *ClientFactory) GetPrivilegedKubeClient(endpoint *portainer.Endpoint) (*KubeClient, error) {
key := strconv.Itoa(int(endpoint.ID))
pcl, ok := factory.endpointProxyClients.Get(key)
if ok {
return pcl.(*KubeClient), nil
}
kcl, err := factory.createCachedPrivilegedKubeClient(endpoint)
if err != nil {
return nil, err
}
factory.mu.Lock()
defer factory.mu.Unlock()
// The lock was released before the client was created,
// so we need to check again
if c, ok := factory.endpointClients[key]; ok {
return c, nil
}
factory.endpointClients[key] = client
return client, nil
factory.endpointProxyClients.Set(key, kcl, cache.DefaultExpiration)
return kcl, nil
}
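Design note: the mutex-guarded endpointClients map is gone; privileged clients now live in the same go-cache store as the per-user proxy clients, which is safe for concurrent use and adds a TTL. Two racing callers may each build a client and the later Set wins, which is presumably acceptable since the clients are stateless wrappers. A hypothetical call site:

kcl, err := factory.GetPrivilegedKubeClient(endpoint)
if err != nil {
	return err
}
if _, err := kcl.GetNamespaces(); err != nil {
	return err
}
// after namespace permissions change, invalidate everything so the next
// request rebuilds clients with fresh access information
factory.ClearClientCache()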
// GetProxyKubeClient retrieves a KubeClient from the cache. You should be
@@ -123,46 +112,47 @@ func (factory *ClientFactory) GetKubeClient(endpoint *portainer.Endpoint) (*Kube
// kubernetes middleware.
func (factory *ClientFactory) GetProxyKubeClient(endpointID, userID string) (*KubeClient, bool) {
client, ok := factory.endpointProxyClients.Get(endpointID + "." + userID)
if !ok {
return nil, false
if ok {
return client.(*KubeClient), true
}
return client.(*KubeClient), true
return nil, false
}
// SetProxyKubeClient stores a kubeclient in the cache.
func (factory *ClientFactory) SetProxyKubeClient(endpointID, userID string, cli *KubeClient) {
factory.endpointProxyClients.Set(endpointID+"."+userID, cli, 0)
factory.endpointProxyClients.Set(endpointID+"."+userID, cli, cache.DefaultExpiration)
}
// CreateKubeClientFromKubeConfig creates a KubeClient from a clusterID, and
// Kubernetes config.
func (factory *ClientFactory) CreateKubeClientFromKubeConfig(clusterID string, kubeConfig []byte) (*KubeClient, error) {
func (factory *ClientFactory) CreateKubeClientFromKubeConfig(clusterID string, kubeConfig []byte, IsKubeAdmin bool, NonAdminNamespaces []string) (*KubeClient, error) {
config, err := clientcmd.NewClientConfigFromBytes(kubeConfig)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create a client config from kubeconfig: %w", err)
}
cliConfig, err := config.ClientConfig()
clientConfig, err := config.ClientConfig()
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get the complete client config from kubeconfig: %w", err)
}
cliConfig.QPS = DefaultKubeClientQPS
cliConfig.Burst = DefaultKubeClientBurst
clientConfig.QPS = defaultKubeClientQPS
clientConfig.Burst = defaultKubeClientBurst
cli, err := kubernetes.NewForConfig(cliConfig)
cli, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create a new clientset for the given config: %w", err)
}
return &KubeClient{
cli: cli,
instanceID: factory.instanceID,
cli: cli,
instanceID: factory.instanceID,
IsKubeAdmin: IsKubeAdmin,
NonAdminNamespaces: NonAdminNamespaces,
}, nil
}
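A hypothetical call site for the widened signature; the cluster ID, kubeconfig path, and namespace list are made-up values:

kubeConfig, err := os.ReadFile("/tmp/user.kubeconfig") // hypothetical path
if err != nil {
	return err
}
// false marks the client as non-admin; the namespace list scopes what it may see
kcl, err := factory.CreateKubeClientFromKubeConfig("cluster-1", kubeConfig, false, []string{"team-a"})
if err != nil {
	return err
}
configMaps, err := kcl.GetConfigMaps("team-a")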
func (factory *ClientFactory) createCachedAdminKubeClient(endpoint *portainer.Endpoint) (*KubeClient, error) {
func (factory *ClientFactory) createCachedPrivilegedKubeClient(endpoint *portainer.Endpoint) (*KubeClient, error) {
cli, err := factory.CreateClient(endpoint)
if err != nil {
return nil, err
@@ -235,8 +225,8 @@ func (factory *ClientFactory) buildAgentConfig(endpoint *portainer.Endpoint) (*r
}
config.Insecure = true
config.QPS = DefaultKubeClientQPS
config.Burst = DefaultKubeClientBurst
config.QPS = defaultKubeClientQPS
config.Burst = defaultKubeClientBurst
config.Wrap(func(rt http.RoundTripper) http.RoundTripper {
return &agentHeaderRoundTripper{
@@ -251,7 +241,7 @@
func (factory *ClientFactory) buildEdgeConfig(endpoint *portainer.Endpoint) (*rest.Config, error) {
tunnelAddr, err := factory.reverseTunnelService.TunnelAddr(endpoint)
if err != nil {
return nil, errors.Wrap(err, "failed activating tunnel")
return nil, errors.Wrap(err, "failed to activate the chisel reverse tunnel. check if the tunnel port is open at the portainer instance")
}
endpointURL := fmt.Sprintf("http://%s/kubernetes", tunnelAddr)
@@ -266,8 +256,8 @@ func (factory *ClientFactory) buildEdgeConfig(endpoint *portainer.Endpoint) (*re
}
config.Insecure = true
config.QPS = DefaultKubeClientQPS
config.Burst = DefaultKubeClientBurst
config.QPS = defaultKubeClientQPS
config.Burst = defaultKubeClientBurst
config.Wrap(func(rt http.RoundTripper) http.RoundTripper {
return &agentHeaderRoundTripper{
@@ -294,8 +284,8 @@ func buildLocalConfig() (*rest.Config, error) {
return nil, err
}
config.QPS = DefaultKubeClientQPS
config.Burst = DefaultKubeClientBurst
config.QPS = defaultKubeClientQPS
config.Burst = defaultKubeClientBurst
return config, nil
}


@@ -0,0 +1,43 @@
package cli
import (
"context"
"fmt"
models "github.com/portainer/portainer/api/http/models/kubernetes"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetClusterRoles gets all the clusterRoles at the cluster level in a k8s endpoint.
// It returns a list of K8sClusterRole objects.
func (kcl *KubeClient) GetClusterRoles() ([]models.K8sClusterRole, error) {
if kcl.IsKubeAdmin {
return kcl.fetchClusterRoles()
}
return []models.K8sClusterRole{}, fmt.Errorf("non-admin users are not allowed to access cluster roles")
}
// fetchClusterRoles returns a list of all cluster roles in the cluster.
func (kcl *KubeClient) fetchClusterRoles() ([]models.K8sClusterRole, error) {
clusterRoles, err := kcl.cli.RbacV1().ClusterRoles().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
results := make([]models.K8sClusterRole, 0)
for _, clusterRole := range clusterRoles.Items {
results = append(results, parseClusterRole(clusterRole))
}
return results, nil
}
// parseClusterRole converts a rbacv1.ClusterRole object to a models.K8sClusterRole object.
func parseClusterRole(clusterRole rbacv1.ClusterRole) models.K8sClusterRole {
return models.K8sClusterRole{
Name: clusterRole.Name,
CreationDate: clusterRole.CreationTimestamp.Time,
}
}


@@ -0,0 +1,45 @@
package cli
import (
"context"
"fmt"
models "github.com/portainer/portainer/api/http/models/kubernetes"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetClusterRoleBindings gets all the clusterRoleBindings at the cluster level in a k8s endpoint.
// It returns a list of K8sClusterRoleBinding objects.
func (kcl *KubeClient) GetClusterRoleBindings() ([]models.K8sClusterRoleBinding, error) {
if kcl.IsKubeAdmin {
return kcl.fetchClusterRoleBindings()
}
return []models.K8sClusterRoleBinding{}, fmt.Errorf("non-admin users are not allowed to access cluster role bindings")
}
// fetchClusterRoleBindings returns a list of all cluster role bindings in the cluster.
func (kcl *KubeClient) fetchClusterRoleBindings() ([]models.K8sClusterRoleBinding, error) {
clusterRoleBindings, err := kcl.cli.RbacV1().ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
results := make([]models.K8sClusterRoleBinding, 0)
for _, clusterRoleBinding := range clusterRoleBindings.Items {
results = append(results, parseClusterRoleBinding(clusterRoleBinding))
}
return results, nil
}
// parseClusterRoleBinding converts a rbacv1.ClusterRoleBinding object to a models.K8sClusterRoleBinding object.
func parseClusterRoleBinding(clusterRoleBinding rbacv1.ClusterRoleBinding) models.K8sClusterRoleBinding {
return models.K8sClusterRoleBinding{
Name: clusterRoleBinding.Name,
RoleRef: clusterRoleBinding.RoleRef,
Subjects: clusterRoleBinding.Subjects,
CreationDate: clusterRoleBinding.CreationTimestamp.Time,
}
}


@@ -0,0 +1,161 @@
package cli
import (
"context"
"fmt"
"time"
models "github.com/portainer/portainer/api/http/models/kubernetes"
"github.com/rs/zerolog/log"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetConfigMaps gets all the ConfigMaps for a given namespace in a k8s endpoint.
// if the user is an admin, all configMaps in the current k8s environment(endpoint) are fetched using the fetchConfigMaps function.
// otherwise, namespaces the non-admin user has access to will be used to filter the configMaps based on the allowed namespaces.
func (kcl *KubeClient) GetConfigMaps(namespace string) ([]models.K8sConfigMap, error) {
if kcl.IsKubeAdmin {
return kcl.fetchConfigMaps(namespace)
}
return kcl.fetchConfigMapsForNonAdmin(namespace)
}
// fetchConfigMapsForNonAdmin fetches the configMaps in the namespaces the user has access to.
// This function is called when the user is not an admin.
func (kcl *KubeClient) fetchConfigMapsForNonAdmin(namespace string) ([]models.K8sConfigMap, error) {
log.Debug().Msgf("Fetching volumes for non-admin user: %v", kcl.NonAdminNamespaces)
if len(kcl.NonAdminNamespaces) == 0 {
return nil, nil
}
configMaps, err := kcl.fetchConfigMaps(namespace)
if err != nil {
return nil, err
}
nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
results := make([]models.K8sConfigMap, 0)
for _, configMap := range configMaps {
if _, ok := nonAdminNamespaceSet[configMap.Namespace]; ok {
results = append(results, configMap)
}
}
return results, nil
}
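buildNonAdminNamespacesMap is not part of this diff; from its call sites, it evidently turns the NonAdminNamespaces slice into a set for O(1) membership tests. A minimal sketch of that assumption:

func (kcl *KubeClient) buildNonAdminNamespacesMap() map[string]struct{} {
	set := make(map[string]struct{}, len(kcl.NonAdminNamespaces))
	for _, ns := range kcl.NonAdminNamespaces {
		set[ns] = struct{}{}
	}
	return set
}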
// fetchConfigMaps gets all the ConfigMaps for a given namespace in a k8s endpoint.
// the result is a list of config maps parsed into a K8sConfigMap struct.
func (kcl *KubeClient) fetchConfigMaps(namespace string) ([]models.K8sConfigMap, error) {
configMaps, err := kcl.cli.CoreV1().ConfigMaps(namespace).List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
results := []models.K8sConfigMap{}
for _, configMap := range configMaps.Items {
results = append(results, parseConfigMap(&configMap, false))
}
return results, nil
}
func (kcl *KubeClient) GetConfigMap(namespace, configMapName string) (models.K8sConfigMap, error) {
configMap, err := kcl.cli.CoreV1().ConfigMaps(namespace).Get(context.Background(), configMapName, metav1.GetOptions{})
if err != nil {
return models.K8sConfigMap{}, err
}
return parseConfigMap(configMap, true), nil
}
// parseConfigMap parses a k8s ConfigMap object into a K8sConfigMap struct.
// for get operation, withData will be set to true.
// otherwise, only metadata will be parsed.
func parseConfigMap(configMap *corev1.ConfigMap, withData bool) models.K8sConfigMap {
result := models.K8sConfigMap{
K8sConfiguration: models.K8sConfiguration{
UID: string(configMap.UID),
Name: configMap.Name,
Namespace: configMap.Namespace,
CreationDate: configMap.CreationTimestamp.Time.UTC().Format(time.RFC3339),
Annotations: configMap.Annotations,
Labels: configMap.Labels,
ConfigurationOwner: configMap.Labels[labelPortainerKubeConfigOwner],
ConfigurationOwnerId: configMap.Labels[labelPortainerKubeConfigOwnerId],
},
}
if withData {
result.Data = configMap.Data
}
return result
}
// CombineConfigMapsWithApplications combines the config maps with the applications that use them.
// the function fetches all the pods and replica sets in the cluster and checks if the config map is used by any of the pods.
// if the config map is used by a pod, the application that uses the pod is added to the config map.
// otherwise, the config map is returned as is.
func (kcl *KubeClient) CombineConfigMapsWithApplications(configMaps []models.K8sConfigMap) ([]models.K8sConfigMap, error) {
updatedConfigMaps := make([]models.K8sConfigMap, len(configMaps))
pods, replicaSets, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets("", metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("an error occurred during the CombineConfigMapsWithApplications operation, unable to fetch pods and replica sets. Error: %w", err)
}
for index, configMap := range configMaps {
updatedConfigMap := configMap
applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromConfigMap(configMap, pods, replicaSets)
if err != nil {
return nil, fmt.Errorf("an error occurred during the CombineConfigMapsWithApplications operation, unable to get applications from config map. Error: %w", err)
}
if len(applicationConfigurationOwners) > 0 {
updatedConfigMap.ConfigurationOwnerResources = applicationConfigurationOwners
updatedConfigMap.IsUsed = true
}
updatedConfigMaps[index] = updatedConfigMap
}
return updatedConfigMaps, nil
}
// CombineConfigMapWithApplications combines the config map with the applications that use it.
// the function fetches all the pods in the cluster and checks if the config map is used by any of the pods.
// it needs to check if the pods are owned by a replica set to determine if the pod is part of a deployment.
func (kcl *KubeClient) CombineConfigMapWithApplications(configMap models.K8sConfigMap) (models.K8sConfigMap, error) {
pods, err := kcl.cli.CoreV1().Pods(configMap.Namespace).List(context.Background(), metav1.ListOptions{})
if err != nil {
return models.K8sConfigMap{}, fmt.Errorf("an error occurred during the CombineConfigMapWithApplications operation, unable to get pods. Error: %w", err)
}
containsReplicaSetOwner := false
for _, pod := range pods.Items {
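// NOTE: only the first pod is inspected; the loop exits immediately, so all
// pods in the namespace are assumed to share the same owner kind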
containsReplicaSetOwner = isReplicaSetOwner(pod)
break
}
if containsReplicaSetOwner {
replicaSets, err := kcl.cli.AppsV1().ReplicaSets(configMap.Namespace).List(context.Background(), metav1.ListOptions{})
if err != nil {
return models.K8sConfigMap{}, fmt.Errorf("an error occurred during the CombineConfigMapWithApplications operation, unable to get replica sets. Error: %w", err)
}
applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromConfigMap(configMap, pods.Items, replicaSets.Items)
if err != nil {
return models.K8sConfigMap{}, fmt.Errorf("an error occurred during the CombineConfigMapWithApplications operation, unable to get applications from config map. Error: %w", err)
}
if len(applicationConfigurationOwners) > 0 {
configMap.ConfigurationOwnerResources = applicationConfigurationOwners
}
}
return configMap, nil
}


@@ -1,64 +0,0 @@
package cli
import (
"context"
"time"
models "github.com/portainer/portainer/api/http/models/kubernetes"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetConfigMapsAndSecrets gets all the ConfigMaps AND all the Secrets for a
// given namespace in a k8s endpoint. The result is a list of both config maps
// and secrets. The IsSecret boolean property indicates if a given struct is a
// secret or configmap.
func (kcl *KubeClient) GetConfigMapsAndSecrets(namespace string) ([]models.K8sConfigMapOrSecret, error) {
mapsClient := kcl.cli.CoreV1().ConfigMaps(namespace)
mapsList, err := mapsClient.List(context.Background(), v1.ListOptions{})
if err != nil {
return nil, err
}
// TODO: Applications
var combined []models.K8sConfigMapOrSecret
for _, m := range mapsList.Items {
var cm models.K8sConfigMapOrSecret
cm.UID = string(m.UID)
cm.Name = m.Name
cm.Namespace = m.Namespace
cm.Annotations = m.Annotations
cm.Data = m.Data
cm.CreationDate = m.CreationTimestamp.Time.UTC().Format(time.RFC3339)
cm.IsSecret = false
combined = append(combined, cm)
}
secretClient := kcl.cli.CoreV1().Secrets(namespace)
secretList, err := secretClient.List(context.Background(), v1.ListOptions{})
if err != nil {
return nil, err
}
for _, s := range secretList.Items {
var secret models.K8sConfigMapOrSecret
secret.UID = string(s.UID)
secret.Name = s.Name
secret.Namespace = s.Namespace
secret.Annotations = s.Annotations
secret.Data = msbToMss(s.Data)
secret.CreationDate = s.CreationTimestamp.Time.UTC().Format(time.RFC3339)
secret.IsSecret = true
secret.SecretType = string(s.Type)
combined = append(combined, secret)
}
return combined, nil
}
func msbToMss(msa map[string][]byte) map[string]string {
mss := make(map[string]string, len(msa))
for k, v := range msa {
mss[k] = string(v)
}
return mss
}


@@ -151,12 +151,16 @@ func getApplicationsCount(ctx context.Context, kcl *KubeClient, namespace string
}
// + (naked pods)
nakedPods, err := kcl.GetApplications(namespace, "nakedpods")
if err != nil {
return 0, err
}
// TODO: Implement fetching of naked pods
// This is to be reworked as part of the dashboard refactor
return count + int64(len(nakedPods)), nil
// nakedPods, err := kcl.GetApplications(namespace, "nakedpods")
// if err != nil {
// return 0, err
// }
// For now, we're not including naked pods in the count
return count, nil
}
// Get the total count of services for the given namespace


@@ -2,183 +2,252 @@ package cli
import (
"context"
"fmt"
"strings"
models "github.com/portainer/portainer/api/http/models/kubernetes"
"github.com/portainer/portainer/api/stacks/stackutils"
"github.com/rs/zerolog/log"
netv1 "k8s.io/api/networking/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (kcl *KubeClient) GetIngressControllers() (models.K8sIngressControllers, error) {
var controllers []models.K8sIngressController
// We know that each existing class points to a controller so we can start
// by collecting these easy ones.
classClient := kcl.cli.NetworkingV1().IngressClasses()
classList, err := classClient.List(context.Background(), metav1.ListOptions{})
ingressClasses, err := kcl.cli.NetworkingV1().IngressClasses().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
// We want to know which of these controllers is in use.
var ingresses []models.K8sIngressInfo
namespaces, err := kcl.GetNamespaces()
ingresses, err := kcl.GetIngresses("")
if err != nil {
return nil, err
}
for namespace := range namespaces {
t, err := kcl.GetIngresses(namespace)
if err != nil {
// User might not be able to list ingresses in system/not allowed
// namespaces.
log.Debug().Err(err).Msg("failed to list ingresses for the current user, skipped sending ingress")
continue
}
ingresses = append(ingresses, t...)
}
usedClasses := make(map[string]struct{})
for _, ingress := range ingresses {
usedClasses[ingress.ClassName] = struct{}{}
}
for _, class := range classList.Items {
var controller models.K8sIngressController
controller.Name = class.Spec.Controller
controller.ClassName = class.Name
// If the class is used mark it as such.
results := []models.K8sIngressController{}
for _, class := range ingressClasses.Items {
ingressClass := parseIngressClass(class)
if _, ok := usedClasses[class.Name]; ok {
controller.Used = true
ingressClass.Used = true
}
switch {
case strings.Contains(controller.Name, "nginx"):
controller.Type = "nginx"
case strings.Contains(controller.Name, "traefik"):
controller.Type = "traefik"
default:
controller.Type = "other"
}
controllers = append(controllers, controller)
results = append(results, ingressClass)
}
return results, nil
}
// fetchIngressClasses fetches all the ingress classes in a k8s endpoint.
func (kcl *KubeClient) fetchIngressClasses() ([]models.K8sIngressController, error) {
ingressClasses, err := kcl.cli.NetworkingV1().IngressClasses().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
var controllers []models.K8sIngressController
for _, ingressClass := range ingressClasses.Items {
controllers = append(controllers, parseIngressClass(ingressClass))
}
return controllers, nil
}
// parseIngressClass converts a k8s native ingress class object to a Portainer K8sIngressController object.
func parseIngressClass(ingressClass netv1.IngressClass) models.K8sIngressController {
ingressController := models.K8sIngressController{
Name: ingressClass.Spec.Controller,
ClassName: ingressClass.Name,
}
switch {
case strings.Contains(ingressController.Name, "nginx"):
ingressController.Type = "nginx"
case strings.Contains(ingressController.Name, "traefik"):
ingressController.Type = "traefik"
default:
ingressController.Type = "other"
}
return ingressController
}
// GetIngress gets an ingress in a given namespace in a k8s endpoint.
func (kcl *KubeClient) GetIngress(namespace, ingressName string) (models.K8sIngressInfo, error) {
ingress, err := kcl.cli.NetworkingV1().Ingresses(namespace).Get(context.Background(), ingressName, metav1.GetOptions{})
if err != nil {
return models.K8sIngressInfo{}, err
}
return parseIngress(*ingress), nil
}
// GetIngresses gets all the ingresses for a given namespace in a k8s endpoint.
func (kcl *KubeClient) GetIngresses(namespace string) ([]models.K8sIngressInfo, error) {
// Fetch ingress classes to build a map. We will later use the map to lookup
// each ingresses "type".
classes := make(map[string]string)
classClient := kcl.cli.NetworkingV1().IngressClasses()
classList, err := classClient.List(context.Background(), metav1.ListOptions{})
if kcl.IsKubeAdmin {
return kcl.fetchIngresses(namespace)
}
return kcl.fetchIngressesForNonAdmin(namespace)
}
// fetchIngressesForNonAdmin gets all the ingresses for non-admin users in a k8s endpoint.
func (kcl *KubeClient) fetchIngressesForNonAdmin(namespace string) ([]models.K8sIngressInfo, error) {
log.Debug().Msgf("Fetching ingresses for non-admin user: %v", kcl.NonAdminNamespaces)
if len(kcl.NonAdminNamespaces) == 0 {
return nil, nil
}
ingresses, err := kcl.fetchIngresses(namespace)
if err != nil {
return nil, err
}
for _, class := range classList.Items {
// Write the ingress classes "type" to our map.
classes[class.Name] = class.Spec.Controller
nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
results := make([]models.K8sIngressInfo, 0)
for _, ingress := range ingresses {
if _, ok := nonAdminNamespaceSet[ingress.Namespace]; ok {
results = append(results, ingress)
}
}
// Fetch each ingress.
ingressClient := kcl.cli.NetworkingV1().Ingresses(namespace)
ingressList, err := ingressClient.List(context.Background(), metav1.ListOptions{})
return results, nil
}
// fetchIngresses fetches all the ingresses for a given namespace in a k8s endpoint.
func (kcl *KubeClient) fetchIngresses(namespace string) ([]models.K8sIngressInfo, error) {
ingresses, err := kcl.cli.NetworkingV1().Ingresses(namespace).List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
var infos []models.K8sIngressInfo
for _, ingress := range ingressList.Items {
var info models.K8sIngressInfo
info.Name = ingress.Name
info.UID = string(ingress.UID)
info.Namespace = namespace
ingressClass := ingress.Spec.IngressClassName
info.ClassName = ""
if ingressClass != nil {
info.ClassName = *ingressClass
}
info.Type = classes[info.ClassName]
info.Annotations = ingress.Annotations
info.Labels = ingress.Labels
info.CreationDate = ingress.CreationTimestamp.Time
// Gather TLS information.
for _, v := range ingress.Spec.TLS {
var tls models.K8sIngressTLS
tls.Hosts = v.Hosts
tls.SecretName = v.SecretName
info.TLS = append(info.TLS, tls)
}
// Gather list of paths and hosts.
hosts := make(map[string]struct{})
for _, r := range ingress.Spec.Rules {
// We collect all exiting hosts in a map to avoid duplicates.
// Then, later convert it to a slice for the frontend.
hosts[r.Host] = struct{}{}
if r.HTTP == nil {
continue
}
// There are multiple paths per rule. We want to flatten the list
// for our frontend.
for _, p := range r.HTTP.Paths {
var path models.K8sIngressPath
path.IngressName = info.Name
path.Host = r.Host
path.Path = p.Path
if p.PathType != nil {
path.PathType = string(*p.PathType)
}
path.ServiceName = p.Backend.Service.Name
path.Port = int(p.Backend.Service.Port.Number)
info.Paths = append(info.Paths, path)
}
}
// Store list of hosts.
for host := range hosts {
info.Hosts = append(info.Hosts, host)
}
infos = append(infos, info)
ingressClasses, err := kcl.fetchIngressClasses()
if err != nil {
return nil, err
}
return infos, nil
results := []models.K8sIngressInfo{}
if len(ingresses.Items) == 0 {
return results, nil
}
for _, ingress := range ingresses.Items {
result := parseIngress(ingress)
if ingress.Spec.IngressClassName != nil {
result.Type = findUsedIngressFromIngressClasses(ingressClasses, *ingress.Spec.IngressClassName).Name
}
results = append(results, result)
}
return results, nil
}
// parseIngress converts a k8s native ingress object to a Portainer K8sIngressInfo object.
func parseIngress(ingress netv1.Ingress) models.K8sIngressInfo {
ingressClassName := ""
if ingress.Spec.IngressClassName != nil {
ingressClassName = *ingress.Spec.IngressClassName
}
result := models.K8sIngressInfo{
Name: ingress.Name,
Namespace: ingress.Namespace,
UID: string(ingress.UID),
Annotations: ingress.Annotations,
Labels: ingress.Labels,
CreationDate: ingress.CreationTimestamp.Time,
ClassName: ingressClassName,
}
for _, tls := range ingress.Spec.TLS {
result.TLS = append(result.TLS, models.K8sIngressTLS{
Hosts: tls.Hosts,
SecretName: tls.SecretName,
})
}
hosts := make(map[string]struct{})
for _, r := range ingress.Spec.Rules {
hosts[r.Host] = struct{}{}
if r.HTTP == nil {
continue
}
for _, p := range r.HTTP.Paths {
var path models.K8sIngressPath
path.IngressName = result.Name
path.Host = r.Host
path.Path = p.Path
if p.PathType != nil {
path.PathType = string(*p.PathType)
}
path.ServiceName = p.Backend.Service.Name
path.Port = int(p.Backend.Service.Port.Number)
result.Paths = append(result.Paths, path)
}
}
for host := range hosts {
result.Hosts = append(result.Hosts, host)
}
return result
}
// findUsedIngressFromIngressClasses searches a slice of ingress classes for the given class name and returns the matching ingress class if found.
func findUsedIngressFromIngressClasses(ingressClasses []models.K8sIngressController, className string) models.K8sIngressController {
for _, ingressClass := range ingressClasses {
if ingressClass.ClassName == className {
return ingressClass
}
}
return models.K8sIngressController{}
}
// CreateIngress creates a new ingress in a given namespace in a k8s endpoint.
func (kcl *KubeClient) CreateIngress(namespace string, info models.K8sIngressInfo, owner string) error {
ingressClient := kcl.cli.NetworkingV1().Ingresses(namespace)
var ingress netv1.Ingress
ingress.Name = info.Name
ingress.Namespace = info.Namespace
if info.ClassName != "" {
ingress.Spec.IngressClassName = &info.ClassName
ingress := kcl.convertToK8sIngress(info, owner)
_, err := kcl.cli.NetworkingV1().Ingresses(namespace).Create(context.Background(), &ingress, metav1.CreateOptions{})
if err != nil {
return err
}
ingress.Annotations = info.Annotations
if ingress.Labels == nil {
ingress.Labels = make(map[string]string)
}
ingress.Labels["io.portainer.kubernetes.ingress.owner"] = stackutils.SanitizeLabel(owner)
// Store TLS information.
var tls []netv1.IngressTLS
for _, i := range info.TLS {
return nil
}
// convertToK8sIngress converts a Portainer K8sIngressInfo object to a k8s native Ingress object.
// this is required for create and update operations.
func (kcl *KubeClient) convertToK8sIngress(info models.K8sIngressInfo, owner string) netv1.Ingress {
result := netv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: info.Name,
Namespace: info.Namespace,
Annotations: info.Annotations,
},
Spec: netv1.IngressSpec{
IngressClassName: &info.ClassName,
},
}
labels := make(map[string]string)
labels["io.portainer.kubernetes.ingress.owner"] = stackutils.SanitizeLabel(owner)
result.Labels = labels
tls := []netv1.IngressTLS{}
for _, t := range info.TLS {
tls = append(tls, netv1.IngressTLS{
Hosts: i.Hosts,
SecretName: i.SecretName,
Hosts: t.Hosts,
SecretName: t.SecretName,
})
}
ingress.Spec.TLS = tls
result.Spec.TLS = tls
// Parse "paths" into rules with paths.
rules := make(map[string][]netv1.HTTPIngressPath)
for _, path := range info.Paths {
pathType := netv1.PathType(path.PathType)
@@ -197,7 +266,7 @@ func (kcl *KubeClient) CreateIngress(namespace string, info models.K8sIngressInf
}
for rule, paths := range rules {
ingress.Spec.Rules = append(ingress.Spec.Rules, netv1.IngressRule{
result.Spec.Rules = append(result.Spec.Rules, netv1.IngressRule{
Host: rule,
IngressRuleValue: netv1.IngressRuleValue{
HTTP: &netv1.HTTPIngressRuleValue{
@@ -207,102 +276,86 @@ func (kcl *KubeClient) CreateIngress(namespace string, info models.K8sIngressInf
})
}
// Add rules for hosts that do not have paths,
// e.g. a default ingress rule without a path to support what we had in 2.15
for _, host := range info.Hosts {
if _, ok := rules[host]; !ok {
ingress.Spec.Rules = append(ingress.Spec.Rules, netv1.IngressRule{
result.Spec.Rules = append(result.Spec.Rules, netv1.IngressRule{
Host: host,
})
}
}
_, err := ingressClient.Create(context.Background(), &ingress, metav1.CreateOptions{})
return err
return result
}
// DeleteIngresses processes a K8sIngressDeleteRequest by deleting each ingress
// in its given namespace.
func (kcl *KubeClient) DeleteIngresses(reqs models.K8sIngressDeleteRequests) error {
var err error
for namespace := range reqs {
for _, ingress := range reqs[namespace] {
ingressClient := kcl.cli.NetworkingV1().Ingresses(namespace)
err = ingressClient.Delete(
err := kcl.cli.NetworkingV1().Ingresses(namespace).Delete(
context.Background(),
ingress,
metav1.DeleteOptions{},
)
if err != nil {
return err
}
}
}
return err
return nil
}
// UpdateIngress updates an existing ingress in a given namespace in a k8s endpoint.
func (kcl *KubeClient) UpdateIngress(namespace string, info models.K8sIngressInfo) error {
ingressClient := kcl.cli.NetworkingV1().Ingresses(namespace)
ingress, err := ingressClient.Get(context.Background(), info.Name, metav1.GetOptions{})
ingress := kcl.convertToK8sIngress(info, "")
_, err := kcl.cli.NetworkingV1().Ingresses(namespace).Update(context.Background(), &ingress, metav1.UpdateOptions{})
if err != nil {
return err
}
ingress.Name = info.Name
ingress.Namespace = info.Namespace
if info.ClassName != "" {
ingress.Spec.IngressClassName = &info.ClassName
}
ingress.Annotations = info.Annotations
return nil
}
// Store TLS information.
var tls []netv1.IngressTLS
for _, i := range info.TLS {
tls = append(tls, netv1.IngressTLS{
Hosts: i.Hosts,
SecretName: i.SecretName,
})
}
ingress.Spec.TLS = tls
// Parse "paths" into rules with paths.
rules := make(map[string][]netv1.HTTPIngressPath)
for _, path := range info.Paths {
pathType := netv1.PathType(path.PathType)
rules[path.Host] = append(rules[path.Host], netv1.HTTPIngressPath{
Path: path.Path,
PathType: &pathType,
Backend: netv1.IngressBackend{
Service: &netv1.IngressServiceBackend{
Name: path.ServiceName,
Port: netv1.ServiceBackendPort{
Number: int32(path.Port),
},
},
},
})
// CombineIngressWithService combines an ingress with a service that is being used by the ingress.
// this is required to display the service that is being used by the ingress in the UI edit view.
func (kcl *KubeClient) CombineIngressWithService(ingress models.K8sIngressInfo) (models.K8sIngressInfo, error) {
services, err := kcl.GetServices(ingress.Namespace)
if err != nil {
return models.K8sIngressInfo{}, fmt.Errorf("an error occurred during the CombineIngressWithService operation, unable to retrieve services from Kubernetes for a namespace-level user. Error: %w", err)
}
ingress.Spec.Rules = make([]netv1.IngressRule, 0)
for rule, paths := range rules {
ingress.Spec.Rules = append(ingress.Spec.Rules, netv1.IngressRule{
Host: rule,
IngressRuleValue: netv1.IngressRuleValue{
HTTP: &netv1.HTTPIngressRuleValue{
Paths: paths,
},
},
})
}
// Add rules for hosts that do not have paths,
// e.g. a default ingress rule without a path to support what we had in 2.15
for _, host := range info.Hosts {
if _, ok := rules[host]; !ok {
ingress.Spec.Rules = append(ingress.Spec.Rules, netv1.IngressRule{
Host: host,
})
serviceMap := kcl.buildServicesMap(services)
for pathIndex, path := range ingress.Paths {
if _, ok := serviceMap[path.ServiceName]; ok {
ingress.Paths[pathIndex].HasService = true
}
}
_, err = ingressClient.Update(context.Background(), ingress, metav1.UpdateOptions{})
return err
return ingress, nil
}
// CombineIngressesWithServices combines a list of ingresses with a list of services that are being used by the ingresses.
// this is required to display the services that are being used by the ingresses in the UI list view.
func (kcl *KubeClient) CombineIngressesWithServices(ingresses []models.K8sIngressInfo) ([]models.K8sIngressInfo, error) {
services, err := kcl.GetServices("")
if err != nil {
if k8serrors.IsUnauthorized(err) {
return nil, fmt.Errorf("an error occurred during the CombineIngressesWithServices operation, unauthorized access to the Kubernetes API. Error: %w", err)
}
return nil, fmt.Errorf("an error occurred during the CombineIngressesWithServices operation, unable to retrieve services from the Kubernetes for a cluster level user. Error: %w", err)
}
serviceMap := kcl.buildServicesMap(services)
for ingressIndex, ingress := range ingresses {
for pathIndex, path := range ingress.Paths {
if _, ok := serviceMap[path.ServiceName]; ok {
			ingresses[ingressIndex].Paths[pathIndex].HasService = true
}
}
}
return ingresses, nil
}
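
A sketch of how the list-view pipeline might call this, assuming a GetIngresses helper with the same namespace semantics as GetServices exists elsewhere in the package (an empty namespace meaning cluster-wide):

	// Hypothetical handler flow: fetch all ingresses, then flag paths whose
	// backing service actually exists, so the UI can mark dangling paths.
	ingresses, err := kcl.GetIngresses("")
	if err != nil {
		return err
	}
	ingresses, err = kcl.CombineIngressesWithServices(ingresses)
	if err != nil {
		return err
	}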

View file

@ -3,20 +3,27 @@ package cli
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/pkg/errors"
portainer "github.com/portainer/portainer/api"
models "github.com/portainer/portainer/api/http/models/kubernetes"
"github.com/portainer/portainer/api/stacks/stackutils"
httperror "github.com/portainer/portainer/pkg/libhttp/error"
"github.com/portainer/portainer/pkg/libhttp/response"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
systemNamespaceLabel = "io.portainer.kubernetes.namespace.system"
namespaceOwnerLabel = "io.portainer.kubernetes.resourcepool.owner"
namespaceNameLabel = "io.portainer.kubernetes.resourcepool.name"
)
func defaultSystemNamespaces() map[string]struct{} {
@ -29,24 +36,69 @@ func defaultSystemNamespaces() map[string]struct{} {
}
// GetNamespaces gets the namespaces in the current k8s environment(endpoint).
// if the user is an admin, all namespaces in the current k8s environment(endpoint) are fetched using the fetchNamespaces function.
// otherwise, namespaces the non-admin user has access to will be used to filter the namespaces based on the allowed namespaces.
func (kcl *KubeClient) GetNamespaces() (map[string]portainer.K8sNamespaceInfo, error) {
	if kcl.IsKubeAdmin {
		return kcl.fetchNamespaces()
	}
	return kcl.fetchNamespacesForNonAdmin()
}
// fetchNamespacesForNonAdmin gets the namespaces in the current k8s environment(endpoint) for the non-admin user.
func (kcl *KubeClient) fetchNamespacesForNonAdmin() (map[string]portainer.K8sNamespaceInfo, error) {
log.Debug().Msgf("Fetching namespaces for non-admin user: %v", kcl.NonAdminNamespaces)
if len(kcl.NonAdminNamespaces) == 0 {
return nil, nil
}
	namespaces, err := kcl.fetchNamespaces()
	if err != nil {
		return nil, fmt.Errorf("an error occurred during the fetchNamespacesForNonAdmin operation, unable to list namespaces for the non-admin user: %w", err)
	}

	nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
	results := make(map[string]portainer.K8sNamespaceInfo)
	for _, namespace := range namespaces {
		if _, exists := nonAdminNamespaceSet[namespace.Name]; exists {
			results[namespace.Name] = namespace
		}
	}
return results, nil
}
// fetchNamespaces gets the namespaces in the current k8s environment(endpoint).
// this function is used by both admin and non-admin users.
// the result gets parsed to a map of namespace name to namespace info.
func (kcl *KubeClient) fetchNamespaces() (map[string]portainer.K8sNamespaceInfo, error) {
namespaces, err := kcl.cli.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("an error occurred during the fetchNamespacesForAdmin operation, unable to list namespaces for the admin user: %w", err)
}
results := make(map[string]portainer.K8sNamespaceInfo)
for _, namespace := range namespaces.Items {
results[namespace.Name] = parseNamespace(&namespace)
}
return results, nil
}
// parseNamespace converts a k8s namespace object to a portainer namespace object.
func parseNamespace(namespace *corev1.Namespace) portainer.K8sNamespaceInfo {
return portainer.K8sNamespaceInfo{
Id: string(namespace.UID),
Name: namespace.Name,
Status: namespace.Status,
CreationDate: namespace.CreationTimestamp.Format(time.RFC3339),
NamespaceOwner: namespace.Labels[namespaceOwnerLabel],
IsSystem: isSystemNamespace(*namespace),
IsDefault: namespace.Name == defaultNamespace,
}
}
// GetNamespace gets the namespace in the current k8s environment(endpoint).
func (kcl *KubeClient) GetNamespace(name string) (portainer.K8sNamespaceInfo, error) {
namespace, err := kcl.cli.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
@ -54,47 +106,42 @@ func (kcl *KubeClient) GetNamespace(name string) (portainer.K8sNamespaceInfo, er
return portainer.K8sNamespaceInfo{}, err
}
	return parseNamespace(namespace), nil
}
// CreateNamespace creates a new namespace in a k8s endpoint.
func (kcl *KubeClient) CreateNamespace(info models.K8sNamespaceDetails) (*corev1.Namespace, error) {
portainerLabels := map[string]string{
"io.portainer.kubernetes.resourcepool.name": stackutils.SanitizeLabel(info.Name),
"io.portainer.kubernetes.resourcepool.owner": stackutils.SanitizeLabel(info.Owner),
namespaceNameLabel: stackutils.SanitizeLabel(info.Name),
namespaceOwnerLabel: stackutils.SanitizeLabel(info.Owner),
}
	var ns corev1.Namespace
ns.Name = info.Name
ns.Annotations = info.Annotations
ns.Labels = portainerLabels
	namespace, err := kcl.cli.CoreV1().Namespaces().Create(context.Background(), &ns, metav1.CreateOptions{})
if err != nil {
log.Error().
Err(err).
Str("Namespace", info.Name).
Msg("Failed to create the namespace")
		return nil, err
}
if info.ResourceQuota != nil && info.ResourceQuota.Enabled {
log.Info().Msgf("Creating resource quota for namespace %s", info.Name)
log.Debug().Msgf("Creating resource quota with details: %+v", info.ResourceQuota)
		resourceQuota := &corev1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{
Name: "portainer-rq-" + info.Name,
Namespace: info.Name,
Labels: portainerLabels,
},
			Spec: corev1.ResourceQuotaSpec{
				Hard: corev1.ResourceList{},
},
}
@ -103,28 +150,28 @@ func (kcl *KubeClient) CreateNamespace(info models.K8sNamespaceDetails) error {
cpu := resource.MustParse(info.ResourceQuota.CPU)
if memory.Value() > 0 {
memQuota := memory
			resourceQuota.Spec.Hard[corev1.ResourceLimitsMemory] = memQuota
			resourceQuota.Spec.Hard[corev1.ResourceRequestsMemory] = memQuota
}
if cpu.Value() > 0 {
cpuQuota := cpu
			resourceQuota.Spec.Hard[corev1.ResourceLimitsCPU] = cpuQuota
			resourceQuota.Spec.Hard[corev1.ResourceRequestsCPU] = cpuQuota
}
}
_, err := kcl.cli.CoreV1().ResourceQuotas(info.Name).Create(context.Background(), resourceQuota, metav1.CreateOptions{})
if err != nil {
log.Error().Msgf("Failed to create resource quota for namespace %s: %s", info.Name, err)
			return nil, err
}
}
	return namespace, nil
}
func isSystemNamespace(namespace corev1.Namespace) bool {
systemLabelValue, hasSystemLabel := namespace.Labels[systemNamespaceLabel]
if hasSystemLabel {
return systemLabelValue == "true"
@ -176,32 +223,77 @@ func (kcl *KubeClient) ToggleSystemState(namespaceName string, isSystem bool) er
}
// UpdateNamespace updates a namespace in a given k8s endpoint.
func (kcl *KubeClient) UpdateNamespace(info models.K8sNamespaceDetails) (*corev1.Namespace, error) {
namespace := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: info.Name,
Annotations: info.Annotations,
},
}
	return kcl.cli.CoreV1().Namespaces().Update(context.Background(), &namespace, metav1.UpdateOptions{})
}
func (kcl *KubeClient) DeleteNamespace(namespaceName string) (*corev1.Namespace, error) {
	namespace, err := kcl.cli.CoreV1().Namespaces().Get(context.Background(), namespaceName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	err = kcl.cli.CoreV1().Namespaces().Delete(context.Background(), namespaceName, metav1.DeleteOptions{})
	if err != nil {
		return nil, err
	}

	return namespace, nil
}
// CombineNamespacesWithResourceQuotas combines namespaces with resource quotas where matching is based on "portainer-rq-"+namespace.Name
func (kcl *KubeClient) CombineNamespacesWithResourceQuotas(namespaces map[string]portainer.K8sNamespaceInfo, w http.ResponseWriter) *httperror.HandlerError {
resourceQuotas, err := kcl.GetResourceQuotas("")
if err != nil && !k8serrors.IsNotFound(err) {
return httperror.InternalServerError("an error occurred during the CombineNamespacesWithResourceQuotas operation, unable to retrieve resource quotas from the Kubernetes for an admin user. Error: ", err)
}
if len(*resourceQuotas) > 0 {
return response.JSON(w, kcl.UpdateNamespacesWithResourceQuotas(namespaces, *resourceQuotas))
}
return response.JSON(w, kcl.ConvertNamespaceMapToSlice(namespaces))
}
// CombineNamespaceWithResourceQuota combines a namespace with a resource quota prefixed with "portainer-rq-"+namespace.Name
func (kcl *KubeClient) CombineNamespaceWithResourceQuota(namespace portainer.K8sNamespaceInfo, w http.ResponseWriter) *httperror.HandlerError {
resourceQuota, err := kcl.GetPortainerResourceQuota(namespace.Name)
if err != nil && !k8serrors.IsNotFound(err) {
return httperror.InternalServerError(fmt.Sprintf("an error occurred during the CombineNamespaceWithResourceQuota operation, unable to retrieve the resource quota associated with the namespace: %s for a non-admin user. Error: ", namespace.Name), err)
}
if resourceQuota != nil {
namespace.ResourceQuota = resourceQuota
}
return response.JSON(w, namespace)
}
// buildNonAdminNamespacesMap builds a map of non-admin namespaces.
// the map is used to filter the namespaces based on the allowed namespaces.
func (kcl *KubeClient) buildNonAdminNamespacesMap() map[string]struct{} {
nonAdminNamespaceSet := make(map[string]struct{}, len(kcl.NonAdminNamespaces))
for _, namespace := range kcl.NonAdminNamespaces {
nonAdminNamespaceSet[namespace] = struct{}{}
}
return nonAdminNamespaceSet
}
// ConvertNamespaceMapToSlice converts the namespace map to a slice of namespaces.
// this is used for the API response.
func (kcl *KubeClient) ConvertNamespaceMapToSlice(namespaces map[string]portainer.K8sNamespaceInfo) []portainer.K8sNamespaceInfo {
namespaceSlice := make([]portainer.K8sNamespaceInfo, 0, len(namespaces))
for _, namespace := range namespaces {
namespaceSlice = append(namespaceSlice, namespace)
}
return namespaceSlice
}
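
A sketch of the admin flow these helpers compose into (w is assumed to be the handler's http.ResponseWriter, and the handler is assumed to return a *httperror.HandlerError):

	// Hypothetical endpoint body: list namespaces, then attach any
	// "portainer-rq-<name>" quotas and write the JSON response.
	namespaces, err := kcl.GetNamespaces()
	if err != nil {
		return httperror.InternalServerError("unable to list namespaces", err)
	}
	return kcl.CombineNamespacesWithResourceQuotas(namespaces, w)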

View file

@ -73,8 +73,18 @@ func Test_ToggleSystemState(t *testing.T) {
t.Run("for regular namespace if isSystem is true and doesn't have a label, should set the label to true", func(t *testing.T) {
nsName := "namespace"
config := &core.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: portainerConfigMapName,
Namespace: portainerNamespace,
},
Data: map[string]string{
"NamespaceAccessPolicies": `{"ns1":{"UserAccessPolicies":{"2":{"RoleId":0}}}, "ns2":{"UserAccessPolicies":{"2":{"RoleId":0}}}}`,
},
}
kcl := &KubeClient{
			cli:        kfake.NewSimpleClientset(&core.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName}}, config),
instanceID: "instance",
}

View file

@ -2,6 +2,7 @@ package cli
import (
"context"
"fmt"
"strconv"
"time"
@ -9,10 +10,69 @@ import (
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (kcl *KubeClient) GetPods(namespace string) ([]corev1.Pod, error) {
pods, err := kcl.cli.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
return pods.Items, nil
}
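
A minimal caller sketch (the namespace name is illustrative only):

	// Count the pods in one namespace; an empty string would list cluster-wide.
	pods, err := kcl.GetPods("default")
	if err != nil {
		return err
	}
	log.Debug().Msgf("found %d pods in the default namespace", len(pods))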
// isReplicaSetOwner checks if the pod's owner reference is a ReplicaSet
func isReplicaSetOwner(pod corev1.Pod) bool {
return len(pod.OwnerReferences) > 0 && pod.OwnerReferences[0].Kind == "ReplicaSet"
}
// updateOwnerReferenceToDeployment updates the pod's owner reference to the Deployment if applicable
func updateOwnerReferenceToDeployment(pod *corev1.Pod, replicaSets []appsv1.ReplicaSet) {
for _, replicaSet := range replicaSets {
if pod.OwnerReferences[0].Name == replicaSet.Name {
if len(replicaSet.OwnerReferences) > 0 && replicaSet.OwnerReferences[0].Kind == "Deployment" {
pod.OwnerReferences[0].Kind = "Deployment"
pod.OwnerReferences[0].Name = replicaSet.OwnerReferences[0].Name
}
break
}
}
}
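
A sketch of how these two helpers might be combined, assuming pods ([]corev1.Pod) and replicaSets ([]appsv1.ReplicaSet) have already been fetched:

	// Pods owned by a ReplicaSet that is itself owned by a Deployment are
	// re-labelled as Deployment-owned, which is how the UI groups them.
	for i := range pods {
		if isReplicaSetOwner(pods[i]) {
			updateOwnerReferenceToDeployment(&pods[i], replicaSets)
		}
	}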
// containsStatefulSetOwnerReference checks if the pod list contains a pod with a StatefulSet owner reference
func containsStatefulSetOwnerReference(pods *corev1.PodList) bool {
for _, pod := range pods.Items {
if len(pod.OwnerReferences) > 0 && pod.OwnerReferences[0].Kind == "StatefulSet" {
return true
}
}
return false
}
// containsDaemonSetOwnerReference checks if the pod list contains a pod with a DaemonSet owner reference
func containsDaemonSetOwnerReference(pods *corev1.PodList) bool {
for _, pod := range pods.Items {
if len(pod.OwnerReferences) > 0 && pod.OwnerReferences[0].Kind == "DaemonSet" {
return true
}
}
return false
}
// containsReplicaSetOwnerReference checks if the pod list contains a pod with a ReplicaSet owner reference
func containsReplicaSetOwnerReference(pods *corev1.PodList) bool {
for _, pod := range pods.Items {
if len(pod.OwnerReferences) > 0 && pod.OwnerReferences[0].Kind == "ReplicaSet" {
return true
}
}
return false
}
// CreateUserShellPod will create a kubectl based shell for the specified user by mounting their respective service account.
// The lifecycle of the pod is managed in this function; this entails management of the following pod operations:
// - The shell pod will be scoped to specified service accounts access permissions
@ -24,7 +84,7 @@ func (kcl *KubeClient) CreateUserShellPod(ctx context.Context, serviceAccountNam
podPrefix := userShellPodPrefix(serviceAccountName)
	podSpec := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: podPrefix,
Namespace: portainerNamespace,
@ -32,20 +92,20 @@ func (kcl *KubeClient) CreateUserShellPod(ctx context.Context, serviceAccountNam
"kubernetes.io/pod.type": "kubectl-shell",
},
},
		Spec: corev1.PodSpec{
TerminationGracePeriodSeconds: new(int64),
ServiceAccountName: serviceAccountName,
			Containers: []corev1.Container{
{
Name: "kubectl-shell-container",
Image: shellPodImage,
Command: []string{"sleep"},
// Specify sleep time to prevent zombie pods in case portainer process is terminated
Args: []string{maxPodKeepAliveSecondsStr},
					ImagePullPolicy: corev1.PullIfNotPresent,
},
},
			RestartPolicy: corev1.RestartPolicyNever,
},
}
@ -58,7 +118,7 @@ func (kcl *KubeClient) CreateUserShellPod(ctx context.Context, serviceAccountNam
timeoutCtx, cancelFunc := context.WithTimeout(ctx, 20*time.Second)
defer cancelFunc()
	if err := kcl.waitForPodStatus(timeoutCtx, corev1.PodRunning, shellPod); err != nil {
kcl.cli.CoreV1().Pods(portainerNamespace).Delete(context.TODO(), shellPod.Name, metav1.DeleteOptions{})
return nil, errors.Wrap(err, "aborting pod creation; error waiting for shell pod ready status")
@ -89,7 +149,7 @@ func (kcl *KubeClient) CreateUserShellPod(ctx context.Context, serviceAccountNam
// waitForPodStatus will wait until duration d (from now) for a pod to reach defined phase/status.
// The pod status will be polled at specified delay until the pod reaches ready state.
func (kcl *KubeClient) waitForPodStatus(ctx context.Context, phase corev1.PodPhase, pod *corev1.Pod) error {
log.Debug().Str("pod", pod.Name).Msg("waiting for pod ready")
for {
@ -110,3 +170,102 @@ func (kcl *KubeClient) waitForPodStatus(ctx context.Context, phase v1.PodPhase,
}
}
}
// fetchAllPodsAndReplicaSets fetches all pods and replica sets across the cluster, i.e. all namespaces
func (kcl *KubeClient) fetchAllPodsAndReplicaSets(namespace string, podListOptions metav1.ListOptions) ([]corev1.Pod, []appsv1.ReplicaSet, []appsv1.Deployment, []appsv1.StatefulSet, []appsv1.DaemonSet, []corev1.Service, error) {
return kcl.fetchResourcesWithOwnerReferences(namespace, podListOptions, false, false)
}
// fetchAllApplicationsListResources fetches all pods, replica sets, stateful sets, and daemon sets across the cluster, i.e. all namespaces
// this is required for the applications list view
func (kcl *KubeClient) fetchAllApplicationsListResources(namespace string, podListOptions metav1.ListOptions) ([]corev1.Pod, []appsv1.ReplicaSet, []appsv1.Deployment, []appsv1.StatefulSet, []appsv1.DaemonSet, []corev1.Service, error) {
return kcl.fetchResourcesWithOwnerReferences(namespace, podListOptions, true, true)
}
// fetchResourcesWithOwnerReferences fetches pods and other resources based on owner references
func (kcl *KubeClient) fetchResourcesWithOwnerReferences(namespace string, podListOptions metav1.ListOptions, includeStatefulSets, includeDaemonSets bool) ([]corev1.Pod, []appsv1.ReplicaSet, []appsv1.Deployment, []appsv1.StatefulSet, []appsv1.DaemonSet, []corev1.Service, error) {
pods, err := kcl.cli.CoreV1().Pods(namespace).List(context.Background(), podListOptions)
if err != nil {
if k8serrors.IsNotFound(err) {
return nil, nil, nil, nil, nil, nil, nil
}
return nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list pods across the cluster: %w", err)
}
// if replicaSet owner reference exists, fetch the replica sets
// this also means that the deployments will be fetched because deployments own replica sets
replicaSets := &appsv1.ReplicaSetList{}
deployments := &appsv1.DeploymentList{}
if containsReplicaSetOwnerReference(pods) {
replicaSets, err = kcl.cli.AppsV1().ReplicaSets(namespace).List(context.Background(), metav1.ListOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
return nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list replica sets across the cluster: %w", err)
}
deployments, err = kcl.cli.AppsV1().Deployments(namespace).List(context.Background(), metav1.ListOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
return nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list deployments across the cluster: %w", err)
}
}
statefulSets := &appsv1.StatefulSetList{}
if includeStatefulSets && containsStatefulSetOwnerReference(pods) {
statefulSets, err = kcl.cli.AppsV1().StatefulSets(namespace).List(context.Background(), metav1.ListOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
return nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list stateful sets across the cluster: %w", err)
}
}
daemonSets := &appsv1.DaemonSetList{}
if includeDaemonSets && containsDaemonSetOwnerReference(pods) {
daemonSets, err = kcl.cli.AppsV1().DaemonSets(namespace).List(context.Background(), metav1.ListOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
return nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list daemon sets across the cluster: %w", err)
}
}
services, err := kcl.cli.CoreV1().Services(namespace).List(context.Background(), metav1.ListOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
return nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list services across the cluster: %w", err)
}
return pods.Items, replicaSets.Items, deployments.Items, statefulSets.Items, daemonSets.Items, services.Items, nil
}
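
A caller sketch; positions in the seven-value return that a view does not need are simply discarded:

	// Hypothetical list-view prelude: only pods and services are kept here.
	pods, _, _, _, _, services, err := kcl.fetchAllApplicationsListResources("", metav1.ListOptions{})
	if err != nil {
		return err
	}
	log.Debug().Msgf("fetched %d pods and %d services", len(pods), len(services))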
// isPodUsingConfigMap checks if a pod is using a specific ConfigMap
func isPodUsingConfigMap(pod *corev1.Pod, configMapName string) bool {
for _, volume := range pod.Spec.Volumes {
if volume.ConfigMap != nil && volume.ConfigMap.Name == configMapName {
return true
}
}
for _, container := range pod.Spec.Containers {
for _, env := range container.Env {
if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == configMapName {
return true
}
}
}
return false
}
// isPodUsingSecret checks if a pod is using a specific Secret
func isPodUsingSecret(pod *corev1.Pod, secretName string) bool {
for _, volume := range pod.Spec.Volumes {
if volume.Secret != nil && volume.Secret.SecretName == secretName {
return true
}
}
for _, container := range pod.Spec.Containers {
for _, env := range container.Env {
if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == secretName {
return true
}
}
}
return false
}
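
A small sketch reusing the helper above to filter a pod slice (the function name podsUsingConfigMap is hypothetical, not part of the commit):

// podsUsingConfigMap returns the pods that mount or reference the named ConfigMap.
func podsUsingConfigMap(pods []corev1.Pod, configMapName string) []corev1.Pod {
	matches := []corev1.Pod{}
	for i := range pods {
		if isPodUsingConfigMap(&pods[i], configMapName) {
			matches = append(matches, pods[i])
		}
	}
	return matches
}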

View file

@ -2,198 +2,17 @@ package cli
import (
"context"
"time"
"github.com/portainer/portainer/api/internal/randomstring"
"github.com/rs/zerolog/log"
authv1 "k8s.io/api/authorization/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
authv1types "k8s.io/client-go/kubernetes/typed/authorization/v1"
corev1types "k8s.io/client-go/kubernetes/typed/core/v1"
rbacv1types "k8s.io/client-go/kubernetes/typed/rbac/v1"
)
// IsRBACEnabled checks if RBAC is enabled in the current Kubernetes cluster by listing cluster roles.
// if the cluster roles can be listed, RBAC is enabled.
// otherwise, RBAC is not enabled.
func (kcl *KubeClient) IsRBACEnabled() (bool, error) {
_, err := kcl.cli.RbacV1().ClusterRoles().List(context.TODO(), metav1.ListOptions{})
if err != nil {
log.Error().Err(err).Msg("Error checking service account access")
return false, err
}
return true, nil
}
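
A caller sketch; note that with this simplification a list failure (for example, a permission problem) surfaces as an error rather than as "RBAC disabled":

	// Hypothetical caller: treat errors as "unknown" instead of assuming a state.
	rbacEnabled, err := kcl.IsRBACEnabled()
	if err != nil {
		log.Warn().Err(err).Msg("could not determine RBAC status")
		return err
	}
	log.Debug().Msgf("RBAC enabled: %t", rbacEnabled)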

View file

@ -0,0 +1,95 @@
package cli
import (
"context"
"fmt"
portainer "github.com/portainer/portainer/api"
"github.com/rs/zerolog/log"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetResourceQuotas gets all resource quotas in the current k8s environment(endpoint).
// if the user is an admin, all resource quotas in all namespaces are fetched.
// otherwise, namespaces the non-admin user has access to will be used to filter the resource quotas.
func (kcl *KubeClient) GetResourceQuotas(namespace string) (*[]corev1.ResourceQuota, error) {
if kcl.IsKubeAdmin {
return kcl.fetchResourceQuotas(namespace)
}
return kcl.fetchResourceQuotasForNonAdmin(namespace)
}
// fetchResourceQuotasForNonAdmin gets the resource quotas in the current k8s environment(endpoint) for a non-admin user.
// the role of the user must have read access to the resource quotas in the defined namespaces.
func (kcl *KubeClient) fetchResourceQuotasForNonAdmin(namespace string) (*[]corev1.ResourceQuota, error) {
log.Debug().Msgf("Fetching resource quotas for non-admin user: %v", kcl.NonAdminNamespaces)
if len(kcl.NonAdminNamespaces) == 0 {
return nil, nil
}
resourceQuotas, err := kcl.fetchResourceQuotas(namespace)
if err != nil && !k8serrors.IsNotFound(err) {
return nil, err
}
nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
results := []corev1.ResourceQuota{}
for _, resourceQuota := range *resourceQuotas {
if _, exists := nonAdminNamespaceSet[resourceQuota.Namespace]; exists {
results = append(results, resourceQuota)
}
}
return &results, nil
}
func (kcl *KubeClient) fetchResourceQuotas(namespace string) (*[]corev1.ResourceQuota, error) {
resourceQuotas, err := kcl.cli.CoreV1().ResourceQuotas(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("an error occured, failed to list resource quotas for the admin user: %w", err)
}
return &resourceQuotas.Items, nil
}
// GetPortainerResourceQuota gets the resource quota for the portainer namespace.
// The resource quota is prefixed with "portainer-rq-".
func (kcl *KubeClient) GetPortainerResourceQuota(namespace string) (*corev1.ResourceQuota, error) {
return kcl.cli.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), "portainer-rq-"+namespace, metav1.GetOptions{})
}
// GetResourceQuota gets a resource quota in a specific namespace.
func (kcl *KubeClient) GetResourceQuota(namespace, resourceQuota string) (*corev1.ResourceQuota, error) {
return kcl.cli.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), resourceQuota, metav1.GetOptions{})
}
// UpdateNamespacesWithResourceQuotas updates the namespaces with the resource quotas.
// The resource quotas are matched with the namespaces by name.
func (kcl *KubeClient) UpdateNamespacesWithResourceQuotas(namespaces map[string]portainer.K8sNamespaceInfo, resourceQuotas []corev1.ResourceQuota) []portainer.K8sNamespaceInfo {
namespacesWithQuota := map[string]portainer.K8sNamespaceInfo{}
for _, namespace := range namespaces {
resourceQuota := kcl.GetResourceQuotaFromNamespace(namespace, resourceQuotas)
if resourceQuota != nil {
namespace.ResourceQuota = resourceQuota
}
namespacesWithQuota[namespace.Name] = namespace
}
return kcl.ConvertNamespaceMapToSlice(namespacesWithQuota)
}
// GetResourceQuotaFromNamespace gets the resource quota in a specific namespace where the resource quota's name is prefixed with "portainer-rq-".
func (kcl *KubeClient) GetResourceQuotaFromNamespace(namespace portainer.K8sNamespaceInfo, resourceQuotas []corev1.ResourceQuota) *corev1.ResourceQuota {
for _, resourceQuota := range resourceQuotas {
if resourceQuota.ObjectMeta.Namespace == namespace.Name && resourceQuota.ObjectMeta.Name == "portainer-rq-"+namespace.Name {
return &resourceQuota
}
}
return nil
}
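
A sketch of the two helpers together (namespaces is assumed to be the map returned by GetNamespaces):

	// Fetch every quota once, then fold the matching "portainer-rq-" quota
	// into each namespace and flatten the map for the API response.
	resourceQuotas, err := kcl.GetResourceQuotas("")
	if err != nil {
		return err
	}
	namespaceSlice := kcl.UpdateNamespacesWithResourceQuotas(namespaces, *resourceQuotas)
	log.Debug().Msgf("returning %d namespaces", len(namespaceSlice))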

View file

@ -3,11 +3,66 @@ package cli
import (
"context"
models "github.com/portainer/portainer/api/http/models/kubernetes"
rbacv1 "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetRoles gets all the roles for either at the cluster level or a given namespace in a k8s endpoint.
// It returns a list of K8sRole objects.
func (kcl *KubeClient) GetRoles(namespace string) ([]models.K8sRole, error) {
if kcl.IsKubeAdmin {
return kcl.fetchRoles(namespace)
}
return kcl.fetchRolesForNonAdmin(namespace)
}
// fetchRolesForNonAdmin gets all the roles for either at the cluster level or a given namespace in a k8s endpoint.
// the namespace will be coming from NonAdminNamespaces as non-admin users are restricted to certain namespaces.
// it returns a list of K8sRole objects.
func (kcl *KubeClient) fetchRolesForNonAdmin(namespace string) ([]models.K8sRole, error) {
roles, err := kcl.fetchRoles(namespace)
if err != nil {
return nil, err
}
nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
results := make([]models.K8sRole, 0)
for _, role := range roles {
if _, ok := nonAdminNamespaceSet[role.Namespace]; ok {
results = append(results, role)
}
}
return results, nil
}
// fetchRoles returns a list of all Roles in the specified namespace.
func (kcl *KubeClient) fetchRoles(namespace string) ([]models.K8sRole, error) {
roles, err := kcl.cli.RbacV1().Roles(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
results := make([]models.K8sRole, 0)
for _, role := range roles.Items {
results = append(results, parseRole(role))
}
return results, nil
}
// parseRole converts a rbacv1.Role object to a models.K8sRole object.
func parseRole(role rbacv1.Role) models.K8sRole {
return models.K8sRole{
Name: role.Name,
Namespace: role.Namespace,
CreationDate: role.CreationTimestamp.Time,
}
}
func getPortainerUserDefaultPolicies() []rbacv1.PolicyRule {
return []rbacv1.PolicyRule{
{

View file

@ -0,0 +1,65 @@
package cli
import (
"context"
models "github.com/portainer/portainer/api/http/models/kubernetes"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetRoleBindings gets all the roleBindings for either at the cluster level or a given namespace in a k8s endpoint.
// It returns a list of K8sRoleBinding objects.
func (kcl *KubeClient) GetRoleBindings(namespace string) ([]models.K8sRoleBinding, error) {
if kcl.IsKubeAdmin {
return kcl.fetchRoleBindings(namespace)
}
return kcl.fetchRolebindingsForNonAdmin(namespace)
}
// fetchRolebindingsForNonAdmin gets all the roleBindings for either at the cluster level or a given namespace in a k8s endpoint.
// the namespace will be coming from NonAdminNamespaces as non-admin users are restricted to certain namespaces.
// it returns a list of K8sRoleBinding objects.
func (kcl *KubeClient) fetchRolebindingsForNonAdmin(namespace string) ([]models.K8sRoleBinding, error) {
roleBindings, err := kcl.fetchRoleBindings(namespace)
if err != nil {
return nil, err
}
nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
results := make([]models.K8sRoleBinding, 0)
for _, roleBinding := range roleBindings {
if _, ok := nonAdminNamespaceSet[roleBinding.Namespace]; ok {
results = append(results, roleBinding)
}
}
return results, nil
}
// fetchRoleBindings returns a list of all RoleBindings in the specified namespace.
func (kcl *KubeClient) fetchRoleBindings(namespace string) ([]models.K8sRoleBinding, error) {
roleBindings, err := kcl.cli.RbacV1().RoleBindings(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
results := make([]models.K8sRoleBinding, 0)
for _, roleBinding := range roleBindings.Items {
results = append(results, parseRoleBinding(roleBinding))
}
return results, nil
}
// parseRoleBinding converts a rbacv1.RoleBinding object to a models.K8sRoleBinding object.
func parseRoleBinding(roleBinding rbacv1.RoleBinding) models.K8sRoleBinding {
return models.K8sRoleBinding{
Name: roleBinding.Name,
Namespace: roleBinding.Namespace,
RoleRef: roleBinding.RoleRef,
Subjects: roleBinding.Subjects,
CreationDate: roleBinding.CreationTimestamp.Time,
}
}

View file

@ -3,17 +3,182 @@ package cli
import (
"context"
"errors"
"fmt"
"time"
v1 "k8s.io/api/core/v1"
models "github.com/portainer/portainer/api/http/models/kubernetes"
"github.com/rs/zerolog/log"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
labelPortainerKubeConfigOwner = "io.portainer.kubernetes.configuration.owner"
labelPortainerKubeConfigOwnerId = "io.portainer.kubernetes.configuration.owner.id"
)
// GetSecrets gets all the Secrets for a given namespace in a k8s endpoint.
// if the user is an admin, all secrets in the current k8s environment(endpoint) are fetched using the getSecrets function.
// otherwise, namespaces the non-admin user has access to will be used to filter the secrets based on the allowed namespaces.
func (kcl *KubeClient) GetSecrets(namespace string) ([]models.K8sSecret, error) {
if kcl.IsKubeAdmin {
return kcl.getSecrets(namespace)
}
return kcl.getSecretsForNonAdmin(namespace)
}
// getSecretsForNonAdmin fetches the secrets in the namespaces the user has access to.
// This function is called when the user is not an admin.
func (kcl *KubeClient) getSecretsForNonAdmin(namespace string) ([]models.K8sSecret, error) {
log.Debug().Msgf("Fetching volumes for non-admin user: %v", kcl.NonAdminNamespaces)
if len(kcl.NonAdminNamespaces) == 0 {
return nil, nil
}
secrets, err := kcl.getSecrets(namespace)
if err != nil {
return nil, err
}
nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
results := make([]models.K8sSecret, 0)
for _, secret := range secrets {
if _, ok := nonAdminNamespaceSet[secret.Namespace]; ok {
results = append(results, secret)
}
}
return results, nil
}
// getSecrets gets all the Secrets for a given namespace in a k8s endpoint.
// the result is a list of secrets parsed into a K8sSecret struct.
func (kcl *KubeClient) getSecrets(namespace string) ([]models.K8sSecret, error) {
secrets, err := kcl.cli.CoreV1().Secrets(namespace).List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
results := []models.K8sSecret{}
for _, secret := range secrets.Items {
results = append(results, parseSecret(&secret, false))
}
return results, nil
}
// GetSecret gets a Secret by name for a given namespace.
// the result is a secret parsed into a K8sSecret struct.
func (kcl *KubeClient) GetSecret(namespace string, secretName string) (models.K8sSecret, error) {
secret, err := kcl.cli.CoreV1().Secrets(namespace).Get(context.Background(), secretName, metav1.GetOptions{})
if err != nil {
return models.K8sSecret{}, err
}
return parseSecret(secret, true), nil
}
// parseSecret parses a k8s Secret object into a K8sSecret struct.
// for get operation, withData will be set to true.
// otherwise, only metadata will be parsed.
func parseSecret(secret *corev1.Secret, withData bool) models.K8sSecret {
result := models.K8sSecret{
K8sConfiguration: models.K8sConfiguration{
UID: string(secret.UID),
Name: secret.Name,
Namespace: secret.Namespace,
CreationDate: secret.CreationTimestamp.Time.UTC().Format(time.RFC3339),
Annotations: secret.Annotations,
Labels: secret.Labels,
ConfigurationOwner: secret.Labels[labelPortainerKubeConfigOwner],
ConfigurationOwnerId: secret.Labels[labelPortainerKubeConfigOwnerId],
},
SecretType: string(secret.Type),
}
if withData {
secretData := secret.Data
secretDataMap := make(map[string]string, len(secretData))
for key, value := range secretData {
secretDataMap[key] = string(value)
}
result.Data = secretDataMap
}
return result
}
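
A caller sketch (the secret name is illustrative): a single Get goes through parseSecret with withData set to true, so Data holds string values rather than raw bytes:

	secret, err := kcl.GetSecret("default", "my-registry-credentials")
	if err != nil {
		return err
	}
	for key := range secret.Data {
		log.Debug().Msgf("secret %s carries key %s", secret.Name, key)
	}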
// CombineSecretsWithApplications combines the secrets with the applications that use them.
// the function fetches all the pods and replica sets in the cluster and checks if the secret is used by any of the pods.
// if the secret is used by a pod, the application that uses the pod is added to the secret.
// otherwise, the secret is returned as is.
func (kcl *KubeClient) CombineSecretsWithApplications(secrets []models.K8sSecret) ([]models.K8sSecret, error) {
updatedSecrets := make([]models.K8sSecret, len(secrets))
pods, replicaSets, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets("", metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("an error occurred during the CombineSecretsWithApplications operation, unable to fetch pods and replica sets. Error: %w", err)
}
for index, secret := range secrets {
updatedSecret := secret
applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromSecret(secret, pods, replicaSets)
if err != nil {
return nil, fmt.Errorf("an error occurred during the CombineSecretsWithApplications operation, unable to get applications from secret. Error: %w", err)
}
if len(applicationConfigurationOwners) > 0 {
updatedSecret.ConfigurationOwnerResources = applicationConfigurationOwners
}
updatedSecrets[index] = updatedSecret
}
return updatedSecrets, nil
}
// CombineSecretWithApplications combines the secret with the applications that use it.
// the function fetches all the pods in the cluster and checks if the secret is used by any of the pods.
// it needs to check if the pods are owned by a replica set to determine if the pod is part of a deployment.
func (kcl *KubeClient) CombineSecretWithApplications(secret models.K8sSecret) (models.K8sSecret, error) {
pods, err := kcl.cli.CoreV1().Pods(secret.Namespace).List(context.Background(), metav1.ListOptions{})
if err != nil {
return models.K8sSecret{}, fmt.Errorf("an error occurred during the CombineSecretWithApplications operation, unable to get pods. Error: %w", err)
}
containsReplicaSetOwner := false
for _, pod := range pods.Items {
containsReplicaSetOwner = isReplicaSetOwner(pod)
break
}
if containsReplicaSetOwner {
replicaSets, err := kcl.cli.AppsV1().ReplicaSets(secret.Namespace).List(context.Background(), metav1.ListOptions{})
if err != nil {
return models.K8sSecret{}, fmt.Errorf("an error occurred during the CombineSecretWithApplications operation, unable to get replica sets. Error: %w", err)
}
applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromSecret(secret, pods.Items, replicaSets.Items)
if err != nil {
return models.K8sSecret{}, fmt.Errorf("an error occurred during the CombineSecretWithApplications operation, unable to get applications from secret. Error: %w", err)
}
if len(applicationConfigurationOwners) > 0 {
secret.ConfigurationOwnerResources = applicationConfigurationOwners
}
}
return secret, nil
}
func (kcl *KubeClient) createServiceAccountToken(serviceAccountName string) error {
serviceAccountSecretName := userServiceAccountTokenSecretName(serviceAccountName, kcl.instanceID)
	serviceAccountSecret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountSecretName,

View file

@ -2,99 +2,135 @@ package cli
import (
"context"
"fmt"
models "github.com/portainer/portainer/api/http/models/kubernetes"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
)
// GetServices gets all the services for either at the cluster level or a given namespace in a k8s endpoint.
// It returns a list of K8sServiceInfo objects.
func (kcl *KubeClient) GetServices(namespace string) ([]models.K8sServiceInfo, error) {
if kcl.IsKubeAdmin {
return kcl.fetchServices(namespace)
}
return kcl.fetchServicesForNonAdmin(namespace)
}
// fetchServicesForNonAdmin gets all the services for either at the cluster level or a given namespace in a k8s endpoint.
// the namespace will be coming from NonAdminNamespaces as non-admin users are restricted to certain namespaces.
// it returns a list of K8sServiceInfo objects.
func (kcl *KubeClient) fetchServicesForNonAdmin(namespace string) ([]models.K8sServiceInfo, error) {
log.Debug().Msgf("Fetching services for non-admin user: %v", kcl.NonAdminNamespaces)
if len(kcl.NonAdminNamespaces) == 0 {
return nil, nil
}
services, err := kcl.fetchServices(namespace)
if err != nil {
return nil, err
}
nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
results := make([]models.K8sServiceInfo, 0)
for _, service := range services {
if _, ok := nonAdminNamespaceSet[service.Namespace]; ok {
results = append(results, service)
}
}
return results, nil
}
// fetchServices gets the services in a given namespace in a k8s endpoint.
// It returns a list of K8sServiceInfo objects.
func (kcl *KubeClient) fetchServices(namespace string) ([]models.K8sServiceInfo, error) {
services, err := kcl.cli.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
results := make([]models.K8sServiceInfo, 0)
for _, service := range services.Items {
results = append(results, parseService(service))
}
return results, nil
}
// parseService converts a k8s native service object to a Portainer K8sServiceInfo object.
// service ports, ingress status, labels, annotations, cluster IPs, and external IPs are parsed.
// it returns a K8sServiceInfo object.
func parseService(service corev1.Service) models.K8sServiceInfo {
servicePorts := make([]models.K8sServicePort, 0)
for _, port := range service.Spec.Ports {
servicePorts = append(servicePorts, models.K8sServicePort{
Name: port.Name,
NodePort: int(port.NodePort),
Port: int(port.Port),
Protocol: string(port.Protocol),
TargetPort: port.TargetPort.String(),
})
}
ingressStatus := make([]models.K8sServiceIngress, 0)
for _, status := range service.Status.LoadBalancer.Ingress {
ingressStatus = append(ingressStatus, models.K8sServiceIngress{
IP: status.IP,
Host: status.Hostname,
})
}
return models.K8sServiceInfo{
Name: service.Name,
UID: string(service.GetUID()),
Type: string(service.Spec.Type),
Namespace: service.Namespace,
CreationDate: service.GetCreationTimestamp().String(),
AllocateLoadBalancerNodePorts: service.Spec.AllocateLoadBalancerNodePorts,
Ports: servicePorts,
IngressStatus: ingressStatus,
Labels: service.GetLabels(),
Annotations: service.GetAnnotations(),
ClusterIPs: service.Spec.ClusterIPs,
ExternalName: service.Spec.ExternalName,
ExternalIPs: service.Spec.ExternalIPs,
Selector: service.Spec.Selector,
}
}
// convertToK8sService converts a K8sServiceInfo object back to a k8s native service object.
// this is required for create and update operations.
// it returns a v1.Service object.
func (kcl *KubeClient) convertToK8sService(info models.K8sServiceInfo) corev1.Service {
service := corev1.Service{}
service.Name = info.Name
	service.Spec.Type = corev1.ServiceType(info.Type)
service.Namespace = info.Namespace
service.Annotations = info.Annotations
service.Labels = info.Labels
service.Spec.AllocateLoadBalancerNodePorts = info.AllocateLoadBalancerNodePorts
service.Spec.Selector = info.Selector
// Set ports.
for _, p := range info.Ports {
		port := corev1.ServicePort{}
port.Name = p.Name
port.NodePort = int32(p.NodePort)
port.Port = int32(p.Port)
		port.Protocol = corev1.Protocol(p.Protocol)
port.TargetPort = intstr.FromString(p.TargetPort)
service.Spec.Ports = append(service.Spec.Ports, port)
}
// Set ingresses.
for _, i := range info.IngressStatus {
service.Status.LoadBalancer.Ingress = append(
service.Status.LoadBalancer.Ingress,
			corev1.LoadBalancerIngress{IP: i.IP, Hostname: i.Host},
)
}
@ -103,86 +139,84 @@ func (kcl *KubeClient) fillService(info models.K8sServiceInfo) v1.Service {
// CreateService creates a new service in a given namespace in a k8s endpoint.
func (kcl *KubeClient) CreateService(namespace string, info models.K8sServiceInfo) error {
service := kcl.convertToK8sService(info)
_, err := kcl.cli.CoreV1().Services(namespace).Create(context.Background(), &service, metav1.CreateOptions{})
return err
}
// DeleteServices processes a K8sServiceDeleteRequest by deleting each service
// in its given namespace.
func (kcl *KubeClient) DeleteServices(reqs models.K8sServiceDeleteRequests) error {
for namespace := range reqs {
for _, service := range reqs[namespace] {
err := kcl.cli.CoreV1().Services(namespace).Delete(context.Background(), service, metav1.DeleteOptions{})
if err != nil {
return err
}
}
}
	return nil
}
// UpdateService updates service in a given namespace in a k8s endpoint.
func (kcl *KubeClient) UpdateService(namespace string, info models.K8sServiceInfo) error {
service := kcl.convertToK8sService(info)
_, err := kcl.cli.CoreV1().Services(namespace).Update(context.Background(), &service, metav1.UpdateOptions{})
return err
}
// CombineServicesWithApplications retrieves applications based on service selectors in a given namespace
// for all services, it lists pods based on the service selector and converts the pod to an application
// if replicasets are found, it updates the owner reference to deployment
// it then combines the service with the application
// finally, it returns a list of K8sServiceInfo objects
func (kcl *KubeClient) CombineServicesWithApplications(services []models.K8sServiceInfo) ([]models.K8sServiceInfo, error) {
	if containsServiceWithSelector(services) {
		updatedServices := make([]models.K8sServiceInfo, len(services))
		pods, replicaSets, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets("", metav1.ListOptions{})
		if err != nil {
			return nil, fmt.Errorf("an error occurred during the CombineServicesWithApplications operation, unable to fetch pods and replica sets. Error: %w", err)
		}

		for index, service := range services {
			updatedService := service
			application, err := kcl.GetApplicationFromServiceSelector(pods, service, replicaSets)
			if err != nil {
				return services, fmt.Errorf("an error occurred during the CombineServicesWithApplications operation, unable to get application from service. Error: %w", err)
			}
			if application != nil {
				updatedService.Applications = append(updatedService.Applications, *application)
			}
			updatedServices[index] = updatedService
		}
		return updatedServices, nil
	}
	return services, nil
}

// containsServiceWithSelector checks if a list of services contains a service with a selector
// it returns true if any service has a selector, otherwise false
func containsServiceWithSelector(services []models.K8sServiceInfo) bool {
for _, service := range services {
if len(service.Selector) > 0 {
return true
}
}
return false
}
// buildServicesMap builds a map of service names from a list of K8sServiceInfo objects
// it returns a map of service names for lookups
func (kcl *KubeClient) buildServicesMap(services []models.K8sServiceInfo) map[string]struct{} {
serviceMap := make(map[string]struct{})
for _, service := range services {
serviceMap[service.Name] = struct{}{}
}
return serviceMap
}
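
A sketch of the services list-view pipeline built from these helpers:

	// Hypothetical handler flow: fetch services (cluster-wide for admins),
	// then resolve each selector to its owning application for the UI.
	services, err := kcl.GetServices("")
	if err != nil {
		return err
	}
	services, err = kcl.CombineServicesWithApplications(services)
	if err != nil {
		return err
	}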

View file

@ -4,23 +4,76 @@ import (
"context"
portainer "github.com/portainer/portainer/api"
v1 "k8s.io/api/core/v1"
models "github.com/portainer/portainer/api/http/models/kubernetes"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetServiceAccounts gets all the service accounts for either at the cluster level or a given namespace in a k8s endpoint.
// It returns a list of K8sServiceAccount objects.
func (kcl *KubeClient) GetServiceAccounts(namespace string) ([]models.K8sServiceAccount, error) {
if kcl.IsKubeAdmin {
return kcl.fetchServiceAccounts(namespace)
}
return kcl.fetchServiceAccountsForNonAdmin(namespace)
}
// fetchServiceAccountsForNonAdmin gets all the service accounts for either at the cluster level or a given namespace in a k8s endpoint.
// the namespace will be coming from NonAdminNamespaces as non-admin users are restricted to certain namespaces.
// it returns a list of K8sServiceAccount objects.
func (kcl *KubeClient) fetchServiceAccountsForNonAdmin(namespace string) ([]models.K8sServiceAccount, error) {
serviceAccounts, err := kcl.fetchServiceAccounts(namespace)
if err != nil {
return nil, err
}
nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
results := make([]models.K8sServiceAccount, 0)
for _, serviceAccount := range serviceAccounts {
if _, ok := nonAdminNamespaceSet[serviceAccount.Namespace]; ok {
results = append(results, serviceAccount)
}
}
return results, nil
}
// fetchServiceAccounts returns a list of all ServiceAccounts in the specified namespace.
func (kcl *KubeClient) fetchServiceAccounts(namespace string) ([]models.K8sServiceAccount, error) {
serviceAccounts, err := kcl.cli.CoreV1().ServiceAccounts(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
results := make([]models.K8sServiceAccount, 0)
for _, serviceAccount := range serviceAccounts.Items {
results = append(results, parseServiceAccount(serviceAccount))
}
return results, nil
}
// parseServiceAccount converts a corev1.ServiceAccount object to a models.K8sServiceAccount object.
func parseServiceAccount(serviceAccount corev1.ServiceAccount) models.K8sServiceAccount {
return models.K8sServiceAccount{
Name: serviceAccount.Name,
Namespace: serviceAccount.Namespace,
CreationDate: serviceAccount.CreationTimestamp.Time,
}
}
// GetPortainerUserServiceAccount returns the Portainer ServiceAccount associated with the specified user.
func (kcl *KubeClient) GetPortainerUserServiceAccount(tokenData *portainer.TokenData) (*corev1.ServiceAccount, error) {
portainerUserServiceAccountName := UserServiceAccountName(int(tokenData.ID), kcl.instanceID)
if tokenData.Role == portainer.AdministratorRole {
portainerServiceAccountName = portainerClusterAdminServiceAccountName
} else {
portainerServiceAccountName = UserServiceAccountName(int(tokenData.ID), kcl.instanceID)
portainerUserServiceAccountName = portainerClusterAdminServiceAccountName
}
// verify name exists as service account resource within portainer namespace
serviceAccount, err := kcl.cli.CoreV1().ServiceAccounts(portainerNamespace).Get(context.TODO(), portainerServiceAccountName, metav1.GetOptions{})
serviceAccount, err := kcl.cli.CoreV1().ServiceAccounts(portainerNamespace).Get(context.TODO(), portainerUserServiceAccountName, metav1.GetOptions{})
if err != nil {
return nil, err
}
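
UserServiceAccountName is defined elsewhere in this package; a sketch consistent with the names expected by the tests below ("portainer-sa-user-test-1" for instance ID "test" and user ID 1). The exact format string is an assumption:

import "fmt"

func UserServiceAccountName(userID int, instanceID string) string {
	// assumed format; yields "portainer-sa-user-test-1" for ("test", 1)
	return fmt.Sprintf("portainer-sa-user-%s-%d", instanceID, userID)
}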
@ -69,7 +122,7 @@ func (kcl *KubeClient) ensureRequiredResourcesExist() error {
}
func (kcl *KubeClient) createUserServiceAccount(namespace, serviceAccountName string) error {
serviceAccount := &v1.ServiceAccount{
serviceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},

View file

@ -18,9 +18,9 @@ func Test_GetServiceAccount(t *testing.T) {
instanceID: "test",
}
tokenData := &portainer.TokenData{ID: 1}
_, err := k.GetServiceAccount(tokenData)
_, err := k.GetPortainerUserServiceAccount(tokenData)
if err == nil {
t.Error("GetServiceAccount should fail with service account not found")
t.Error("GetPortainerUserServiceAccount should fail with service account not found")
}
})
@ -46,9 +46,9 @@ func Test_GetServiceAccount(t *testing.T) {
}
defer k.cli.CoreV1().ServiceAccounts(portainerNamespace).Delete(context.Background(), serviceAccount.Name, metav1.DeleteOptions{})
sa, err := k.GetServiceAccount(tokenData)
sa, err := k.GetPortainerUserServiceAccount(tokenData)
if err != nil {
t.Errorf("GetServiceAccount should succeed; err=%s", err)
t.Errorf("GetPortainerUserServiceAccount should succeed; err=%s", err)
}
want := "portainer-sa-clusteradmin"
@ -79,14 +79,14 @@ func Test_GetServiceAccount(t *testing.T) {
}
defer k.cli.CoreV1().ServiceAccounts(portainerNamespace).Delete(context.Background(), serviceAccount.Name, metav1.DeleteOptions{})
sa, err := k.GetServiceAccount(tokenData)
sa, err := k.GetPortainerUserServiceAccount(tokenData)
if err != nil {
t.Errorf("GetServiceAccount should succeed; err=%s", err)
t.Errorf("GetPortainerUserServiceAccount should succeed; err=%s", err)
}
want := "portainer-sa-user-test-1"
if sa.Name != want {
t.Errorf("GetServiceAccount should succeed and return correct sa name; got=%s want=%s", sa.Name, want)
t.Errorf("GetPortainerUserServiceAccount should succeed and return correct sa name; got=%s want=%s", sa.Name, want)
}
})

View file

@ -0,0 +1,258 @@
package cli
import (
"context"
"fmt"
models "github.com/portainer/portainer/api/http/models/kubernetes"
"github.com/rs/zerolog/log"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetVolumes gets the volumes in the current k8s environment (endpoint).
// If the user is an admin, it fetches all the volumes in the cluster.
// If the user is not an admin, it fetches the volumes in the namespaces the user has access to.
// It returns a list of K8sVolumeInfo.
func (kcl *KubeClient) GetVolumes(namespace string) ([]models.K8sVolumeInfo, error) {
if kcl.IsKubeAdmin {
return kcl.fetchVolumes(namespace)
}
return kcl.fetchVolumesForNonAdmin(namespace)
}
// GetVolume gets the volume with the given name and namespace.
func (kcl *KubeClient) GetVolume(namespace, volumeName string) (*models.K8sVolumeInfo, error) {
persistentVolumeClaim, err := kcl.cli.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), volumeName, metav1.GetOptions{})
if err != nil {
if k8serrors.IsNotFound(err) {
return nil, nil
}
return nil, err
}
persistentVolumesMap, storageClassesMap, err := kcl.fetchPersistentVolumesAndStorageClassesMap()
if err != nil {
return nil, err
}
volume := parseVolume(persistentVolumeClaim, persistentVolumesMap, storageClassesMap)
return &volume, nil
}
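
Note the NotFound contract here: a missing PVC yields nil, nil rather than an error. A hypothetical in-package caller (volumeExists is illustrative only):

func volumeExists(kcl *KubeClient, namespace, name string) (bool, error) {
	volume, err := kcl.GetVolume(namespace, name)
	if err != nil {
		return false, err // a real API error
	}
	return volume != nil, nil // NotFound surfaces as nil, nil
}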
// fetchVolumesForNonAdmin fetches the volumes in the namespaces the user has access to.
// This function is called when the user is not an admin.
// It fetches the volumes in the requested namespace, then keeps only those whose namespace appears in NonAdminNamespaces.
func (kcl *KubeClient) fetchVolumesForNonAdmin(namespace string) ([]models.K8sVolumeInfo, error) {
log.Debug().Msgf("Fetching volumes for non-admin user; allowed namespaces: %v", kcl.NonAdminNamespaces)
if len(kcl.NonAdminNamespaces) == 0 {
return nil, nil
}
volumes, err := kcl.fetchVolumes(namespace)
if err != nil {
return nil, err
}
nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
results := make([]models.K8sVolumeInfo, 0)
for _, volume := range volumes {
if _, ok := nonAdminNamespaceSet[volume.PersistentVolumeClaim.Namespace]; ok {
results = append(results, volume)
}
}
return results, nil
}
// fetchVolumes fetches all the persistent volume claims, persistent volumes and storage classes in the given namespace.
// It returns a list of K8sVolumeInfo.
// This function is called by GetVolumes and fetchVolumesForNonAdmin.
func (kcl *KubeClient) fetchVolumes(namespace string) ([]models.K8sVolumeInfo, error) {
volumes := make([]models.K8sVolumeInfo, 0)
persistentVolumeClaims, err := kcl.cli.CoreV1().PersistentVolumeClaims(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
if len(persistentVolumeClaims.Items) > 0 {
persistentVolumesMap, storageClassesMap, err := kcl.fetchPersistentVolumesAndStorageClassesMap()
if err != nil {
return nil, err
}
for _, persistentVolumeClaim := range persistentVolumeClaims.Items {
volumes = append(volumes, parseVolume(&persistentVolumeClaim, persistentVolumesMap, storageClassesMap))
}
}
return volumes, nil
}
// parseVolume parses the given persistent volume claim and returns a K8sVolumeInfo.
// This function is called by fetchVolumes.
// It returns a K8sVolumeInfo.
func parseVolume(persistentVolumeClaim *corev1.PersistentVolumeClaim, persistentVolumesMap map[string]models.K8sPersistentVolume, storageClassesMap map[string]models.K8sStorageClass) models.K8sVolumeInfo {
volume := models.K8sVolumeInfo{}
volumeClaim := parsePersistentVolumeClaim(persistentVolumeClaim)
if volumeClaim.VolumeName != "" {
persistentVolume, ok := persistentVolumesMap[volumeClaim.VolumeName]
if ok {
volume.PersistentVolume = persistentVolume
}
}
if volumeClaim.StorageClass != nil {
storageClass, ok := storageClassesMap[*volumeClaim.StorageClass]
if ok {
volume.StorageClass = storageClass
}
}
volume.PersistentVolumeClaim = volumeClaim
return volume
}
// parsePersistentVolumeClaim parses the given persistent volume claim and returns a K8sPersistentVolumeClaim.
func parsePersistentVolumeClaim(volume *corev1.PersistentVolumeClaim) models.K8sPersistentVolumeClaim {
storage := volume.Spec.Resources.Requests[corev1.ResourceStorage]
return models.K8sPersistentVolumeClaim{
ID: string(volume.UID),
Name: volume.Name,
Namespace: volume.Namespace,
CreationDate: volume.CreationTimestamp.Time,
Storage: storage.Value(),
AccessModes: volume.Spec.AccessModes,
VolumeName: volume.Spec.VolumeName,
ResourcesRequests: &volume.Spec.Resources.Requests,
StorageClass: volume.Spec.StorageClassName,
VolumeMode: volume.Spec.VolumeMode,
OwningApplications: nil,
Phase: volume.Status.Phase,
}
}
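
Storage is reported in bytes because resource.Quantity.Value() converts the requested quantity. A self-contained illustration (the 1Gi value is assumed for the example):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	storage := resource.MustParse("1Gi")
	fmt.Println(storage.Value()) // prints 1073741824 (bytes)
}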
// parsePersistentVolume parses the given persistent volume and returns a K8sPersistentVolume.
func parsePersistentVolume(volume *corev1.PersistentVolume) models.K8sPersistentVolume {
return models.K8sPersistentVolume{
Name: volume.Name,
Annotations: volume.Annotations,
AccessModes: volume.Spec.AccessModes,
Capacity: volume.Spec.Capacity,
ClaimRef: volume.Spec.ClaimRef,
StorageClassName: volume.Spec.StorageClassName,
PersistentVolumeReclaimPolicy: volume.Spec.PersistentVolumeReclaimPolicy,
VolumeMode: volume.Spec.VolumeMode,
CSI: volume.Spec.CSI,
}
}
// buildPersistentVolumesMap builds a map of persistent volumes.
func (kcl *KubeClient) buildPersistentVolumesMap(persistentVolumes *corev1.PersistentVolumeList) map[string]models.K8sPersistentVolume {
persistentVolumesMap := make(map[string]models.K8sPersistentVolume)
for _, persistentVolume := range persistentVolumes.Items {
persistentVolumesMap[persistentVolume.Name] = parsePersistentVolume(&persistentVolume)
}
return persistentVolumesMap
}
// parseStorageClass parses the given storage class and returns a K8sStorageClass.
func parseStorageClass(storageClass *storagev1.StorageClass) models.K8sStorageClass {
return models.K8sStorageClass{
Name: storageClass.Name,
Provisioner: storageClass.Provisioner,
ReclaimPolicy: storageClass.ReclaimPolicy,
AllowVolumeExpansion: storageClass.AllowVolumeExpansion,
}
}
// buildStorageClassesMap builds a map of storage classes.
func (kcl *KubeClient) buildStorageClassesMap(storageClasses *storagev1.StorageClassList) map[string]models.K8sStorageClass {
storageClassesMap := make(map[string]models.K8sStorageClass)
for _, storageClass := range storageClasses.Items {
storageClassesMap[storageClass.Name] = parseStorageClass(&storageClass)
}
return storageClassesMap
}
// fetchPersistentVolumesAndStorageClassesMap fetches all the persistent volumes and storage classes in the cluster.
// It returns a map of persistent volumes and a map of storage classes.
func (kcl *KubeClient) fetchPersistentVolumesAndStorageClassesMap() (map[string]models.K8sPersistentVolume, map[string]models.K8sStorageClass, error) {
persistentVolumes, err := kcl.cli.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, nil, err
}
persistentVolumesMap := kcl.buildPersistentVolumesMap(persistentVolumes)
storageClasses, err := kcl.cli.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, nil, err
}
storageClassesMap := kcl.buildStorageClassesMap(storageClasses)
return persistentVolumesMap, storageClassesMap, nil
}
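
These fetchers can be exercised against the fake clientset, mirroring the existing tests in this package. A sketch assuming the KubeClient literal fields used in the tests above (cli, instanceID, IsKubeAdmin):

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestGetVolumes_admin(t *testing.T) {
	cli := fake.NewSimpleClientset(&corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
	})
	kcl := &KubeClient{cli: cli, instanceID: "test", IsKubeAdmin: true}

	volumes, err := kcl.GetVolumes("default")
	if err != nil {
		t.Fatalf("GetVolumes should succeed; err=%s", err)
	}
	if len(volumes) != 1 || volumes[0].PersistentVolumeClaim.Name != "data" {
		t.Errorf("expected a single volume named data; got=%v", volumes)
	}
}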
// CombineVolumesWithApplications combines the volumes with the applications that use them.
func (kcl *KubeClient) CombineVolumesWithApplications(volumes *[]models.K8sVolumeInfo) (*[]models.K8sVolumeInfo, error) {
pods, err := kcl.cli.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{})
if err != nil {
if k8serrors.IsNotFound(err) {
return volumes, nil
}
log.Error().Err(err).Msg("Failed to list pods across the cluster")
return nil, fmt.Errorf("an error occurred during the CombineServicesWithApplications operation, unable to list pods across the cluster. Error: %w", err)
}
hasReplicaSetOwnerReference := containsReplicaSetOwnerReference(pods)
replicaSetItems := make([]appsv1.ReplicaSet, 0)
if hasReplicaSetOwnerReference {
replicaSets, err := kcl.cli.AppsV1().ReplicaSets("").List(context.Background(), metav1.ListOptions{})
if err != nil {
log.Error().Err(err).Msg("Failed to list replica sets across the cluster")
return nil, fmt.Errorf("an error occurred during the CombineVolumesWithApplications operation, unable to list replica sets across the cluster. Error: %w", err)
}
replicaSetItems = replicaSets.Items
}
return kcl.updateVolumesWithOwningApplications(volumes, pods, replicaSetItems)
}
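
containsReplicaSetOwnerReference is defined elsewhere in this package; a plausible reconstruction, assuming it only scans pod owner references:

func containsReplicaSetOwnerReference(pods *corev1.PodList) bool {
	for _, pod := range pods.Items {
		for _, ownerRef := range pod.OwnerReferences {
			if ownerRef.Kind == "ReplicaSet" {
				return true
			}
		}
	}
	return false
}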
// updateVolumesWithOwningApplications updates the volumes with the applications that use them.
func (kcl *KubeClient) updateVolumesWithOwningApplications(volumes *[]models.K8sVolumeInfo, pods *corev1.PodList, replicaSetItems []appsv1.ReplicaSet) (*[]models.K8sVolumeInfo, error) {
for i, volume := range *volumes {
for _, pod := range pods.Items {
if pod.Spec.Volumes != nil {
for _, podVolume := range pod.Spec.Volumes {
if podVolume.PersistentVolumeClaim != nil && podVolume.PersistentVolumeClaim.ClaimName == volume.PersistentVolumeClaim.Name && pod.Namespace == volume.PersistentVolumeClaim.Namespace {
application, err := kcl.ConvertPodToApplication(pod, replicaSetItems, []appsv1.Deployment{}, []appsv1.StatefulSet{}, []appsv1.DaemonSet{}, []corev1.Service{}, false)
if err != nil {
log.Error().Err(err).Msg("Failed to convert pod to application")
return nil, fmt.Errorf("an error occurred during the CombineServicesWithApplications operation, unable to convert pod to application. Error: %w", err)
}
// Check if the application already exists in the OwningApplications slice
exists := false
for _, existingApp := range (*volumes)[i].PersistentVolumeClaim.OwningApplications {
if existingApp.Name == application.Name && existingApp.Namespace == application.Namespace {
exists = true
break
}
}
if !exists && application != nil {
(*volumes)[i].PersistentVolumeClaim.OwningApplications = append((*volumes)[i].PersistentVolumeClaim.OwningApplications, *application)
}
}
}
}
}
}
return volumes, nil
}
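
The inner duplicate scan is linear in the number of owning applications per volume. Where that ever matters, a set keyed by namespace/name is a simple alternative; a sketch (appendOwnerOnce is hypothetical, not part of this change):

func appendOwnerOnce(owners []models.K8sApplication, seen map[string]struct{}, app models.K8sApplication) []models.K8sApplication {
	// seen is shared across calls for one volume, so each namespace/name pair is appended once
	key := app.Namespace + "/" + app.Name
	if _, ok := seen[key]; ok {
		return owners
	}
	seen[key] = struct{}{}
	return append(owners, app)
}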