Mirror of https://github.com/portainer/portainer.git
fix: display unscheduled applications (#509)

Co-authored-by: Steven Kang <skan070@gmail.com>

commit 3898b9e09e, parent c0a4a9ab5c
18 changed files with 796 additions and 201 deletions
@@ -69,7 +69,6 @@ func (handler *Handler) getApplicationsResources(w http.ResponseWriter, r *http.
 // @param id path int true "Environment(Endpoint) identifier"
 // @param namespace query string true "Namespace name"
 // @param nodeName query string true "Node name"
-// @param withDependencies query boolean false "Include dependencies in the response"
 // @success 200 {array} models.K8sApplication "Success"
 // @failure 400 "Invalid request payload, such as missing required fields or fields not meeting validation criteria."
 // @failure 401 "Unauthorized access - the user is not authenticated or does not have the necessary permissions. Ensure that you have provided a valid API key or JWT token, and that you have the required permissions."
@@ -117,12 +116,6 @@ func (handler *Handler) getAllKubernetesApplications(r *http.Request) ([]models.
 		return nil, httperror.BadRequest("Unable to parse the namespace query parameter", err)
 	}
 
-	withDependencies, err := request.RetrieveBooleanQueryParameter(r, "withDependencies", true)
-	if err != nil {
-		log.Error().Err(err).Str("context", "getAllKubernetesApplications").Msg("Unable to parse the withDependencies query parameter")
-		return nil, httperror.BadRequest("Unable to parse the withDependencies query parameter", err)
-	}
-
 	nodeName, err := request.RetrieveQueryParameter(r, "nodeName", true)
 	if err != nil {
 		log.Error().Err(err).Str("context", "getAllKubernetesApplications").Msg("Unable to parse the nodeName query parameter")
@@ -135,7 +128,7 @@ func (handler *Handler) getAllKubernetesApplications(r *http.Request) ([]models.
 		return nil, httperror.InternalServerError("Unable to get a Kubernetes client for the user", httpErr)
 	}
 
-	applications, err := cli.GetApplications(namespace, nodeName, withDependencies)
+	applications, err := cli.GetApplications(namespace, nodeName)
 	if err != nil {
 		if k8serrors.IsUnauthorized(err) {
 			log.Error().Err(err).Str("context", "getAllKubernetesApplications").Str("namespace", namespace).Str("nodeName", nodeName).Msg("Unable to get the list of applications")
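
The three hunks above remove the withDependencies query parameter end to end: the handler no longer parses it, and the client call narrows to namespace and node filters only. A minimal illustrative wrapper (not part of the commit) showing how callers use the narrowed signature:

	// listApps is illustrative only; it is not in the diff.
	// The withDependencies flag is gone, so dependencies (services, HPAs)
	// are always resolved by GetApplications.
	func listApps(cli *KubeClient, namespace, nodeName string) ([]models.K8sApplication, error) {
		return cli.GetApplications(namespace, nodeName)
	}
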
@@ -12,45 +12,58 @@ import (
 	labels "k8s.io/apimachinery/pkg/labels"
 )
 
+// PortainerApplicationResources contains collections of various Kubernetes resources
+// associated with a Portainer application.
+type PortainerApplicationResources struct {
+	Pods                     []corev1.Pod
+	ReplicaSets              []appsv1.ReplicaSet
+	Deployments              []appsv1.Deployment
+	StatefulSets             []appsv1.StatefulSet
+	DaemonSets               []appsv1.DaemonSet
+	Services                 []corev1.Service
+	HorizontalPodAutoscalers []autoscalingv2.HorizontalPodAutoscaler
+}
+
 // GetAllKubernetesApplications gets a list of kubernetes workloads (or applications) across all namespaces in the cluster
 // if the user is an admin, all namespaces in the current k8s environment(endpoint) are fetched using the fetchApplications function.
 // otherwise, namespaces the non-admin user has access to will be used to filter the applications based on the allowed namespaces.
-func (kcl *KubeClient) GetApplications(namespace, nodeName string, withDependencies bool) ([]models.K8sApplication, error) {
+func (kcl *KubeClient) GetApplications(namespace, nodeName string) ([]models.K8sApplication, error) {
 	if kcl.IsKubeAdmin {
-		return kcl.fetchApplications(namespace, nodeName, withDependencies)
+		return kcl.fetchApplications(namespace, nodeName)
 	}
 
-	return kcl.fetchApplicationsForNonAdmin(namespace, nodeName, withDependencies)
+	return kcl.fetchApplicationsForNonAdmin(namespace, nodeName)
 }
 
 // fetchApplications fetches the applications in the namespaces the user has access to.
 // This function is called when the user is an admin.
-func (kcl *KubeClient) fetchApplications(namespace, nodeName string, withDependencies bool) ([]models.K8sApplication, error) {
+func (kcl *KubeClient) fetchApplications(namespace, nodeName string) ([]models.K8sApplication, error) {
 	podListOptions := metav1.ListOptions{}
 	if nodeName != "" {
 		podListOptions.FieldSelector = "spec.nodeName=" + nodeName
 	}
-	if !withDependencies {
-		// TODO: make sure not to fetch services in fetchAllApplicationsListResources from this call
-		pods, replicaSets, deployments, statefulSets, daemonSets, _, _, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions)
-		if err != nil {
-			return nil, err
-		}
-
-		return kcl.convertPodsToApplications(pods, replicaSets, deployments, statefulSets, daemonSets, nil, nil)
-	}
 
-	pods, replicaSets, deployments, statefulSets, daemonSets, services, hpas, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions)
+	portainerApplicationResources, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions)
 	if err != nil {
 		return nil, err
 	}
 
-	return kcl.convertPodsToApplications(pods, replicaSets, deployments, statefulSets, daemonSets, services, hpas)
+	applications, err := kcl.convertPodsToApplications(portainerApplicationResources)
+	if err != nil {
+		return nil, err
+	}
+
+	unhealthyApplications, err := fetchUnhealthyApplications(portainerApplicationResources)
+	if err != nil {
+		return nil, err
+	}
+
+	return append(applications, unhealthyApplications...), nil
 }
 
 // fetchApplicationsForNonAdmin fetches the applications in the namespaces the user has access to.
 // This function is called when the user is not an admin.
-func (kcl *KubeClient) fetchApplicationsForNonAdmin(namespace, nodeName string, withDependencies bool) ([]models.K8sApplication, error) {
+func (kcl *KubeClient) fetchApplicationsForNonAdmin(namespace, nodeName string) ([]models.K8sApplication, error) {
 	log.Debug().Msgf("Fetching applications for non-admin user: %v", kcl.NonAdminNamespaces)
 
 	if len(kcl.NonAdminNamespaces) == 0 {
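
The hunk above is the heart of the refactor: the seven parallel slices formerly threaded through every call are replaced by one PortainerApplicationResources value, and workloads with zero scheduled pods are appended after the pod-driven conversion. A sketch of the resulting flow, condensed from the fetchApplications body above (fetchAllApplicationsListResources's body is not shown in this diff, so its shape is inferred from the call sites):

	// Sketch only: condensed from the fetchApplications body above.
	func exampleAdminFlow(kcl *KubeClient) ([]models.K8sApplication, error) {
		resources, err := kcl.fetchAllApplicationsListResources("", metav1.ListOptions{})
		if err != nil {
			return nil, err
		}

		// Applications backed by at least one pod...
		apps, err := kcl.convertPodsToApplications(resources)
		if err != nil {
			return nil, err
		}

		// ...plus workloads that scheduled no pods at all, which is what
		// makes unscheduled applications visible (#509).
		unhealthy, err := fetchUnhealthyApplications(resources)
		if err != nil {
			return nil, err
		}

		return append(apps, unhealthy...), nil
	}
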
@@ -62,28 +75,24 @@ func (kcl *KubeClient) fetchApplicationsForNonAdmin(namespace, nodeName string,
 		podListOptions.FieldSelector = "spec.nodeName=" + nodeName
 	}
 
-	if !withDependencies {
-		pods, replicaSets, _, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets(namespace, podListOptions)
-		if err != nil {
-			return nil, err
-		}
-
-		return kcl.convertPodsToApplications(pods, replicaSets, nil, nil, nil, nil, nil)
-	}
-
-	pods, replicaSets, deployments, statefulSets, daemonSets, services, hpas, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions)
+	portainerApplicationResources, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions)
 	if err != nil {
 		return nil, err
 	}
 
-	applications, err := kcl.convertPodsToApplications(pods, replicaSets, deployments, statefulSets, daemonSets, services, hpas)
+	applications, err := kcl.convertPodsToApplications(portainerApplicationResources)
+	if err != nil {
+		return nil, err
+	}
+
+	unhealthyApplications, err := fetchUnhealthyApplications(portainerApplicationResources)
 	if err != nil {
 		return nil, err
 	}
 
 	nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap()
 	results := make([]models.K8sApplication, 0)
-	for _, application := range applications {
+	for _, application := range append(applications, unhealthyApplications...) {
 		if _, ok := nonAdminNamespaceSet[application.ResourcePool]; ok {
 			results = append(results, application)
 		}
@@ -93,11 +102,11 @@ func (kcl *KubeClient) fetchApplicationsForNonAdmin(namespace, nodeName string,
 }
 
 // convertPodsToApplications processes pods and converts them to applications, ensuring uniqueness by owner reference.
-func (kcl *KubeClient) convertPodsToApplications(pods []corev1.Pod, replicaSets []appsv1.ReplicaSet, deployments []appsv1.Deployment, statefulSets []appsv1.StatefulSet, daemonSets []appsv1.DaemonSet, services []corev1.Service, hpas []autoscalingv2.HorizontalPodAutoscaler) ([]models.K8sApplication, error) {
+func (kcl *KubeClient) convertPodsToApplications(portainerApplicationResources PortainerApplicationResources) ([]models.K8sApplication, error) {
 	applications := []models.K8sApplication{}
 	processedOwners := make(map[string]struct{})
 
-	for _, pod := range pods {
+	for _, pod := range portainerApplicationResources.Pods {
 		if len(pod.OwnerReferences) > 0 {
 			ownerUID := string(pod.OwnerReferences[0].UID)
 			if _, exists := processedOwners[ownerUID]; exists {
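
convertPodsToApplications deduplicates by the UID of each pod's first owner reference, so the N pods of one ReplicaSet collapse into a single application entry. The pattern in isolation (a generic sketch, not diff content):

	// seen tracks owner UIDs that have already produced an application.
	seen := make(map[string]struct{})
	for _, pod := range resources.Pods {
		if len(pod.OwnerReferences) > 0 {
			uid := string(pod.OwnerReferences[0].UID)
			if _, done := seen[uid]; done {
				continue // a sibling pod of the same workload was already converted
			}
			seen[uid] = struct{}{}
		}
		// convert the pod to an application here
	}
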
@@ -106,7 +115,7 @@ func (kcl *KubeClient) convertPodsToApplications(pods []corev1.Pod, replicaSets
 			processedOwners[ownerUID] = struct{}{}
 		}
 
-		application, err := kcl.ConvertPodToApplication(pod, replicaSets, deployments, statefulSets, daemonSets, services, hpas, true)
+		application, err := kcl.ConvertPodToApplication(pod, portainerApplicationResources, true)
 		if err != nil {
 			return nil, err
 		}
@@ -151,7 +160,9 @@ func (kcl *KubeClient) GetApplicationNamesFromConfigMap(configMap models.K8sConf
 	for _, pod := range pods {
 		if pod.Namespace == configMap.Namespace {
 			if isPodUsingConfigMap(&pod, configMap.Name) {
-				application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, nil, false)
+				application, err := kcl.ConvertPodToApplication(pod, PortainerApplicationResources{
+					ReplicaSets: replicaSets,
+				}, false)
 				if err != nil {
 					return nil, err
 				}
@@ -168,7 +179,9 @@ func (kcl *KubeClient) GetApplicationNamesFromSecret(secret models.K8sSecret, po
 	for _, pod := range pods {
 		if pod.Namespace == secret.Namespace {
 			if isPodUsingSecret(&pod, secret.Name) {
-				application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, nil, false)
+				application, err := kcl.ConvertPodToApplication(pod, PortainerApplicationResources{
+					ReplicaSets: replicaSets,
+				}, false)
 				if err != nil {
 					return nil, err
 				}
@@ -181,12 +194,12 @@ func (kcl *KubeClient) GetApplicationNamesFromSecret(secret models.K8sSecret, po
 }
 
 // ConvertPodToApplication converts a pod to an application, updating owner references if necessary
-func (kcl *KubeClient) ConvertPodToApplication(pod corev1.Pod, replicaSets []appsv1.ReplicaSet, deployments []appsv1.Deployment, statefulSets []appsv1.StatefulSet, daemonSets []appsv1.DaemonSet, services []corev1.Service, hpas []autoscalingv2.HorizontalPodAutoscaler, withResource bool) (*models.K8sApplication, error) {
+func (kcl *KubeClient) ConvertPodToApplication(pod corev1.Pod, portainerApplicationResources PortainerApplicationResources, withResource bool) (*models.K8sApplication, error) {
 	if isReplicaSetOwner(pod) {
-		updateOwnerReferenceToDeployment(&pod, replicaSets)
+		updateOwnerReferenceToDeployment(&pod, portainerApplicationResources.ReplicaSets)
 	}
 
-	application := createApplication(&pod, deployments, statefulSets, daemonSets, services, hpas)
+	application := createApplicationFromPod(&pod, portainerApplicationResources)
 	if application.ID == "" && application.Name == "" {
 		return nil, nil
 	}
@@ -203,9 +216,9 @@ func (kcl *KubeClient) ConvertPodToApplication(pod corev1.Pod, replicaSets []app
 	return &application, nil
 }
 
-// createApplication creates a K8sApplication object from a pod
+// createApplicationFromPod creates a K8sApplication object from a pod
 // it sets the application name, namespace, kind, image, stack id, stack name, and labels
-func createApplication(pod *corev1.Pod, deployments []appsv1.Deployment, statefulSets []appsv1.StatefulSet, daemonSets []appsv1.DaemonSet, services []corev1.Service, hpas []autoscalingv2.HorizontalPodAutoscaler) models.K8sApplication {
+func createApplicationFromPod(pod *corev1.Pod, portainerApplicationResources PortainerApplicationResources) models.K8sApplication {
 	kind := "Pod"
 	name := pod.Name
 
@@ -221,120 +234,172 @@ func createApplication(pod *corev1.Pod, deployments []appsv1.Deployment, statefu
 
 	switch kind {
 	case "Deployment":
-		for _, deployment := range deployments {
+		for _, deployment := range portainerApplicationResources.Deployments {
 			if deployment.Name == name && deployment.Namespace == pod.Namespace {
-				application.ApplicationType = "Deployment"
-				application.Kind = "Deployment"
-				application.ID = string(deployment.UID)
-				application.ResourcePool = deployment.Namespace
-				application.Name = name
-				application.Image = deployment.Spec.Template.Spec.Containers[0].Image
-				application.ApplicationOwner = deployment.Labels["io.portainer.kubernetes.application.owner"]
-				application.StackID = deployment.Labels["io.portainer.kubernetes.application.stackid"]
-				application.StackName = deployment.Labels["io.portainer.kubernetes.application.stack"]
-				application.Labels = deployment.Labels
-				application.MatchLabels = deployment.Spec.Selector.MatchLabels
-				application.CreationDate = deployment.CreationTimestamp.Time
-				application.TotalPodsCount = int(deployment.Status.Replicas)
-				application.RunningPodsCount = int(deployment.Status.ReadyReplicas)
-				application.DeploymentType = "Replicated"
-				application.Metadata = &models.Metadata{
-					Labels: deployment.Labels,
-				}
-
+				populateApplicationFromDeployment(&application, deployment)
 				break
 			}
 		}
 
 	case "StatefulSet":
-		for _, statefulSet := range statefulSets {
+		for _, statefulSet := range portainerApplicationResources.StatefulSets {
 			if statefulSet.Name == name && statefulSet.Namespace == pod.Namespace {
-				application.Kind = "StatefulSet"
-				application.ApplicationType = "StatefulSet"
-				application.ID = string(statefulSet.UID)
-				application.ResourcePool = statefulSet.Namespace
-				application.Name = name
-				application.Image = statefulSet.Spec.Template.Spec.Containers[0].Image
-				application.ApplicationOwner = statefulSet.Labels["io.portainer.kubernetes.application.owner"]
-				application.StackID = statefulSet.Labels["io.portainer.kubernetes.application.stackid"]
-				application.StackName = statefulSet.Labels["io.portainer.kubernetes.application.stack"]
-				application.Labels = statefulSet.Labels
-				application.MatchLabels = statefulSet.Spec.Selector.MatchLabels
-				application.CreationDate = statefulSet.CreationTimestamp.Time
-				application.TotalPodsCount = int(statefulSet.Status.Replicas)
-				application.RunningPodsCount = int(statefulSet.Status.ReadyReplicas)
-				application.DeploymentType = "Replicated"
-				application.Metadata = &models.Metadata{
-					Labels: statefulSet.Labels,
-				}
-
+				populateApplicationFromStatefulSet(&application, statefulSet)
 				break
 			}
 		}
 
 	case "DaemonSet":
-		for _, daemonSet := range daemonSets {
+		for _, daemonSet := range portainerApplicationResources.DaemonSets {
 			if daemonSet.Name == name && daemonSet.Namespace == pod.Namespace {
-				application.Kind = "DaemonSet"
-				application.ApplicationType = "DaemonSet"
-				application.ID = string(daemonSet.UID)
-				application.ResourcePool = daemonSet.Namespace
-				application.Name = name
-				application.Image = daemonSet.Spec.Template.Spec.Containers[0].Image
-				application.ApplicationOwner = daemonSet.Labels["io.portainer.kubernetes.application.owner"]
-				application.StackID = daemonSet.Labels["io.portainer.kubernetes.application.stackid"]
-				application.StackName = daemonSet.Labels["io.portainer.kubernetes.application.stack"]
-				application.Labels = daemonSet.Labels
-				application.MatchLabels = daemonSet.Spec.Selector.MatchLabels
-				application.CreationDate = daemonSet.CreationTimestamp.Time
-				application.TotalPodsCount = int(daemonSet.Status.DesiredNumberScheduled)
-				application.RunningPodsCount = int(daemonSet.Status.NumberReady)
-				application.DeploymentType = "Global"
-				application.Metadata = &models.Metadata{
-					Labels: daemonSet.Labels,
-				}
-
+				populateApplicationFromDaemonSet(&application, daemonSet)
 				break
 			}
 		}
 
 	case "Pod":
-		runningPodsCount := 1
-		if pod.Status.Phase != corev1.PodRunning {
-			runningPodsCount = 0
-		}
-
-		application.ApplicationType = "Pod"
-		application.Kind = "Pod"
-		application.ID = string(pod.UID)
-		application.ResourcePool = pod.Namespace
-		application.Name = pod.Name
-		application.Image = pod.Spec.Containers[0].Image
-		application.ApplicationOwner = pod.Labels["io.portainer.kubernetes.application.owner"]
-		application.StackID = pod.Labels["io.portainer.kubernetes.application.stackid"]
-		application.StackName = pod.Labels["io.portainer.kubernetes.application.stack"]
-		application.Labels = pod.Labels
-		application.MatchLabels = pod.Labels
-		application.CreationDate = pod.CreationTimestamp.Time
-		application.TotalPodsCount = 1
-		application.RunningPodsCount = runningPodsCount
-		application.DeploymentType = string(pod.Status.Phase)
-		application.Metadata = &models.Metadata{
-			Labels: pod.Labels,
-		}
+		populateApplicationFromPod(&application, *pod)
 	}
 
-	if application.ID != "" && application.Name != "" && len(services) > 0 {
-		updateApplicationWithService(&application, services)
+	if application.ID != "" && application.Name != "" && len(portainerApplicationResources.Services) > 0 {
+		updateApplicationWithService(&application, portainerApplicationResources.Services)
 	}
 
-	if application.ID != "" && application.Name != "" && len(hpas) > 0 {
-		updateApplicationWithHorizontalPodAutoscaler(&application, hpas)
+	if application.ID != "" && application.Name != "" && len(portainerApplicationResources.HorizontalPodAutoscalers) > 0 {
+		updateApplicationWithHorizontalPodAutoscaler(&application, portainerApplicationResources.HorizontalPodAutoscalers)
 	}
 
 	return application
 }
 
+// createApplicationFromDeployment creates a K8sApplication from a Deployment
+func createApplicationFromDeployment(deployment appsv1.Deployment) models.K8sApplication {
+	var app models.K8sApplication
+	populateApplicationFromDeployment(&app, deployment)
+	return app
+}
+
+// createApplicationFromStatefulSet creates a K8sApplication from a StatefulSet
+func createApplicationFromStatefulSet(statefulSet appsv1.StatefulSet) models.K8sApplication {
+	var app models.K8sApplication
+	populateApplicationFromStatefulSet(&app, statefulSet)
+	return app
+}
+
+// createApplicationFromDaemonSet creates a K8sApplication from a DaemonSet
+func createApplicationFromDaemonSet(daemonSet appsv1.DaemonSet) models.K8sApplication {
+	var app models.K8sApplication
+	populateApplicationFromDaemonSet(&app, daemonSet)
+	return app
+}
+
+func populateApplicationFromDeployment(application *models.K8sApplication, deployment appsv1.Deployment) {
+	application.ApplicationType = "Deployment"
+	application.Kind = "Deployment"
+	application.ID = string(deployment.UID)
+	application.ResourcePool = deployment.Namespace
+	application.Name = deployment.Name
+	application.ApplicationOwner = deployment.Labels["io.portainer.kubernetes.application.owner"]
+	application.StackID = deployment.Labels["io.portainer.kubernetes.application.stackid"]
+	application.StackName = deployment.Labels["io.portainer.kubernetes.application.stack"]
+	application.Labels = deployment.Labels
+	application.MatchLabels = deployment.Spec.Selector.MatchLabels
+	application.CreationDate = deployment.CreationTimestamp.Time
+	application.TotalPodsCount = 0
+	if deployment.Spec.Replicas != nil {
+		application.TotalPodsCount = int(*deployment.Spec.Replicas)
+	}
+	application.RunningPodsCount = int(deployment.Status.ReadyReplicas)
+	application.DeploymentType = "Replicated"
+	application.Metadata = &models.Metadata{
+		Labels: deployment.Labels,
+	}
+
+	// If the deployment has containers, use the first container's image
+	if len(deployment.Spec.Template.Spec.Containers) > 0 {
+		application.Image = deployment.Spec.Template.Spec.Containers[0].Image
+	}
+}
+
+func populateApplicationFromStatefulSet(application *models.K8sApplication, statefulSet appsv1.StatefulSet) {
+	application.Kind = "StatefulSet"
+	application.ApplicationType = "StatefulSet"
+	application.ID = string(statefulSet.UID)
+	application.ResourcePool = statefulSet.Namespace
+	application.Name = statefulSet.Name
+	application.ApplicationOwner = statefulSet.Labels["io.portainer.kubernetes.application.owner"]
+	application.StackID = statefulSet.Labels["io.portainer.kubernetes.application.stackid"]
+	application.StackName = statefulSet.Labels["io.portainer.kubernetes.application.stack"]
+	application.Labels = statefulSet.Labels
+	application.MatchLabels = statefulSet.Spec.Selector.MatchLabels
+	application.CreationDate = statefulSet.CreationTimestamp.Time
+	application.TotalPodsCount = 0
+	if statefulSet.Spec.Replicas != nil {
+		application.TotalPodsCount = int(*statefulSet.Spec.Replicas)
+	}
+	application.RunningPodsCount = int(statefulSet.Status.ReadyReplicas)
+	application.DeploymentType = "Replicated"
+	application.Metadata = &models.Metadata{
+		Labels: statefulSet.Labels,
+	}
+
+	// If the statefulSet has containers, use the first container's image
+	if len(statefulSet.Spec.Template.Spec.Containers) > 0 {
+		application.Image = statefulSet.Spec.Template.Spec.Containers[0].Image
+	}
+}
+
+func populateApplicationFromDaemonSet(application *models.K8sApplication, daemonSet appsv1.DaemonSet) {
+	application.Kind = "DaemonSet"
+	application.ApplicationType = "DaemonSet"
+	application.ID = string(daemonSet.UID)
+	application.ResourcePool = daemonSet.Namespace
+	application.Name = daemonSet.Name
+	application.ApplicationOwner = daemonSet.Labels["io.portainer.kubernetes.application.owner"]
+	application.StackID = daemonSet.Labels["io.portainer.kubernetes.application.stackid"]
+	application.StackName = daemonSet.Labels["io.portainer.kubernetes.application.stack"]
+	application.Labels = daemonSet.Labels
+	application.MatchLabels = daemonSet.Spec.Selector.MatchLabels
+	application.CreationDate = daemonSet.CreationTimestamp.Time
+	application.TotalPodsCount = int(daemonSet.Status.DesiredNumberScheduled)
+	application.RunningPodsCount = int(daemonSet.Status.NumberReady)
+	application.DeploymentType = "Global"
+	application.Metadata = &models.Metadata{
+		Labels: daemonSet.Labels,
+	}
+
+	if len(daemonSet.Spec.Template.Spec.Containers) > 0 {
+		application.Image = daemonSet.Spec.Template.Spec.Containers[0].Image
+	}
+}
+
+func populateApplicationFromPod(application *models.K8sApplication, pod corev1.Pod) {
+	runningPodsCount := 1
+	if pod.Status.Phase != corev1.PodRunning {
+		runningPodsCount = 0
+	}
+
+	application.ApplicationType = "Pod"
+	application.Kind = "Pod"
+	application.ID = string(pod.UID)
+	application.ResourcePool = pod.Namespace
+	application.Name = pod.Name
+	application.ApplicationOwner = pod.Labels["io.portainer.kubernetes.application.owner"]
+	application.StackID = pod.Labels["io.portainer.kubernetes.application.stackid"]
+	application.StackName = pod.Labels["io.portainer.kubernetes.application.stack"]
+	application.Labels = pod.Labels
+	application.MatchLabels = pod.Labels
+	application.CreationDate = pod.CreationTimestamp.Time
+	application.TotalPodsCount = 1
+	application.RunningPodsCount = runningPodsCount
+	application.DeploymentType = string(pod.Status.Phase)
+	application.Metadata = &models.Metadata{
+		Labels: pod.Labels,
+	}
+
+	// If the pod has containers, use the first container's image
+	if len(pod.Spec.Containers) > 0 {
+		application.Image = pod.Spec.Containers[0].Image
+	}
+}
+
 // updateApplicationWithService updates the application with the services that match the application's selector match labels
 // and are in the same namespace as the application
 func updateApplicationWithService(application *models.K8sApplication, services []corev1.Service) {
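
The large hunk above extracts the repeated field-copy blocks into populateApplicationFrom* helpers, with thin createApplicationFrom* wrappers on top, so the same mapping serves both the pod-driven path and the new unhealthy-workload path. Note that the helpers also remove a latent panic: the old inline code indexed Containers[0] unconditionally, while the helpers guard the access with a length check. Usage in miniature (illustrative, assuming a deployment value in scope):

	// Pod-driven path: mutate an application that already exists.
	var application models.K8sApplication
	populateApplicationFromDeployment(&application, deployment)

	// Unhealthy-workload path: build one from scratch via the wrapper.
	app := createApplicationFromDeployment(deployment)
	_ = app
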
@@ -410,7 +475,9 @@ func (kcl *KubeClient) GetApplicationConfigurationOwnersFromConfigMap(configMap
 	for _, pod := range pods {
 		if pod.Namespace == configMap.Namespace {
 			if isPodUsingConfigMap(&pod, configMap.Name) {
-				application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, nil, false)
+				application, err := kcl.ConvertPodToApplication(pod, PortainerApplicationResources{
+					ReplicaSets: replicaSets,
+				}, false)
 				if err != nil {
 					return nil, err
 				}
@@ -436,7 +503,9 @@ func (kcl *KubeClient) GetApplicationConfigurationOwnersFromSecret(secret models
 	for _, pod := range pods {
 		if pod.Namespace == secret.Namespace {
 			if isPodUsingSecret(&pod, secret.Name) {
-				application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, nil, false)
+				application, err := kcl.ConvertPodToApplication(pod, PortainerApplicationResources{
+					ReplicaSets: replicaSets,
+				}, false)
 				if err != nil {
 					return nil, err
 				}
@@ -454,3 +523,84 @@ func (kcl *KubeClient) GetApplicationConfigurationOwnersFromSecret(secret models
 
 	return configurationOwners, nil
 }
+
+// fetchUnhealthyApplications fetches applications that failed to schedule any pods
+// due to issues like missing resource limits or other scheduling constraints
+func fetchUnhealthyApplications(resources PortainerApplicationResources) ([]models.K8sApplication, error) {
+	var unhealthyApplications []models.K8sApplication
+
+	// Process Deployments
+	for _, deployment := range resources.Deployments {
+		if hasNoScheduledPods(deployment) {
+			app := createApplicationFromDeployment(deployment)
+			addRelatedResourcesToApplication(&app, resources)
+			unhealthyApplications = append(unhealthyApplications, app)
+		}
+	}
+
+	// Process StatefulSets
+	for _, statefulSet := range resources.StatefulSets {
+		if hasNoScheduledPods(statefulSet) {
+			app := createApplicationFromStatefulSet(statefulSet)
+			addRelatedResourcesToApplication(&app, resources)
+			unhealthyApplications = append(unhealthyApplications, app)
+		}
+	}
+
+	// Process DaemonSets
+	for _, daemonSet := range resources.DaemonSets {
+		if hasNoScheduledPods(daemonSet) {
+			app := createApplicationFromDaemonSet(daemonSet)
+			addRelatedResourcesToApplication(&app, resources)
+			unhealthyApplications = append(unhealthyApplications, app)
+		}
+	}
+
+	return unhealthyApplications, nil
+}
+
+// addRelatedResourcesToApplication adds Services and HPA information to the application
+func addRelatedResourcesToApplication(app *models.K8sApplication, resources PortainerApplicationResources) {
+	if app.ID == "" || app.Name == "" {
+		return
+	}
+
+	if len(resources.Services) > 0 {
+		updateApplicationWithService(app, resources.Services)
+	}
+
+	if len(resources.HorizontalPodAutoscalers) > 0 {
+		updateApplicationWithHorizontalPodAutoscaler(app, resources.HorizontalPodAutoscalers)
+	}
+}
+
+// hasNoScheduledPods checks if a workload has completely failed to schedule any pods
+// it checks for no replicas desired, i.e. nothing to schedule and see if any pods are running
+// if any pods exist at all (even if not ready), it returns false
+func hasNoScheduledPods(obj interface{}) bool {
+	switch resource := obj.(type) {
+	case appsv1.Deployment:
+		if resource.Status.Replicas > 0 {
+			return false
+		}
+		return resource.Status.ReadyReplicas == 0 && resource.Status.AvailableReplicas == 0
+
+	case appsv1.StatefulSet:
+		if resource.Status.Replicas > 0 {
+			return false
+		}
+		return resource.Status.ReadyReplicas == 0 && resource.Status.CurrentReplicas == 0
+
+	case appsv1.DaemonSet:
+		if resource.Status.CurrentNumberScheduled > 0 || resource.Status.NumberMisscheduled > 0 {
+			return false
+		}
+		return resource.Status.NumberReady == 0 && resource.Status.DesiredNumberScheduled > 0
+
+	default:
+		return false
+	}
+}
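
A concrete reading of hasNoScheduledPods, which drives the new unhealthy list: a workload only counts as unscheduled when its status reports no pods at all. Worked example (a fragment with invented values, not diff content; assumes fmt is imported):

	// Status reports nothing scheduled: surfaced as unscheduled.
	d := appsv1.Deployment{Status: appsv1.DeploymentStatus{
		Replicas:          0,
		ReadyReplicas:     0,
		AvailableReplicas: 0,
	}}
	fmt.Println(hasNoScheduledPods(d)) // true

	// Any observed replica, even an unready one, flips the result.
	d.Status.Replicas = 1
	fmt.Println(hasNoScheduledPods(d)) // false
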
api/kubernetes/cli/applications_test.go (new file, 461 lines)
@@ -0,0 +1,461 @@
+package cli
+
+import (
+	"context"
+	"testing"
+
+	models "github.com/portainer/portainer/api/http/models/kubernetes"
+	"github.com/stretchr/testify/assert"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes/fake"
+)
+
+// Helper functions to create test resources
+func createTestDeployment(name, namespace string, replicas int32) *appsv1.Deployment {
+	return &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			UID:       types.UID("deploy-" + name),
+			Labels: map[string]string{
+				"app": name,
+			},
+		},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"app": name,
+				},
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app": name,
+					},
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{
+							Name:  name,
+							Image: "nginx:latest",
+							Resources: corev1.ResourceRequirements{
+								Limits:   corev1.ResourceList{},
+								Requests: corev1.ResourceList{},
+							},
+						},
+					},
+				},
+			},
+		},
+		Status: appsv1.DeploymentStatus{
+			Replicas:      replicas,
+			ReadyReplicas: replicas,
+		},
+	}
+}
+
+func createTestReplicaSet(name, namespace, deploymentName string) *appsv1.ReplicaSet {
+	return &appsv1.ReplicaSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			UID:       types.UID("rs-" + name),
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					Kind: "Deployment",
+					Name: deploymentName,
+					UID:  types.UID("deploy-" + deploymentName),
+				},
+			},
+		},
+		Spec: appsv1.ReplicaSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"app": deploymentName,
+				},
+			},
+		},
+	}
+}
+
+func createTestStatefulSet(name, namespace string, replicas int32) *appsv1.StatefulSet {
+	return &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			UID:       types.UID("sts-" + name),
+			Labels: map[string]string{
+				"app": name,
+			},
+		},
+		Spec: appsv1.StatefulSetSpec{
+			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"app": name,
+				},
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app": name,
+					},
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{
+							Name:  name,
+							Image: "redis:latest",
+							Resources: corev1.ResourceRequirements{
+								Limits:   corev1.ResourceList{},
+								Requests: corev1.ResourceList{},
+							},
+						},
+					},
+				},
+			},
+		},
+		Status: appsv1.StatefulSetStatus{
+			Replicas:      replicas,
+			ReadyReplicas: replicas,
+		},
+	}
+}
+
+func createTestDaemonSet(name, namespace string) *appsv1.DaemonSet {
+	return &appsv1.DaemonSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			UID:       types.UID("ds-" + name),
+			Labels: map[string]string{
+				"app": name,
+			},
+		},
+		Spec: appsv1.DaemonSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"app": name,
+				},
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app": name,
+					},
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{
+							Name:  name,
+							Image: "fluentd:latest",
+							Resources: corev1.ResourceRequirements{
+								Limits:   corev1.ResourceList{},
+								Requests: corev1.ResourceList{},
+							},
+						},
+					},
+				},
+			},
+		},
+		Status: appsv1.DaemonSetStatus{
+			DesiredNumberScheduled: 2,
+			NumberReady:            2,
+		},
+	}
+}
+
+func createTestPod(name, namespace, ownerKind, ownerName string, isRunning bool) *corev1.Pod {
+	phase := corev1.PodPending
+	if isRunning {
+		phase = corev1.PodRunning
+	}
+
+	var ownerReferences []metav1.OwnerReference
+	if ownerKind != "" && ownerName != "" {
+		ownerReferences = []metav1.OwnerReference{
+			{
+				Kind: ownerKind,
+				Name: ownerName,
+				UID:  types.UID(ownerKind + "-" + ownerName),
+			},
+		}
+	}
+
+	return &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            name,
+			Namespace:       namespace,
+			UID:             types.UID("pod-" + name),
+			OwnerReferences: ownerReferences,
+			Labels: map[string]string{
+				"app": ownerName,
+			},
+		},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name:  "container-" + name,
+					Image: "busybox:latest",
+					Resources: corev1.ResourceRequirements{
+						Limits:   corev1.ResourceList{},
+						Requests: corev1.ResourceList{},
+					},
+				},
+			},
+		},
+		Status: corev1.PodStatus{
+			Phase: phase,
+		},
+	}
+}
+
+func createTestService(name, namespace string, selector map[string]string) *corev1.Service {
+	return &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			UID:       types.UID("svc-" + name),
+		},
+		Spec: corev1.ServiceSpec{
+			Selector: selector,
+			Type:     corev1.ServiceTypeClusterIP,
+		},
+	}
+}
+
+func TestGetApplications(t *testing.T) {
+	t.Run("Admin user - Mix of deployments, statefulsets and daemonsets with and without pods", func(t *testing.T) {
+		// Create a fake K8s client
+		fakeClient := fake.NewSimpleClientset()
+
+		// Setup the test namespace
+		namespace := "test-namespace"
+		defaultNamespace := "default"
+
+		// Create resources in the test namespace
+		// 1. Deployment with pods
+		deployWithPods := createTestDeployment("deploy-with-pods", namespace, 2)
+		_, err := fakeClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployWithPods, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		replicaSet := createTestReplicaSet("rs-deploy-with-pods", namespace, "deploy-with-pods")
+		_, err = fakeClient.AppsV1().ReplicaSets(namespace).Create(context.TODO(), replicaSet, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		pod1 := createTestPod("pod1-deploy", namespace, "ReplicaSet", "rs-deploy-with-pods", true)
+		_, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod1, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		pod2 := createTestPod("pod2-deploy", namespace, "ReplicaSet", "rs-deploy-with-pods", true)
+		_, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod2, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// 2. Deployment without pods (scaled to 0)
+		deployNoPods := createTestDeployment("deploy-no-pods", namespace, 0)
+		_, err = fakeClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployNoPods, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// 3. StatefulSet with pods
+		stsWithPods := createTestStatefulSet("sts-with-pods", namespace, 1)
+		_, err = fakeClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), stsWithPods, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		pod3 := createTestPod("pod1-sts", namespace, "StatefulSet", "sts-with-pods", true)
+		_, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod3, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// 4. StatefulSet without pods
+		stsNoPods := createTestStatefulSet("sts-no-pods", namespace, 0)
+		_, err = fakeClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), stsNoPods, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// 5. DaemonSet with pods
+		dsWithPods := createTestDaemonSet("ds-with-pods", namespace)
+		_, err = fakeClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), dsWithPods, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		pod4 := createTestPod("pod1-ds", namespace, "DaemonSet", "ds-with-pods", true)
+		_, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod4, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		pod5 := createTestPod("pod2-ds", namespace, "DaemonSet", "ds-with-pods", true)
+		_, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod5, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// 6. Naked Pod (no owner reference)
+		nakedPod := createTestPod("naked-pod", namespace, "", "", true)
+		_, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), nakedPod, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// 7. Resources in another namespace
+		deployOtherNs := createTestDeployment("deploy-other-ns", defaultNamespace, 1)
+		_, err = fakeClient.AppsV1().Deployments(defaultNamespace).Create(context.TODO(), deployOtherNs, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		podOtherNs := createTestPod("pod-other-ns", defaultNamespace, "Deployment", "deploy-other-ns", true)
+		_, err = fakeClient.CoreV1().Pods(defaultNamespace).Create(context.TODO(), podOtherNs, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// 8. Add a service (dependency)
+		service := createTestService("svc-deploy", namespace, map[string]string{"app": "deploy-with-pods"})
+		_, err = fakeClient.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// Create the KubeClient with admin privileges
+		kubeClient := &KubeClient{
+			cli:         fakeClient,
+			instanceID:  "test-instance",
+			IsKubeAdmin: true,
+		}
+
+		// Test cases
+
+		// 1. All resources, no filtering
+		t.Run("All resources with dependencies", func(t *testing.T) {
+			apps, err := kubeClient.GetApplications("", "")
+			assert.NoError(t, err)
+
+			// We expect 7 resources: 2 deployments + 2 statefulsets + 1 daemonset + 1 naked pod + 1 deployment in other namespace
+			// Note: Each controller with pods should count once, not per pod
+			assert.Equal(t, 7, len(apps))
+
+			// Verify one of the deployments has services attached
+			appsWithServices := []models.K8sApplication{}
+			for _, app := range apps {
+				if len(app.Services) > 0 {
+					appsWithServices = append(appsWithServices, app)
+				}
+			}
+			assert.Equal(t, 1, len(appsWithServices))
+			assert.Equal(t, "deploy-with-pods", appsWithServices[0].Name)
+		})
+
+		// 2. Filter by namespace
+		t.Run("Filter by namespace", func(t *testing.T) {
+			apps, err := kubeClient.GetApplications(namespace, "")
+			assert.NoError(t, err)
+
+			// We expect 6 resources in the test namespace
+			assert.Equal(t, 6, len(apps))
+
+			// Verify resources from other namespaces are not included
+			for _, app := range apps {
+				assert.Equal(t, namespace, app.ResourcePool)
+			}
+		})
+	})
+
+	t.Run("Non-admin user - Resources filtered by accessible namespaces", func(t *testing.T) {
+		// Create a fake K8s client
+		fakeClient := fake.NewSimpleClientset()
+
+		// Setup the test namespaces
+		namespace1 := "allowed-ns"
+		namespace2 := "restricted-ns"
+
+		// Create resources in the allowed namespace
+		sts1 := createTestStatefulSet("sts-allowed", namespace1, 1)
+		_, err := fakeClient.AppsV1().StatefulSets(namespace1).Create(context.TODO(), sts1, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		pod1 := createTestPod("pod-allowed", namespace1, "StatefulSet", "sts-allowed", true)
+		_, err = fakeClient.CoreV1().Pods(namespace1).Create(context.TODO(), pod1, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// Add a StatefulSet without pods in the allowed namespace
+		stsNoPods := createTestStatefulSet("sts-no-pods-allowed", namespace1, 0)
+		_, err = fakeClient.AppsV1().StatefulSets(namespace1).Create(context.TODO(), stsNoPods, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// Create resources in the restricted namespace
+		sts2 := createTestStatefulSet("sts-restricted", namespace2, 1)
+		_, err = fakeClient.AppsV1().StatefulSets(namespace2).Create(context.TODO(), sts2, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		pod2 := createTestPod("pod-restricted", namespace2, "StatefulSet", "sts-restricted", true)
+		_, err = fakeClient.CoreV1().Pods(namespace2).Create(context.TODO(), pod2, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// Create the KubeClient with non-admin privileges (only allowed namespace1)
+		kubeClient := &KubeClient{
+			cli:                fakeClient,
+			instanceID:         "test-instance",
+			IsKubeAdmin:        false,
+			NonAdminNamespaces: []string{namespace1},
+		}
+
+		// Test that only resources from allowed namespace are returned
+		apps, err := kubeClient.GetApplications("", "")
+		assert.NoError(t, err)
+
+		// We expect 2 resources from the allowed namespace (1 sts with pod + 1 sts without pod)
+		assert.Equal(t, 2, len(apps))
+
+		// Verify resources are from the allowed namespace
+		for _, app := range apps {
+			assert.Equal(t, namespace1, app.ResourcePool)
+			assert.Equal(t, "StatefulSet", app.Kind)
+		}
+
+		// Verify names of returned resources
+		stsNames := make(map[string]bool)
+		for _, app := range apps {
+			stsNames[app.Name] = true
+		}
+
+		assert.True(t, stsNames["sts-allowed"], "Expected StatefulSet 'sts-allowed' was not found")
+		assert.True(t, stsNames["sts-no-pods-allowed"], "Expected StatefulSet 'sts-no-pods-allowed' was not found")
+	})
+
+	t.Run("Filter by node name", func(t *testing.T) {
+		// Create a fake K8s client
+		fakeClient := fake.NewSimpleClientset()
+
+		// Setup test namespace
+		namespace := "node-filter-ns"
+		nodeName := "worker-node-1"
+
+		// Create a deployment with pods on specific node
+		deploy := createTestDeployment("node-deploy", namespace, 2)
+		_, err := fakeClient.AppsV1().Deployments(namespace).Create(context.TODO(), deploy, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// Create ReplicaSet for the deployment
+		rs := createTestReplicaSet("rs-node-deploy", namespace, "node-deploy")
+		_, err = fakeClient.AppsV1().ReplicaSets(namespace).Create(context.TODO(), rs, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// Create 2 pods, one on the specified node, one on a different node
+		pod1 := createTestPod("pod-on-node", namespace, "ReplicaSet", "rs-node-deploy", true)
+		pod1.Spec.NodeName = nodeName
+		_, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod1, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		pod2 := createTestPod("pod-other-node", namespace, "ReplicaSet", "rs-node-deploy", true)
+		pod2.Spec.NodeName = "worker-node-2"
+		_, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod2, metav1.CreateOptions{})
+		assert.NoError(t, err)
+
+		// Create the KubeClient
+		kubeClient := &KubeClient{
+			cli:         fakeClient,
+			instanceID:  "test-instance",
+			IsKubeAdmin: true,
+		}
+
+		// Test filtering by node name
+		apps, err := kubeClient.GetApplications(namespace, nodeName)
+		assert.NoError(t, err)
+
+		// We expect to find only the pod on the specified node
+		assert.Equal(t, 1, len(apps))
+		if len(apps) > 0 {
+			assert.Equal(t, "node-deploy", apps[0].Name)
+		}
+	})
+}
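
Every test above follows the same arrangement: seed a client-go fake clientset with typed objects, wrap it in a KubeClient, and assert on GetApplications output. The skeleton with the scenario-specific objects stripped out (a sketch, not diff content):

	func testSkeleton(t *testing.T) {
		fakeClient := fake.NewSimpleClientset() // in-memory stand-in for the API server

		kubeClient := &KubeClient{
			cli:         fakeClient,
			instanceID:  "test-instance",
			IsKubeAdmin: true,
		}

		apps, err := kubeClient.GetApplications("", "")
		assert.NoError(t, err)
		assert.Empty(t, apps) // nothing seeded, so nothing returned
	}
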
@@ -24,7 +24,7 @@ func (kcl *KubeClient) GetConfigMaps(namespace string) ([]models.K8sConfigMap, e
 // fetchConfigMapsForNonAdmin fetches the configMaps in the namespaces the user has access to.
 // This function is called when the user is not an admin.
 func (kcl *KubeClient) fetchConfigMapsForNonAdmin(namespace string) ([]models.K8sConfigMap, error) {
-	log.Debug().Msgf("Fetching volumes for non-admin user: %v", kcl.NonAdminNamespaces)
+	log.Debug().Msgf("Fetching configMaps for non-admin user: %v", kcl.NonAdminNamespaces)
 
 	if len(kcl.NonAdminNamespaces) == 0 {
 		return nil, nil
@@ -102,7 +102,7 @@ func parseConfigMap(configMap *corev1.ConfigMap, withData bool) models.K8sConfig
 func (kcl *KubeClient) CombineConfigMapsWithApplications(configMaps []models.K8sConfigMap) ([]models.K8sConfigMap, error) {
 	updatedConfigMaps := make([]models.K8sConfigMap, len(configMaps))
 
-	pods, replicaSets, _, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets("", metav1.ListOptions{})
+	portainerApplicationResources, err := kcl.fetchAllApplicationsListResources("", metav1.ListOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("an error occurred during the CombineConfigMapsWithApplications operation, unable to fetch pods and replica sets. Error: %w", err)
 	}
@ -110,7 +110,7 @@ func (kcl *KubeClient) CombineConfigMapsWithApplications(configMaps []models.K8s
|
||||||
for index, configMap := range configMaps {
|
for index, configMap := range configMaps {
|
||||||
updatedConfigMap := configMap
|
updatedConfigMap := configMap
|
||||||
|
|
||||||
applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromConfigMap(configMap, pods, replicaSets)
|
applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromConfigMap(configMap, portainerApplicationResources.Pods, portainerApplicationResources.ReplicaSets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("an error occurred during the CombineConfigMapsWithApplications operation, unable to get applications from config map. Error: %w", err)
|
return nil, fmt.Errorf("an error occurred during the CombineConfigMapsWithApplications operation, unable to get applications from config map. Error: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
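GetApplicationConfigurationOwnersFromConfigMap has to know which pods actually reference a given ConfigMap. A later hunk in this diff mentions an isPodUsingConfigMap helper; a simplified sketch of that kind of check (illustrative only, covering volume mounts and envFrom; the real helper may inspect more reference paths, such as individual env var keys):

	// isPodUsingConfigMap reports whether a pod mounts or injects the named ConfigMap.
	func isPodUsingConfigMap(pod *corev1.Pod, name string) bool {
		// Mounted as a volume?
		for _, volume := range pod.Spec.Volumes {
			if volume.ConfigMap != nil && volume.ConfigMap.Name == name {
				return true
			}
		}
		// Injected via envFrom?
		for _, container := range pod.Spec.Containers {
			for _, envFrom := range container.EnvFrom {
				if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name {
					return true
				}
			}
		}
		return false
	}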
@ -11,7 +11,6 @@ import (

 	"github.com/pkg/errors"
 	"github.com/rs/zerolog/log"
 	appsv1 "k8s.io/api/apps/v1"
-	autoscalingv2 "k8s.io/api/autoscaling/v2"
 	corev1 "k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@ -110,7 +109,7 @@ func (kcl *KubeClient) CreateUserShellPod(ctx context.Context, serviceAccountNam

 		},
 	}

-	shellPod, err := kcl.cli.CoreV1().Pods(portainerNamespace).Create(ctx, podSpec, metav1.CreateOptions{})
+	shellPod, err := kcl.cli.CoreV1().Pods(portainerNamespace).Create(context.TODO(), podSpec, metav1.CreateOptions{})
 	if err != nil {
 		return nil, errors.Wrap(err, "error creating shell pod")
 	}

@ -158,7 +157,7 @@ func (kcl *KubeClient) waitForPodStatus(ctx context.Context, phase corev1.PodPha

 		case <-ctx.Done():
 			return ctx.Err()
 		default:
-			pod, err := kcl.cli.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
+			pod, err := kcl.cli.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 			if err != nil {
 				return err
 			}

@ -172,70 +171,67 @@ func (kcl *KubeClient) waitForPodStatus(ctx context.Context, phase corev1.PodPha

 	}
 }

-// fetchAllPodsAndReplicaSets fetches all pods and replica sets across the cluster, i.e. all namespaces
-func (kcl *KubeClient) fetchAllPodsAndReplicaSets(namespace string, podListOptions metav1.ListOptions) ([]corev1.Pod, []appsv1.ReplicaSet, []appsv1.Deployment, []appsv1.StatefulSet, []appsv1.DaemonSet, []corev1.Service, []autoscalingv2.HorizontalPodAutoscaler, error) {
-	return kcl.fetchResourcesWithOwnerReferences(namespace, podListOptions, false, false)
-}
-
 // fetchAllApplicationsListResources fetches all pods, replica sets, stateful sets, and daemon sets across the cluster, i.e. all namespaces
 // this is required for the applications list view
-func (kcl *KubeClient) fetchAllApplicationsListResources(namespace string, podListOptions metav1.ListOptions) ([]corev1.Pod, []appsv1.ReplicaSet, []appsv1.Deployment, []appsv1.StatefulSet, []appsv1.DaemonSet, []corev1.Service, []autoscalingv2.HorizontalPodAutoscaler, error) {
+func (kcl *KubeClient) fetchAllApplicationsListResources(namespace string, podListOptions metav1.ListOptions) (PortainerApplicationResources, error) {
 	return kcl.fetchResourcesWithOwnerReferences(namespace, podListOptions, true, true)
 }

 // fetchResourcesWithOwnerReferences fetches pods and other resources based on owner references
-func (kcl *KubeClient) fetchResourcesWithOwnerReferences(namespace string, podListOptions metav1.ListOptions, includeStatefulSets, includeDaemonSets bool) ([]corev1.Pod, []appsv1.ReplicaSet, []appsv1.Deployment, []appsv1.StatefulSet, []appsv1.DaemonSet, []corev1.Service, []autoscalingv2.HorizontalPodAutoscaler, error) {
+func (kcl *KubeClient) fetchResourcesWithOwnerReferences(namespace string, podListOptions metav1.ListOptions, includeStatefulSets, includeDaemonSets bool) (PortainerApplicationResources, error) {
 	pods, err := kcl.cli.CoreV1().Pods(namespace).List(context.Background(), podListOptions)
 	if err != nil {
 		if k8serrors.IsNotFound(err) {
-			return nil, nil, nil, nil, nil, nil, nil, nil
+			return PortainerApplicationResources{}, nil
 		}
-		return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list pods across the cluster: %w", err)
+		return PortainerApplicationResources{}, fmt.Errorf("unable to list pods across the cluster: %w", err)
 	}

-	// if replicaSet owner reference exists, fetch the replica sets
-	// this also means that the deployments will be fetched because deployments own replica sets
-	replicaSets := &appsv1.ReplicaSetList{}
-	deployments := &appsv1.DeploymentList{}
-	if containsReplicaSetOwnerReference(pods) {
-		replicaSets, err = kcl.cli.AppsV1().ReplicaSets(namespace).List(context.Background(), metav1.ListOptions{})
-		if err != nil && !k8serrors.IsNotFound(err) {
-			return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list replica sets across the cluster: %w", err)
-		}
-
-		deployments, err = kcl.cli.AppsV1().Deployments(namespace).List(context.Background(), metav1.ListOptions{})
-		if err != nil && !k8serrors.IsNotFound(err) {
-			return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list deployments across the cluster: %w", err)
-		}
-	}
+	portainerApplicationResources := PortainerApplicationResources{
+		Pods: pods.Items,
+	}

-	statefulSets := &appsv1.StatefulSetList{}
-	if includeStatefulSets && containsStatefulSetOwnerReference(pods) {
-		statefulSets, err = kcl.cli.AppsV1().StatefulSets(namespace).List(context.Background(), metav1.ListOptions{})
-		if err != nil && !k8serrors.IsNotFound(err) {
-			return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list stateful sets across the cluster: %w", err)
-		}
-	}
+	replicaSets, err := kcl.cli.AppsV1().ReplicaSets(namespace).List(context.Background(), metav1.ListOptions{})
+	if err != nil && !k8serrors.IsNotFound(err) {
+		return PortainerApplicationResources{}, fmt.Errorf("unable to list replica sets across the cluster: %w", err)
+	}
+	portainerApplicationResources.ReplicaSets = replicaSets.Items

+	deployments, err := kcl.cli.AppsV1().Deployments(namespace).List(context.Background(), metav1.ListOptions{})
+	if err != nil && !k8serrors.IsNotFound(err) {
+		return PortainerApplicationResources{}, fmt.Errorf("unable to list deployments across the cluster: %w", err)
+	}
+	portainerApplicationResources.Deployments = deployments.Items

+	if includeStatefulSets {
+		statefulSets, err := kcl.cli.AppsV1().StatefulSets(namespace).List(context.Background(), metav1.ListOptions{})
+		if err != nil && !k8serrors.IsNotFound(err) {
+			return PortainerApplicationResources{}, fmt.Errorf("unable to list stateful sets across the cluster: %w", err)
+		}
+		portainerApplicationResources.StatefulSets = statefulSets.Items
+	}

-	daemonSets := &appsv1.DaemonSetList{}
-	if includeDaemonSets && containsDaemonSetOwnerReference(pods) {
-		daemonSets, err = kcl.cli.AppsV1().DaemonSets(namespace).List(context.Background(), metav1.ListOptions{})
-		if err != nil && !k8serrors.IsNotFound(err) {
-			return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list daemon sets across the cluster: %w", err)
-		}
-	}
+	if includeDaemonSets {
+		daemonSets, err := kcl.cli.AppsV1().DaemonSets(namespace).List(context.Background(), metav1.ListOptions{})
+		if err != nil && !k8serrors.IsNotFound(err) {
+			return PortainerApplicationResources{}, fmt.Errorf("unable to list daemon sets across the cluster: %w", err)
+		}
+		portainerApplicationResources.DaemonSets = daemonSets.Items
+	}

 	services, err := kcl.cli.CoreV1().Services(namespace).List(context.Background(), metav1.ListOptions{})
 	if err != nil && !k8serrors.IsNotFound(err) {
-		return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list services across the cluster: %w", err)
+		return PortainerApplicationResources{}, fmt.Errorf("unable to list services across the cluster: %w", err)
 	}
+	portainerApplicationResources.Services = services.Items

 	hpas, err := kcl.cli.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(context.Background(), metav1.ListOptions{})
 	if err != nil && !k8serrors.IsNotFound(err) {
-		return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list horizontal pod autoscalers across the cluster: %w", err)
+		return PortainerApplicationResources{}, fmt.Errorf("unable to list horizontal pod autoscalers across the cluster: %w", err)
 	}
+	portainerApplicationResources.HorizontalPodAutoscalers = hpas.Items

-	return pods.Items, replicaSets.Items, deployments.Items, statefulSets.Items, daemonSets.Items, services.Items, hpas.Items, nil
+	return portainerApplicationResources, nil
 }

 // isPodUsingConfigMap checks if a pod is using a specific ConfigMap
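The PortainerApplicationResources struct itself is defined elsewhere and not shown in this diff; from its usage above, it plausibly looks like the sketch below (field names taken from the assignments in the new code, element types from the tuple they replace):

	// PortainerApplicationResources groups the cluster resources needed to
	// build the applications list, replacing an eight-value return tuple.
	type PortainerApplicationResources struct {
		Pods                     []corev1.Pod
		ReplicaSets              []appsv1.ReplicaSet
		Deployments              []appsv1.Deployment
		StatefulSets             []appsv1.StatefulSet
		DaemonSets               []appsv1.DaemonSet
		Services                 []corev1.Service
		HorizontalPodAutoscalers []autoscalingv2.HorizontalPodAutoscaler
	}

Bundling the lists in one struct lets each call site pick just the fields it needs (for example portainerApplicationResources.Pods) instead of discarding positional results with blank identifiers.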
@ -10,7 +10,6 @@ import (

 	rbacv1 "k8s.io/api/rbac/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 // GetRoles gets all the roles for either at the cluster level or a given namespace in a k8s endpoint.

@ -137,7 +136,7 @@ func (kcl *KubeClient) DeleteRoles(reqs models.K8sRoleDeleteRequests) error {

 		for _, name := range reqs[namespace] {
 			client := kcl.cli.RbacV1().Roles(namespace)

-			role, err := client.Get(context.Background(), name, v1.GetOptions{})
+			role, err := client.Get(context.Background(), name, metav1.GetOptions{})
 			if err != nil {
 				if k8serrors.IsNotFound(err) {
 					continue
@ -7,11 +7,9 @@ import (

 	models "github.com/portainer/portainer/api/http/models/kubernetes"
 	"github.com/portainer/portainer/api/internal/errorlist"
 	"github.com/rs/zerolog/log"
-	corev1 "k8s.io/api/rbac/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 // GetRoleBindings gets all the roleBindings for either at the cluster level or a given namespace in a k8s endpoint.

@ -98,7 +96,7 @@ func (kcl *KubeClient) isSystemRoleBinding(rb *rbacv1.RoleBinding) bool {

 	return false
 }

-func (kcl *KubeClient) getRole(namespace, name string) (*corev1.Role, error) {
+func (kcl *KubeClient) getRole(namespace, name string) (*rbacv1.Role, error) {
 	client := kcl.cli.RbacV1().Roles(namespace)
 	return client.Get(context.Background(), name, metav1.GetOptions{})
 }

@ -111,7 +109,7 @@ func (kcl *KubeClient) DeleteRoleBindings(reqs models.K8sRoleBindingDeleteReques

 		for _, name := range reqs[namespace] {
 			client := kcl.cli.RbacV1().RoleBindings(namespace)

-			roleBinding, err := client.Get(context.Background(), name, v1.GetOptions{})
+			roleBinding, err := client.Get(context.Background(), name, metav1.GetOptions{})
 			if err != nil {
 				if k8serrors.IsNotFound(err) {
 					continue

@ -125,7 +123,7 @@ func (kcl *KubeClient) DeleteRoleBindings(reqs models.K8sRoleBindingDeleteReques

 				log.Error().Str("role_name", name).Msg("ignoring delete of 'system' role binding, not allowed")
 			}

-			if err := client.Delete(context.Background(), name, v1.DeleteOptions{}); err != nil {
+			if err := client.Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil {
 				errors = append(errors, err)
 			}
 		}
@ -31,7 +31,7 @@ func (kcl *KubeClient) GetSecrets(namespace string) ([]models.K8sSecret, error)

 // getSecretsForNonAdmin fetches the secrets in the namespaces the user has access to.
 // This function is called when the user is not an admin.
 func (kcl *KubeClient) getSecretsForNonAdmin(namespace string) ([]models.K8sSecret, error) {
-	log.Debug().Msgf("Fetching volumes for non-admin user: %v", kcl.NonAdminNamespaces)
+	log.Debug().Msgf("Fetching secrets for non-admin user: %v", kcl.NonAdminNamespaces)

 	if len(kcl.NonAdminNamespaces) == 0 {
 		return nil, nil

@ -118,7 +118,7 @@ func parseSecret(secret *corev1.Secret, withData bool) models.K8sSecret {

 func (kcl *KubeClient) CombineSecretsWithApplications(secrets []models.K8sSecret) ([]models.K8sSecret, error) {
 	updatedSecrets := make([]models.K8sSecret, len(secrets))

-	pods, replicaSets, _, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets("", metav1.ListOptions{})
+	portainerApplicationResources, err := kcl.fetchAllApplicationsListResources("", metav1.ListOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("an error occurred during the CombineSecretsWithApplications operation, unable to fetch pods and replica sets. Error: %w", err)
 	}

@ -126,7 +126,7 @@ func (kcl *KubeClient) CombineSecretsWithApplications(secrets []models.K8sSecret

 	for index, secret := range secrets {
 		updatedSecret := secret

-		applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromSecret(secret, pods, replicaSets)
+		applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromSecret(secret, portainerApplicationResources.Pods, portainerApplicationResources.ReplicaSets)
 		if err != nil {
 			return nil, fmt.Errorf("an error occurred during the CombineSecretsWithApplications operation, unable to get applications from secret. Error: %w", err)
 		}
@ -174,7 +174,7 @@ func (kcl *KubeClient) UpdateService(namespace string, info models.K8sServiceInf

 func (kcl *KubeClient) CombineServicesWithApplications(services []models.K8sServiceInfo) ([]models.K8sServiceInfo, error) {
 	if containsServiceWithSelector(services) {
 		updatedServices := make([]models.K8sServiceInfo, len(services))
-		pods, replicaSets, _, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets("", metav1.ListOptions{})
+		portainerApplicationResources, err := kcl.fetchAllApplicationsListResources("", metav1.ListOptions{})
 		if err != nil {
 			return nil, fmt.Errorf("an error occurred during the CombineServicesWithApplications operation, unable to fetch pods and replica sets. Error: %w", err)
 		}

@ -182,7 +182,7 @@ func (kcl *KubeClient) CombineServicesWithApplications(services []models.K8sServ

 		for index, service := range services {
 			updatedService := service

-			application, err := kcl.GetApplicationFromServiceSelector(pods, service, replicaSets)
+			application, err := kcl.GetApplicationFromServiceSelector(portainerApplicationResources.Pods, service, portainerApplicationResources.ReplicaSets)
 			if err != nil {
 				return services, fmt.Errorf("an error occurred during the CombineServicesWithApplications operation, unable to get application from service. Error: %w", err)
 			}
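GetApplicationFromServiceSelector implies matching a service's label selector against the fetched pods. A rough illustration of that kind of matching using the apimachinery labels package (the helper name and exact semantics are assumptions, not Portainer's actual code, which also resolves owner references on the matched pods):

	// podsMatchingService returns the pods selected by a service's selector.
	func podsMatchingService(service corev1.Service, pods []corev1.Pod) []corev1.Pod {
		if len(service.Spec.Selector) == 0 {
			return nil // selector-less services (e.g. ExternalName) match nothing
		}
		selector := labels.SelectorFromSet(service.Spec.Selector)
		var matched []corev1.Pod
		for _, pod := range pods {
			if pod.Namespace == service.Namespace && selector.Matches(labels.Set(pod.Labels)) {
				matched = append(matched, pod)
			}
		}
		return matched
	}

This assumes the usual corev1 import plus "k8s.io/apimachinery/pkg/labels".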
@ -5,7 +5,6 @@ import (

 	"fmt"

 	portainer "github.com/portainer/portainer/api"
-	"github.com/portainer/portainer/api/http/models/kubernetes"
 	models "github.com/portainer/portainer/api/http/models/kubernetes"
 	"github.com/portainer/portainer/api/internal/errorlist"
 	corev1 "k8s.io/api/core/v1"

@ -92,7 +91,7 @@ func (kcl *KubeClient) isSystemServiceAccount(namespace string) bool {

 // DeleteServices processes a K8sServiceDeleteRequest by deleting each service
 // in its given namespace.
-func (kcl *KubeClient) DeleteServiceAccounts(reqs kubernetes.K8sServiceAccountDeleteRequests) error {
+func (kcl *KubeClient) DeleteServiceAccounts(reqs models.K8sServiceAccountDeleteRequests) error {
 	var errors []error
 	for namespace := range reqs {
 		for _, serviceName := range reqs[namespace] {
@ -7,7 +7,6 @@ import (

 	models "github.com/portainer/portainer/api/http/models/kubernetes"
 	"github.com/rs/zerolog/log"
 	appsv1 "k8s.io/api/apps/v1"
-	autoscalingv2 "k8s.io/api/autoscaling/v2"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"

@ -265,7 +264,12 @@ func (kcl *KubeClient) updateVolumesWithOwningApplications(volumes *[]models.K8s

 		if pod.Spec.Volumes != nil {
 			for _, podVolume := range pod.Spec.Volumes {
 				if podVolume.VolumeSource.PersistentVolumeClaim != nil && podVolume.VolumeSource.PersistentVolumeClaim.ClaimName == volume.PersistentVolumeClaim.Name && pod.Namespace == volume.PersistentVolumeClaim.Namespace {
-					application, err := kcl.ConvertPodToApplication(pod, replicaSetItems, deploymentItems, statefulSetItems, daemonSetItems, []corev1.Service{}, []autoscalingv2.HorizontalPodAutoscaler{}, false)
+					application, err := kcl.ConvertPodToApplication(pod, PortainerApplicationResources{
+						ReplicaSets:  replicaSetItems,
+						Deployments:  deploymentItems,
+						StatefulSets: statefulSetItems,
+						DaemonSets:   daemonSetItems,
+					}, false)
 					if err != nil {
 						log.Error().Err(err).Msg("Failed to convert pod to application")
 						return nil, fmt.Errorf("an error occurred during the CombineServicesWithApplications operation, unable to convert pod to application. Error: %w", err)
@ -1544,7 +1544,7 @@ type (

 		GetConfigMaps(namespace string) ([]models.K8sConfigMap, error)
 		GetSecrets(namespace string) ([]models.K8sSecret, error)
 		GetIngressControllers() (models.K8sIngressControllers, error)
-		GetApplications(namespace, nodename string, withDependencies bool) ([]models.K8sApplication, error)
+		GetApplications(namespace, nodename string) ([]models.K8sApplication, error)
 		GetMetrics() (models.K8sMetrics, error)
 		GetStorage() ([]KubernetesStorageClassConfig, error)
 		CreateIngress(namespace string, info models.K8sIngressInfo, owner string) error
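With withDependencies gone from the interface, callers supply only a namespace and a node name. A hypothetical consumer-side sketch (the AppLister interface below is illustrative, narrowed to the one method this example needs; Portainer's real interface is the larger one shown in this hunk):

	// AppLister captures just the method this example needs.
	type AppLister interface {
		GetApplications(namespace, nodeName string) ([]models.K8sApplication, error)
	}

	// printNodeApplications lists the applications scheduled on one node;
	// an empty nodeName is assumed to mean "all nodes".
	func printNodeApplications(cli AppLister, namespace, nodeName string) error {
		apps, err := cli.GetApplications(namespace, nodeName)
		if err != nil {
			return err
		}
		for _, app := range apps {
			fmt.Println(app.Name)
		}
		return nil
	}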
@ -57,7 +57,6 @@ export function ApplicationsDatatable({

   const applicationsQuery = useApplications(environmentId, {
     refetchInterval: tableState.autoRefreshRate * 1000,
     namespace: tableState.namespace,
-    withDependencies: true,
   });
   const ingressesQuery = useIngresses(environmentId);
   const ingresses = ingressesQuery.data ?? [];

@ -38,7 +38,6 @@ export function ApplicationsStacksDatatable({

   const applicationsQuery = useApplications(environmentId, {
     refetchInterval: tableState.autoRefreshRate * 1000,
     namespace: tableState.namespace,
-    withDependencies: true,
   });
   const ingressesQuery = useIngresses(environmentId);
   const ingresses = ingressesQuery.data ?? [];
@ -3,7 +3,6 @@ import { EnvironmentId } from '@/react/portainer/environments/types';

 export type GetAppsParams = {
   namespace?: string;
   nodeName?: string;
-  withDependencies?: boolean;
 };

 export const queryKeys = {

@ -11,7 +11,6 @@ import { queryKeys } from './query-keys';

 type GetAppsParams = {
   namespace?: string;
   nodeName?: string;
-  withDependencies?: boolean;
 };

 type GetAppsQueryOptions = {
@ -33,7 +33,7 @@ export function isExternalApplication(application: Application) {

 function getDeploymentRunningPods(deployment: Deployment): number {
   const availableReplicas = deployment.status?.availableReplicas ?? 0;
-  const totalReplicas = deployment.status?.replicas ?? 0;
+  const totalReplicas = deployment.spec?.replicas ?? 0;
   const unavailableReplicas = deployment.status?.unavailableReplicas ?? 0;
   return availableReplicas || totalReplicas - unavailableReplicas;
 }
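The change above swaps the "total" from deployment.status.replicas to deployment.spec.replicas: status.replicas only counts pods the controller has actually created, so a deployment whose pods cannot be scheduled would report a total of 0 and the application would look empty, while spec.replicas reflects the desired count. The same arithmetic rendered in Go against the apps/v1 types (a sketch for clarity, not Portainer code):

	// deploymentRunningPods prefers the observed available count and
	// otherwise falls back to desired minus unavailable.
	func deploymentRunningPods(d appsv1.Deployment) int32 {
		var desired int32 // mirrors the frontend's `?? 0` fallback
		if d.Spec.Replicas != nil {
			desired = *d.Spec.Replicas
		}
		if available := d.Status.AvailableReplicas; available != 0 {
			return available
		}
		return desired - d.Status.UnavailableReplicas
	}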
@ -30,7 +30,6 @@ export function NamespaceAppsDatatable({ namespace }: { namespace: string }) {

   const applicationsQuery = useApplications(environmentId, {
     refetchInterval: tableState.autoRefreshRate * 1000,
     namespace,
-    withDependencies: true,
   });
   const applications = applicationsQuery.data ?? [];