mirror of
https://github.com/portainer/portainer.git
synced 2025-07-19 05:19:39 +02:00
fix(more resources): fix porting and functionality [r8s-103] (#8)
Co-authored-by: testA113 <aliharriss1995@gmail.com> Co-authored-by: Anthony Lapenna <anthony.lapenna@portainer.io> Co-authored-by: Ali <83188384+testA113@users.noreply.github.com>
This commit is contained in:
parent
e6577ca269
commit
6d31f4876a
48 changed files with 894 additions and 186 deletions
|
@ -3,10 +3,14 @@ package cli
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
models "github.com/portainer/portainer/api/http/models/kubernetes"
|
||||
"github.com/portainer/portainer/api/internal/errorlist"
|
||||
"github.com/rs/zerolog/log"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// GetClusterRoles gets all the clusterRoles at the cluster level in a k8s endpoint.
|
||||
|
@ -21,7 +25,7 @@ func (kcl *KubeClient) GetClusterRoles() ([]models.K8sClusterRole, error) {
|
|||
|
||||
// fetchClusterRoles returns a list of all Roles in the specified namespace.
|
||||
func (kcl *KubeClient) fetchClusterRoles() ([]models.K8sClusterRole, error) {
|
||||
clusterRoles, err := kcl.cli.RbacV1().ClusterRoles().List(context.TODO(), metav1.ListOptions{})
|
||||
clusterRoles, err := kcl.cli.RbacV1().ClusterRoles().List(context.TODO(), meta.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -39,5 +43,61 @@ func parseClusterRole(clusterRole rbacv1.ClusterRole) models.K8sClusterRole {
|
|||
return models.K8sClusterRole{
|
||||
Name: clusterRole.Name,
|
||||
CreationDate: clusterRole.CreationTimestamp.Time,
|
||||
UID: clusterRole.UID,
|
||||
IsSystem: isSystemClusterRole(&clusterRole),
|
||||
}
|
||||
}
|
||||
|
||||
func (kcl *KubeClient) DeleteClusterRoles(req models.K8sClusterRoleDeleteRequests) error {
|
||||
var errors []error
|
||||
for _, name := range req {
|
||||
client := kcl.cli.RbacV1().ClusterRoles()
|
||||
|
||||
clusterRole, err := client.Get(context.Background(), name, meta.GetOptions{})
|
||||
if err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
// this is a more serious error to do with the client so we return right away
|
||||
return err
|
||||
}
|
||||
|
||||
if isSystemClusterRole(clusterRole) {
|
||||
log.Warn().Str("role_name", name).Msg("ignoring delete of 'system' cluster role, not allowed")
|
||||
}
|
||||
|
||||
err = client.Delete(context.Background(), name, meta.DeleteOptions{})
|
||||
if err != nil {
|
||||
log.Err(err).Str("role_name", name).Msg("unable to delete the cluster role")
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
return errorlist.Combine(errors)
|
||||
}
|
||||
|
||||
func isSystemClusterRole(role *rbacv1.ClusterRole) bool {
|
||||
if role.Namespace == "kube-system" || role.Namespace == "kube-public" ||
|
||||
role.Namespace == "kube-node-lease" || role.Namespace == "portainer" {
|
||||
return true
|
||||
}
|
||||
|
||||
if strings.HasPrefix(role.Name, "system:") {
|
||||
return true
|
||||
}
|
||||
|
||||
if role.Labels != nil {
|
||||
if role.Labels["kubernetes.io/bootstrapping"] == "rbac-defaults" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
roles := getPortainerDefaultK8sRoleNames()
|
||||
for i := range roles {
|
||||
if role.Name == roles[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -3,9 +3,13 @@ package cli
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
models "github.com/portainer/portainer/api/http/models/kubernetes"
|
||||
"github.com/portainer/portainer/api/internal/errorlist"
|
||||
"github.com/rs/zerolog/log"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
|
@ -38,8 +42,70 @@ func (kcl *KubeClient) fetchClusterRoleBindings() ([]models.K8sClusterRoleBindin
|
|||
func parseClusterRoleBinding(clusterRoleBinding rbacv1.ClusterRoleBinding) models.K8sClusterRoleBinding {
|
||||
return models.K8sClusterRoleBinding{
|
||||
Name: clusterRoleBinding.Name,
|
||||
UID: clusterRoleBinding.UID,
|
||||
Namespace: clusterRoleBinding.Namespace,
|
||||
RoleRef: clusterRoleBinding.RoleRef,
|
||||
Subjects: clusterRoleBinding.Subjects,
|
||||
CreationDate: clusterRoleBinding.CreationTimestamp.Time,
|
||||
IsSystem: isSystemClusterRoleBinding(&clusterRoleBinding),
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteClusterRoleBindings processes a K8sClusterRoleBindingDeleteRequest
|
||||
// by deleting each cluster role binding in its given namespace. If deleting a specific cluster role binding
|
||||
// fails, the error is logged and we continue to delete the remaining cluster role bindings.
|
||||
func (kcl *KubeClient) DeleteClusterRoleBindings(reqs models.K8sClusterRoleBindingDeleteRequests) error {
|
||||
var errors []error
|
||||
|
||||
for _, name := range reqs {
|
||||
client := kcl.cli.RbacV1().ClusterRoleBindings()
|
||||
|
||||
clusterRoleBinding, err := client.Get(context.Background(), name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a more serious error to do with the client so we return right away
|
||||
return err
|
||||
}
|
||||
|
||||
if isSystemClusterRoleBinding(clusterRoleBinding) {
|
||||
log.Warn().Str("role_name", name).Msg("ignoring delete of 'system' cluster role binding, not allowed")
|
||||
}
|
||||
|
||||
if err := client.Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil {
|
||||
log.Err(err).Str("role_name", name).Msg("unable to delete the cluster role binding")
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
return errorlist.Combine(errors)
|
||||
}
|
||||
|
||||
func isSystemClusterRoleBinding(binding *rbacv1.ClusterRoleBinding) bool {
|
||||
if strings.HasPrefix(binding.Name, "system:") {
|
||||
return true
|
||||
}
|
||||
|
||||
if binding.Labels != nil {
|
||||
if binding.Labels["kubernetes.io/bootstrapping"] == "rbac-defaults" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
for _, sub := range binding.Subjects {
|
||||
if strings.HasPrefix(sub.Name, "system:") {
|
||||
return true
|
||||
}
|
||||
|
||||
if sub.Namespace == "kube-system" ||
|
||||
sub.Namespace == "kube-public" ||
|
||||
sub.Namespace == "kube-node-lease" ||
|
||||
sub.Namespace == "portainer" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -94,7 +94,7 @@ func parseNamespace(namespace *corev1.Namespace) portainer.K8sNamespaceInfo {
|
|||
Status: namespace.Status,
|
||||
CreationDate: namespace.CreationTimestamp.Format(time.RFC3339),
|
||||
NamespaceOwner: namespace.Labels[namespaceOwnerLabel],
|
||||
IsSystem: isSystemNamespace(*namespace),
|
||||
IsSystem: isSystemNamespace(namespace),
|
||||
IsDefault: namespace.Name == defaultNamespace,
|
||||
}
|
||||
}
|
||||
|
@ -171,7 +171,7 @@ func (kcl *KubeClient) CreateNamespace(info models.K8sNamespaceDetails) (*corev1
|
|||
return namespace, nil
|
||||
}
|
||||
|
||||
func isSystemNamespace(namespace corev1.Namespace) bool {
|
||||
func isSystemNamespace(namespace *corev1.Namespace) bool {
|
||||
systemLabelValue, hasSystemLabel := namespace.Labels[systemNamespaceLabel]
|
||||
if hasSystemLabel {
|
||||
return systemLabelValue == "true"
|
||||
|
@ -184,6 +184,15 @@ func isSystemNamespace(namespace corev1.Namespace) bool {
|
|||
return isSystem
|
||||
}
|
||||
|
||||
// isSystemNamespace reports whether the named namespace is a "system"
// namespace. It fetches the namespace object from the cluster and defers to
// the package-level isSystemNamespace check.
// NOTE(review): lookup failures (including transient API errors) are treated
// as "not system" and silently swallowed — confirm that is intended.
func (kcl *KubeClient) isSystemNamespace(namespace string) bool {
	ns, err := kcl.cli.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{})
	if err != nil {
		return false
	}

	return isSystemNamespace(ns)
}
|
||||
|
||||
// ToggleSystemState will set a namespace as a system namespace, or remove this state
|
||||
// if isSystem is true it will set `systemNamespaceLabel` to "true" and false otherwise
|
||||
// this will skip if namespace is "default" or if the required state is already set
|
||||
|
@ -199,7 +208,7 @@ func (kcl *KubeClient) ToggleSystemState(namespaceName string, isSystem bool) er
|
|||
return errors.Wrap(err, "failed fetching namespace object")
|
||||
}
|
||||
|
||||
if isSystemNamespace(*namespace) == isSystem {
|
||||
if isSystemNamespace(namespace) == isSystem {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -65,7 +65,7 @@ func Test_ToggleSystemState(t *testing.T) {
|
|||
ns, err := kcl.cli.CoreV1().Namespaces().Get(context.Background(), nsName, metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, test.isSystem, isSystemNamespace(*ns))
|
||||
assert.Equal(t, test.isSystem, isSystemNamespace(ns))
|
||||
})
|
||||
}
|
||||
})
|
||||
|
|
|
@ -2,11 +2,15 @@ package cli
|
|||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
models "github.com/portainer/portainer/api/http/models/kubernetes"
|
||||
"github.com/portainer/portainer/api/internal/errorlist"
|
||||
"github.com/rs/zerolog/log"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// GetRoles gets all the roles, either at the cluster level or in a given namespace, in a k8s endpoint.
|
||||
|
@ -48,18 +52,20 @@ func (kcl *KubeClient) fetchRoles(namespace string) ([]models.K8sRole, error) {
|
|||
|
||||
results := make([]models.K8sRole, 0)
|
||||
for _, role := range roles.Items {
|
||||
results = append(results, parseRole(role))
|
||||
results = append(results, kcl.parseRole(role))
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// parseRole converts a rbacv1.Role object to a models.K8sRole object.
|
||||
func parseRole(role rbacv1.Role) models.K8sRole {
|
||||
func (kcl *KubeClient) parseRole(role rbacv1.Role) models.K8sRole {
|
||||
return models.K8sRole{
|
||||
Name: role.Name,
|
||||
UID: role.UID,
|
||||
Namespace: role.Namespace,
|
||||
CreationDate: role.CreationTimestamp.Time,
|
||||
IsSystem: kcl.isSystemRole(&role),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -108,3 +114,48 @@ func (kcl *KubeClient) upsertPortainerK8sClusterRoles() error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getPortainerDefaultK8sRoleNames lists the names of the roles Portainer
// itself provisions; these are treated as system roles and must never be
// deleted.
func getPortainerDefaultK8sRoleNames() []string {
	return []string{
		string(portainerUserCRName),
	}
}
|
||||
|
||||
// isSystemRole reports whether a role is a protected 'system' role: either
// its name carries the reserved "system:" prefix, or it lives in a namespace
// that Portainer treats as a system namespace.
func (kcl *KubeClient) isSystemRole(role *rbacv1.Role) bool {
	if strings.HasPrefix(role.Name, "system:") {
		return true
	}

	return kcl.isSystemNamespace(role.Namespace)
}
|
||||
|
||||
// DeleteRoles processes a K8sServiceDeleteRequest by deleting each role
|
||||
// in its given namespace.
|
||||
func (kcl *KubeClient) DeleteRoles(reqs models.K8sRoleDeleteRequests) error {
|
||||
var errors []error
|
||||
for namespace := range reqs {
|
||||
for _, name := range reqs[namespace] {
|
||||
client := kcl.cli.RbacV1().Roles(namespace)
|
||||
|
||||
role, err := client.Get(context.Background(), name, v1.GetOptions{})
|
||||
if err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a more serious error to do with the client so we return right away
|
||||
return err
|
||||
}
|
||||
|
||||
if kcl.isSystemRole(role) {
|
||||
log.Error().Str("role_name", name).Msg("ignoring delete of 'system' role, not allowed")
|
||||
}
|
||||
|
||||
if err := client.Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return errorlist.Combine(errors)
|
||||
}
|
||||
|
|
|
@ -2,10 +2,16 @@ package cli
|
|||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
models "github.com/portainer/portainer/api/http/models/kubernetes"
|
||||
"github.com/portainer/portainer/api/internal/errorlist"
|
||||
"github.com/rs/zerolog/log"
|
||||
corev1 "k8s.io/api/rbac/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// GetRoleBindings gets all the roleBindings, either at the cluster level or in a given namespace, in a k8s endpoint.
|
||||
|
@ -47,19 +53,82 @@ func (kcl *KubeClient) fetchRoleBindings(namespace string) ([]models.K8sRoleBind
|
|||
|
||||
results := make([]models.K8sRoleBinding, 0)
|
||||
for _, roleBinding := range roleBindings.Items {
|
||||
results = append(results, parseRoleBinding(roleBinding))
|
||||
results = append(results, kcl.parseRoleBinding(roleBinding))
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// parseRoleBinding converts a rbacv1.RoleBinding object to a models.K8sRoleBinding object.
|
||||
func parseRoleBinding(roleBinding rbacv1.RoleBinding) models.K8sRoleBinding {
|
||||
func (kcl *KubeClient) parseRoleBinding(roleBinding rbacv1.RoleBinding) models.K8sRoleBinding {
|
||||
return models.K8sRoleBinding{
|
||||
Name: roleBinding.Name,
|
||||
UID: roleBinding.UID,
|
||||
Namespace: roleBinding.Namespace,
|
||||
RoleRef: roleBinding.RoleRef,
|
||||
Subjects: roleBinding.Subjects,
|
||||
CreationDate: roleBinding.CreationTimestamp.Time,
|
||||
IsSystem: kcl.isSystemRoleBinding(&roleBinding),
|
||||
}
|
||||
}
|
||||
|
||||
func (kcl *KubeClient) isSystemRoleBinding(rb *rbacv1.RoleBinding) bool {
|
||||
if strings.HasPrefix(rb.Name, "system:") {
|
||||
return true
|
||||
}
|
||||
|
||||
if rb.Labels != nil {
|
||||
if rb.Labels["kubernetes.io/bootstrapping"] == "rbac-defaults" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if rb.RoleRef.Name != "" {
|
||||
role, err := kcl.getRole(rb.Namespace, rb.RoleRef.Name)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Linked to a role that is marked a system role
|
||||
if kcl.isSystemRole(role) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// getRole fetches a single Role by name from the given namespace.
// NOTE(review): the corev1 alias in this file actually points at
// k8s.io/api/rbac/v1 (see imports), so the return type is an RBAC Role —
// consider renaming the alias.
func (kcl *KubeClient) getRole(namespace, name string) (*corev1.Role, error) {
	client := kcl.cli.RbacV1().Roles(namespace)
	return client.Get(context.Background(), name, metav1.GetOptions{})
}
|
||||
|
||||
// DeleteRoleBindings processes a K8sServiceDeleteRequest by deleting each service
|
||||
// in its given namespace.
|
||||
func (kcl *KubeClient) DeleteRoleBindings(reqs models.K8sRoleBindingDeleteRequests) error {
|
||||
var errors []error
|
||||
for namespace := range reqs {
|
||||
for _, name := range reqs[namespace] {
|
||||
client := kcl.cli.RbacV1().RoleBindings(namespace)
|
||||
|
||||
roleBinding, err := client.Get(context.Background(), name, v1.GetOptions{})
|
||||
if err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a more serious error to do with the client so we return right away
|
||||
return err
|
||||
}
|
||||
|
||||
if kcl.isSystemRoleBinding(roleBinding) {
|
||||
log.Error().Str("role_name", name).Msg("ignoring delete of 'system' role binding, not allowed")
|
||||
}
|
||||
|
||||
if err := client.Delete(context.Background(), name, v1.DeleteOptions{}); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return errorlist.Combine(errors)
|
||||
}
|
||||
|
|
|
@ -2,9 +2,12 @@ package cli
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
portainer "github.com/portainer/portainer/api"
|
||||
"github.com/portainer/portainer/api/http/models/kubernetes"
|
||||
models "github.com/portainer/portainer/api/http/models/kubernetes"
|
||||
"github.com/portainer/portainer/api/internal/errorlist"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
@ -50,18 +53,20 @@ func (kcl *KubeClient) fetchServiceAccounts(namespace string) ([]models.K8sServi
|
|||
|
||||
results := make([]models.K8sServiceAccount, 0)
|
||||
for _, serviceAccount := range serviceAccounts.Items {
|
||||
results = append(results, parseServiceAccount(serviceAccount))
|
||||
results = append(results, kcl.parseServiceAccount(serviceAccount))
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// parseServiceAccount converts a corev1.ServiceAccount object to a models.K8sServiceAccount object.
|
||||
func parseServiceAccount(serviceAccount corev1.ServiceAccount) models.K8sServiceAccount {
|
||||
func (kcl *KubeClient) parseServiceAccount(serviceAccount corev1.ServiceAccount) models.K8sServiceAccount {
|
||||
return models.K8sServiceAccount{
|
||||
Name: serviceAccount.Name,
|
||||
UID: serviceAccount.UID,
|
||||
Namespace: serviceAccount.Namespace,
|
||||
CreationDate: serviceAccount.CreationTimestamp.Time,
|
||||
IsSystem: kcl.isSystemServiceAccount(serviceAccount.Namespace),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -81,6 +86,40 @@ func (kcl *KubeClient) GetPortainerUserServiceAccount(tokenData *portainer.Token
|
|||
return serviceAccount, nil
|
||||
}
|
||||
|
||||
// isSystemServiceAccount reports whether a service account should be treated
// as a system account. Currently this is decided solely by whether its
// namespace is a system namespace.
func (kcl *KubeClient) isSystemServiceAccount(namespace string) bool {
	return kcl.isSystemNamespace(namespace)
}
|
||||
|
||||
// DeleteServices processes a K8sServiceDeleteRequest by deleting each service
|
||||
// in its given namespace.
|
||||
func (kcl *KubeClient) DeleteServiceAccounts(reqs kubernetes.K8sServiceAccountDeleteRequests) error {
|
||||
var errors []error
|
||||
for namespace := range reqs {
|
||||
for _, serviceName := range reqs[namespace] {
|
||||
client := kcl.cli.CoreV1().ServiceAccounts(namespace)
|
||||
|
||||
sa, err := client.Get(context.Background(), serviceName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if kcl.isSystemServiceAccount(sa.Namespace) {
|
||||
return fmt.Errorf("cannot delete system service account %q", namespace+"/"+serviceName)
|
||||
}
|
||||
|
||||
if err := client.Delete(context.Background(), serviceName, metav1.DeleteOptions{}); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return errorlist.Combine(errors)
|
||||
}
|
||||
|
||||
// GetServiceAccountBearerToken returns the ServiceAccountToken associated to the specified user.
|
||||
func (kcl *KubeClient) GetServiceAccountBearerToken(userID int) (string, error) {
|
||||
serviceAccountName := UserServiceAccountName(userID, kcl.instanceID)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue