
feat(kubernetes/shell): kubectl web shell and kubeconfig functionality EE-448 (#5229)

* feat(kubernetes/shell): backport kubectl shell backend functionality EE-849 (#5168)

* backported core backend kubectl shell functionality

* - backported kubectl shell unit tests
- backported k8s cli interface update
- backported k8s client library fake patch

* refactored backend to match EE

* fixed test error typo

* GetServiceAccountName -> GetServiceAccount - making the function reusable in multiple contexts

* feat(kubernetes/shell): backport kubeconfig generation backend functionality EE-1004 (#5213)

* backported core backend kubectl shell functionality

* refactored backend to match EE

* - backported kubernetes backend handler implementation
- backported kubernetes config endpoint
- backported kubeconfig file generation
- backported kubeconfig and yaml unit tests
- backported updates to kubeclient interfaces

* feat(app): kubectl shell ui backport EE-927 (#5221)

* Kubectl UI backport to CE

* fix authentication redirect issue

* comment out redirect function

* fix shell full width & change name of shell

* disable button when terminal connected

* fixed whitespace changes for css

* fixed whitespace changes for html

* linting fixes

Co-authored-by: zees-dev <dev.786zshan@gmail.com>

* feat(kubernetes/shell): backport of kubeconfig export functionality EE-926 (#5228)

* EE backport of kubeconfig UI functionality

* using angularjs constant instead of hardcoded URL

* updated portainer kubectl shell image

* fix kubectl button position issue in ce

* fix pod that kept running when switching pages

* feat(app): Kubectl shell ui EE-833 EE-1099 (#5271)

* fix kubectl shell css

* fix mini css issue

* fix technical issues in ui changes from review

* delete unused file

* - refactored variable names
- restored content-wrapper scroll
- created object to store wrapper css

Co-authored-by: zees-dev <dev.786zshan@gmail.com>

* addressing PR issues

* fix required changes from tech reviews (#5319)

* fix required changes from tech reviews

* remove unused css variable

* component refactor according to PR and style guidelines

Co-authored-by: zees-dev <dev.786zshan@gmail.com>

* removed redundant dockerhub api endpoint variable

* - autoHeight -> terminal-window
- removed redundant try-catch
- saving config.yaml file as config

* fix(kube/shell): show error on failure

* fixed default https bug

* resolved merge conflicts

Co-authored-by: Richard Wei <54336863+WaysonWei@users.noreply.github.com>
Co-authored-by: richard <richard@richards-iMac-Pro.local>
Co-authored-by: Chaim Lev-Ari <chiptus@gmail.com>
Authored by zees-dev on 2021-08-05 15:02:06 +12:00, committed by GitHub
parent ec71720ceb
commit 665bf2c887
28 changed files with 1281 additions and 21 deletions


@@ -0,0 +1,66 @@
package cli
import (
"context"
"fmt"
portainer "github.com/portainer/portainer/api"
clientV1 "k8s.io/client-go/tools/clientcmd/api/v1"
)
// GetKubeConfig returns kubeconfig for the current user based on:
// - portainer server url
// - portainer user bearer token
// - portainer token data - which maps to k8s service account
func (kcl *KubeClient) GetKubeConfig(ctx context.Context, apiServerURL string, bearerToken string, tokenData *portainer.TokenData) (*clientV1.Config, error) {
serviceAccount, err := kcl.GetServiceAccount(tokenData)
if err != nil {
errText := fmt.Sprintf("unable to find serviceaccount associated with user; username=%s", tokenData.Username)
return nil, fmt.Errorf("%s; err=%w", errText, err)
}
kubeconfig := generateKubeconfig(apiServerURL, bearerToken, serviceAccount.Name)
return kubeconfig, nil
}
// generateKubeconfig generates and returns a kubeconfig resource usable by the `kubectl` CLI,
// allowing the client to connect directly to the k8s API server endpoint via Portainer (acting as a proxy)
func generateKubeconfig(apiServerURL, bearerToken, serviceAccountName string) *clientV1.Config {
const (
KubeConfigPortainerContext = "portainer-ctx"
KubeConfigPortainerCluster = "portainer-cluster"
)
return &clientV1.Config{
APIVersion: "v1",
Kind: "Config",
CurrentContext: KubeConfigPortainerContext,
Contexts: []clientV1.NamedContext{
{
Name: KubeConfigPortainerContext,
Context: clientV1.Context{
AuthInfo: serviceAccountName,
Cluster: KubeConfigPortainerCluster,
},
},
},
Clusters: []clientV1.NamedCluster{
{
Name: KubeConfigPortainerCluster,
Cluster: clientV1.Cluster{
Server: apiServerURL,
InsecureSkipTLSVerify: true,
},
},
},
AuthInfos: []clientV1.NamedAuthInfo{
{
Name: serviceAccountName,
AuthInfo: clientV1.AuthInfo{
Token: bearerToken,
},
},
},
}
}
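A minimal sketch of a hypothetical in-package caller of GetKubeConfig; the helper name, server URL and token values below are illustrative assumptions and are not part of this commit:

// exampleGetKubeConfig is illustration only: resolving the current user's kubeconfig.
func exampleGetKubeConfig(kcl *KubeClient, tokenData *portainer.TokenData) (*clientV1.Config, error) {
	// The URL and token shown are placeholders; in practice they come from the
	// incoming API request (Portainer server URL and the user's JWT).
	return kcl.GetKubeConfig(context.Background(), "https://portainer.example.com", "user-jwt", tokenData)
}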


@@ -0,0 +1,150 @@
package cli
import (
"context"
"errors"
"testing"
portainer "github.com/portainer/portainer/api"
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kfake "k8s.io/client-go/kubernetes/fake"
)
func Test_GetKubeConfig(t *testing.T) {
t.Run("returns error if SA non-existent", func(t *testing.T) {
k := &KubeClient{
cli: kfake.NewSimpleClientset(),
instanceID: "test",
}
tokenData := &portainer.TokenData{
ID: 1,
Role: portainer.AdministratorRole,
Username: portainerClusterAdminServiceAccountName,
}
_, err := k.GetKubeConfig(context.Background(), "localhost", "abc", tokenData)
if err == nil {
t.Error("GetKubeConfig should fail as service account does not exist")
}
if k8sErr := errors.Unwrap(err); !k8serrors.IsNotFound(k8sErr) {
t.Error("GetKubeConfig should fail with service account not found k8s error")
}
})
t.Run("successfully obtains kubeconfig for cluster admin", func(t *testing.T) {
k := &KubeClient{
cli: kfake.NewSimpleClientset(),
instanceID: "test",
}
tokenData := &portainer.TokenData{
Role: portainer.AdministratorRole,
Username: portainerClusterAdminServiceAccountName,
}
serviceAccount := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Name: tokenData.Username},
}
k.cli.CoreV1().ServiceAccounts(portainerNamespace).Create(serviceAccount)
defer k.cli.CoreV1().ServiceAccounts(portainerNamespace).Delete(serviceAccount.Name, nil)
_, err := k.GetKubeConfig(context.Background(), "localhost", "abc", tokenData)
if err != nil {
t.Errorf("GetKubeConfig should succeed; err=%s", err)
}
})
t.Run("successfully obtains kubeconfig for standard user", func(t *testing.T) {
k := &KubeClient{
cli: kfake.NewSimpleClientset(),
instanceID: "test",
}
tokenData := &portainer.TokenData{
ID: 1,
Role: portainer.StandardUserRole,
}
nonAdminUserName := userServiceAccountName(int(tokenData.ID), k.instanceID)
serviceAccount := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Name: nonAdminUserName},
}
k.cli.CoreV1().ServiceAccounts(portainerNamespace).Create(serviceAccount)
defer k.cli.CoreV1().ServiceAccounts(portainerNamespace).Delete(serviceAccount.Name, nil)
_, err := k.GetKubeConfig(context.Background(), "localhost", "abc", tokenData)
if err != nil {
t.Errorf("GetKubeConfig should succeed; err=%s", err)
}
})
}
func Test_generateKubeconfig(t *testing.T) {
apiServerURL, bearerToken, serviceAccountName := "localhost", "test-token", "test-user"
t.Run("generates Config resource kind", func(t *testing.T) {
config := generateKubeconfig(apiServerURL, bearerToken, serviceAccountName)
want := "Config"
if config.Kind != want {
t.Errorf("generateKubeconfig resource kind should be %s", want)
}
})
t.Run("generates v1 version", func(t *testing.T) {
config := generateKubeconfig(apiServerURL, bearerToken, serviceAccountName)
want := "v1"
if config.APIVersion != want {
t.Errorf("generateKubeconfig api version should be %s", want)
}
})
t.Run("generates single entry context cluster and authinfo", func(t *testing.T) {
config := generateKubeconfig(apiServerURL, bearerToken, serviceAccountName)
if len(config.Contexts) != 1 {
t.Error("generateKubeconfig should generate single context configuration")
}
if len(config.Clusters) != 1 {
t.Error("generateKubeconfig should generate single cluster configuration")
}
if len(config.AuthInfos) != 1 {
t.Error("generateKubeconfig should generate single user configuration")
}
})
t.Run("sets default context appropriately", func(t *testing.T) {
config := generateKubeconfig(apiServerURL, bearerToken, serviceAccountName)
want := "portainer-ctx"
if config.CurrentContext != want {
t.Errorf("generateKubeconfig set cluster to be %s", want)
}
})
t.Run("generates cluster with InsecureSkipTLSVerify to be set to true", func(t *testing.T) {
config := generateKubeconfig(apiServerURL, bearerToken, serviceAccountName)
if config.Clusters[0].Cluster.InsecureSkipTLSVerify != true {
t.Error("generateKubeconfig default cluster InsecureSkipTLSVerify should be true")
}
})
t.Run("should contain passed in value", func(t *testing.T) {
config := generateKubeconfig(apiServerURL, bearerToken, serviceAccountName)
if config.Clusters[0].Cluster.Server != apiServerURL {
t.Errorf("generateKubeconfig default cluster server url should be %s", apiServerURL)
}
if config.AuthInfos[0].Name != serviceAccountName {
t.Errorf("generateKubeconfig default authinfo name should be %s", serviceAccountName)
}
if config.AuthInfos[0].AuthInfo.Token != bearerToken {
t.Errorf("generateKubeconfig default authinfo user token should be %s", bearerToken)
}
})
}


@@ -1,16 +1,20 @@
package cli
import "fmt"
import (
"fmt"
)
const (
defaultNamespace = "default"
portainerNamespace = "portainer"
portainerUserCRName = "portainer-cr-user"
portainerUserCRBName = "portainer-crb-user"
portainerUserServiceAccountPrefix = "portainer-sa-user"
portainerRBPrefix = "portainer-rb"
portainerConfigMapName = "portainer-config"
portainerConfigMapAccessPoliciesKey = "NamespaceAccessPolicies"
defaultNamespace = "default"
portainerNamespace = "portainer"
portainerUserCRName = "portainer-cr-user"
portainerUserCRBName = "portainer-crb-user"
portainerClusterAdminServiceAccountName = "portainer-sa-clusteradmin"
portainerUserServiceAccountPrefix = "portainer-sa-user"
portainerRBPrefix = "portainer-rb"
portainerConfigMapName = "portainer-config"
portainerConfigMapAccessPoliciesKey = "NamespaceAccessPolicies"
portainerShellPodPrefix = "portainer-pod-kubectl-shell"
)
func userServiceAccountName(userID int, instanceID string) string {
@@ -24,3 +28,7 @@ func userServiceAccountTokenSecretName(serviceAccountName string, instanceID str
func namespaceClusterRoleBindingName(namespace string, instanceID string) string {
return fmt.Sprintf("%s-%s-%s", portainerRBPrefix, instanceID, namespace)
}
func userShellPodPrefix(serviceAccountName string) string {
return fmt.Sprintf("%s-%s-", portainerShellPodPrefix, serviceAccountName)
}
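For reference, a short sketch of how these naming helpers expand; it is illustration only, and the expected values match the unit tests elsewhere in this diff for instanceID "test" and user ID 1:

// exampleNaming is not part of this commit; expected values come from the tests in this diff.
func exampleNaming() {
	saName := userServiceAccountName(1, "test")
	fmt.Println(saName)                     // portainer-sa-user-test-1
	fmt.Println(userShellPodPrefix(saName)) // portainer-pod-kubectl-shell-portainer-sa-user-test-1-
}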

api/kubernetes/cli/pod.go (new file, 120 lines)

@@ -0,0 +1,120 @@
package cli
import (
"context"
"fmt"
"log"
"time"
"github.com/pkg/errors"
portainer "github.com/portainer/portainer/api"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const shellPodImage = "portainer/kubectl-shell"
// CreateUserShellPod will create a kubectl-based shell pod for the specified user by mounting their respective service account.
// The lifecycle of the pod is managed in this function; this entails management of the following pod operations:
// - The shell pod will be scoped to the specified service account's access permissions
// - The shell pod will be automatically removed if it is not ready after a specified period of time
// - The shell pod will be automatically removed after a specified max lifetime (to prevent zombie pods)
// - The shell pod will be automatically removed if the request is cancelled (or the client closes the websocket connection)
func (kcl *KubeClient) CreateUserShellPod(ctx context.Context, serviceAccountName string) (*portainer.KubernetesShellPod, error) {
// Schedule the pod for automatic removal
maxPodKeepAlive := 1 * time.Hour
maxPodKeepAliveSecondsStr := fmt.Sprintf("%d", int(maxPodKeepAlive.Seconds()))
podPrefix := userShellPodPrefix(serviceAccountName)
podSpec := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: podPrefix,
Namespace: portainerNamespace,
Annotations: map[string]string{
"kubernetes.io/pod.type": "kubectl-shell",
},
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: new(int64),
ServiceAccountName: serviceAccountName,
Containers: []v1.Container{
{
Name: "kubectl-shell-container",
Image: shellPodImage,
Command: []string{"sleep"},
// Specify sleep time to prevent zombie pods in case portainer process is terminated
Args: []string{maxPodKeepAliveSecondsStr},
ImagePullPolicy: v1.PullIfNotPresent,
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
shellPod, err := kcl.cli.CoreV1().Pods(portainerNamespace).Create(podSpec)
if err != nil {
return nil, errors.Wrap(err, "error creating shell pod")
}
// Wait for pod to reach ready state
timeoutCtx, cancelFunc := context.WithTimeout(ctx, 20*time.Second)
defer cancelFunc()
err = kcl.waitForPodStatus(timeoutCtx, v1.PodRunning, shellPod)
if err != nil {
kcl.cli.CoreV1().Pods(portainerNamespace).Delete(shellPod.Name, nil)
return nil, errors.Wrap(err, "aborting pod creation; error waiting for shell pod ready status")
}
if len(shellPod.Spec.Containers) != 1 {
kcl.cli.CoreV1().Pods(portainerNamespace).Delete(shellPod.Name, nil)
return nil, fmt.Errorf("incorrect shell pod state, expecting single container to be present")
}
podData := &portainer.KubernetesShellPod{
Namespace: shellPod.Namespace,
PodName: shellPod.Name,
ContainerName: shellPod.Spec.Containers[0].Name,
ShellExecCommand: "env COLUMNS=200 /bin/bash", // env COLUMNS dictates minimum width of the shell
}
// Handle pod lifecycle/cleanup - terminate pod after maxPodKeepAlive or upon request (long-lived) cancellation
go func() {
select {
case <-time.After(maxPodKeepAlive):
log.Println("[DEBUG] [internal,kubernetes/pod] [message: pod removal schedule duration exceeded]")
kcl.cli.CoreV1().Pods(portainerNamespace).Delete(shellPod.Name, nil)
case <-ctx.Done():
err := ctx.Err()
log.Printf("[DEBUG] [internal,kubernetes/pod] [message: context error: err=%s ]\n", err)
kcl.cli.CoreV1().Pods(portainerNamespace).Delete(shellPod.Name, nil)
}
}()
return podData, nil
}
// waitForPodStatus waits, until the provided context is cancelled or times out, for a pod to reach the defined phase/status.
// The pod status is polled at a fixed delay until the pod reaches the desired phase.
func (kcl *KubeClient) waitForPodStatus(ctx context.Context, phase v1.PodPhase, pod *v1.Pod) error {
log.Printf("[DEBUG] [internal,kubernetes/pod] [message: waiting for pod ready: pod=%s... ]\n", pod.Name)
pollDelay := 500 * time.Millisecond
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
pod, err := kcl.cli.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return err
}
if pod.Status.Phase == phase {
return nil
}
<-time.After(pollDelay)
}
}
}
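A hedged sketch of how a caller might drive CreateUserShellPod; the handler shape and wiring here are assumptions for illustration, but they mirror the lifecycle described above: deriving the context from the client connection means the cleanup goroutine deletes the pod as soon as the websocket closes.

// exampleStartShell is a hypothetical caller, not part of this commit.
func exampleStartShell(ctx context.Context, kcl *KubeClient, serviceAccountName string) error {
	// ctx should be derived from the websocket request so that cancelling it
	// (client disconnect) triggers deletion of the shell pod.
	shellPod, err := kcl.CreateUserShellPod(ctx, serviceAccountName)
	if err != nil {
		return err
	}
	log.Printf("shell pod ready: namespace=%s pod=%s container=%s command=%q",
		shellPod.Namespace, shellPod.PodName, shellPod.ContainerName, shellPod.ShellExecCommand)
	// An exec session would now be attached to shellPod.ContainerName using
	// shellPod.ShellExecCommand; that wiring lives outside this file.
	return nil
}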


@@ -0,0 +1,67 @@
package cli
import (
"context"
"testing"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kfake "k8s.io/client-go/kubernetes/fake"
)
func Test_waitForPodStatus(t *testing.T) {
t.Run("successfully errors on cancelled context", func(t *testing.T) {
k := &KubeClient{
cli: kfake.NewSimpleClientset(),
instanceID: "test",
}
podSpec := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: defaultNamespace},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "test-pod", Image: "containous/whoami"},
},
},
}
ctx, cancel := context.WithCancel(context.TODO())
cancel()
err := k.waitForPodStatus(ctx, v1.PodRunning, podSpec)
if err != context.Canceled {
t.Errorf("waitForPodStatus should throw context cancellation error; err=%s", err)
}
})
t.Run("successfully errors on timeout", func(t *testing.T) {
k := &KubeClient{
cli: kfake.NewSimpleClientset(),
instanceID: "test",
}
podSpec := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: defaultNamespace},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "test-pod", Image: "containous/whoami"},
},
},
}
pod, err := k.cli.CoreV1().Pods(defaultNamespace).Create(podSpec)
if err != nil {
t.Errorf("failed to create pod; err=%s", err)
}
defer k.cli.CoreV1().Pods(defaultNamespace).Delete(pod.Name, nil)
ctx, cancelFunc := context.WithTimeout(context.TODO(), 0*time.Second)
defer cancelFunc()
err = k.waitForPodStatus(ctx, v1.PodRunning, podSpec)
if err != context.DeadlineExceeded {
t.Errorf("waitForPodStatus should throw deadline exceeded error; err=%s", err)
}
})
}


@@ -0,0 +1,27 @@
package cli
import (
"bytes"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
)
func GenerateYAML(obj runtime.Object) (string, error) {
serializer := json.NewSerializerWithOptions(
json.DefaultMetaFactory, nil, nil,
json.SerializerOptions{
Yaml: true,
Pretty: true,
Strict: true,
},
)
b := new(bytes.Buffer)
err := serializer.Encode(obj, b)
if err != nil {
return "", err
}
return b.String(), nil
}
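A short sketch tying GenerateYAML back to the kubeconfig generation earlier in this diff; this is illustrative wiring rather than code from the commit. The serialized string is what the user ultimately saves as a `config` file for kubectl.

// exampleKubeconfigYAML is illustration only; both functions live in this package.
func exampleKubeconfigYAML(apiServerURL, bearerToken, serviceAccountName string) (string, error) {
	cfg := generateKubeconfig(apiServerURL, bearerToken, serviceAccountName)
	// GenerateYAML accepts any runtime.Object; clientV1.Config satisfies it.
	return GenerateYAML(cfg)
}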


@@ -0,0 +1,93 @@
package cli
import (
"strings"
"testing"
"k8s.io/apimachinery/pkg/runtime"
clientV1 "k8s.io/client-go/tools/clientcmd/api/v1"
)
// compareYAMLStrings will compare 2 strings by stripping tabs, newlines and whitespace from both strings
func compareYAMLStrings(in1, in2 string) int {
r := strings.NewReplacer("\t", "", "\n", "", " ", "")
in1 = r.Replace(in1)
in2 = r.Replace(in2)
return strings.Compare(in1, in2)
}
func Test_GenerateYAML(t *testing.T) {
resourceYAMLTests := []struct {
title string
resource runtime.Object
wantYAML string
}{
{
title: "Config",
resource: &clientV1.Config{
APIVersion: "v1",
Kind: "Config",
CurrentContext: "portainer-ctx",
Contexts: []clientV1.NamedContext{
{
Name: "portainer-ctx",
Context: clientV1.Context{
AuthInfo: "test-user",
Cluster: "portainer-cluster",
},
},
},
Clusters: []clientV1.NamedCluster{
{
Name: "portainer-cluster",
Cluster: clientV1.Cluster{
Server: "localhost",
InsecureSkipTLSVerify: true,
},
},
},
AuthInfos: []clientV1.NamedAuthInfo{
{
Name: "test-user",
AuthInfo: clientV1.AuthInfo{
Token: "test-token",
},
},
},
},
wantYAML: `
apiVersion: v1
clusters:
- cluster:
insecure-skip-tls-verify: true
server: localhost
name: portainer-cluster
contexts:
- context:
cluster: portainer-cluster
user: test-user
name: portainer-ctx
current-context: portainer-ctx
kind: Config
preferences: {}
users:
- name: test-user
user:
token: test-token
`,
},
}
for _, ryt := range resourceYAMLTests {
t.Run(ryt.title, func(t *testing.T) {
yaml, err := GenerateYAML(ryt.resource)
if err != nil {
t.Errorf("generateYamlConfig failed; err=%s", err)
}
if compareYAMLStrings(yaml, ryt.wantYAML) != 0 {
t.Errorf("generateYamlConfig failed;\ngot=\n%s\nwant=\n%s", yaml, ryt.wantYAML)
}
})
}
}


@@ -1,12 +1,31 @@
package cli
import (
"k8s.io/api/core/v1"
portainer "github.com/portainer/portainer/api"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetServiceAccount returns the Portainer ServiceAccount associated with the specified user.
func (kcl *KubeClient) GetServiceAccount(tokenData *portainer.TokenData) (*v1.ServiceAccount, error) {
var portainerServiceAccountName string
if tokenData.Role == portainer.AdministratorRole {
portainerServiceAccountName = portainerClusterAdminServiceAccountName
} else {
portainerServiceAccountName = userServiceAccountName(int(tokenData.ID), kcl.instanceID)
}
// verify name exists as service account resource within portainer namespace
serviceAccount, err := kcl.cli.CoreV1().ServiceAccounts(portainerNamespace).Get(portainerServiceAccountName, metav1.GetOptions{})
if err != nil {
return nil, err
}
return serviceAccount, nil
}
// GetServiceAccountBearerToken returns the ServiceAccountToken associated to the specified user.
func (kcl *KubeClient) GetServiceAccountBearerToken(userID int) (string, error) {
serviceAccountName := userServiceAccountName(userID, kcl.instanceID)


@@ -0,0 +1,92 @@
package cli
import (
"testing"
portainer "github.com/portainer/portainer/api"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kfake "k8s.io/client-go/kubernetes/fake"
)
func Test_GetServiceAccount(t *testing.T) {
t.Run("returns error if non-existent", func(t *testing.T) {
k := &KubeClient{
cli: kfake.NewSimpleClientset(),
instanceID: "test",
}
tokenData := &portainer.TokenData{ID: 1}
_, err := k.GetServiceAccount(tokenData)
if err == nil {
t.Error("GetServiceAccount should fail with service account not found")
}
})
t.Run("succeeds for cluster admin role", func(t *testing.T) {
k := &KubeClient{
cli: kfake.NewSimpleClientset(),
instanceID: "test",
}
tokenData := &portainer.TokenData{
ID: 1,
Role: portainer.AdministratorRole,
Username: portainerClusterAdminServiceAccountName,
}
serviceAccount := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: tokenData.Username,
},
}
_, err := k.cli.CoreV1().ServiceAccounts(portainerNamespace).Create(serviceAccount)
if err != nil {
t.Errorf("failed to create service acount; err=%s", err)
}
defer k.cli.CoreV1().ServiceAccounts(portainerNamespace).Delete(serviceAccount.Name, nil)
sa, err := k.GetServiceAccount(tokenData)
if err != nil {
t.Errorf("GetServiceAccount should succeed; err=%s", err)
}
want := "portainer-sa-clusteradmin"
if sa.Name != want {
t.Errorf("GetServiceAccount should succeed and return correct sa name; got=%s want=%s", sa.Name, want)
}
})
t.Run("succeeds for standard user role", func(t *testing.T) {
k := &KubeClient{
cli: kfake.NewSimpleClientset(),
instanceID: "test",
}
tokenData := &portainer.TokenData{
ID: 1,
Role: portainer.StandardUserRole,
}
serviceAccountName := userServiceAccountName(int(tokenData.ID), k.instanceID)
serviceAccount := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
_, err := k.cli.CoreV1().ServiceAccounts(portainerNamespace).Create(serviceAccount)
if err != nil {
t.Errorf("failed to create service acount; err=%s", err)
}
defer k.cli.CoreV1().ServiceAccounts(portainerNamespace).Delete(serviceAccount.Name, nil)
sa, err := k.GetServiceAccount(tokenData)
if err != nil {
t.Errorf("GetServiceAccount should succeed; err=%s", err)
}
want := "portainer-sa-user-test-1"
if sa.Name != want {
t.Errorf("GetServiceAccount should succeed and return correct sa name; got=%s want=%s", sa.Name, want)
}
})
}