mirror of https://github.com/portainer/portainer.git synced 2025-08-02 20:35:25 +02:00

feat(logging): replace all the loggers with zerolog EE-4186 (#7663)

andres-portainer 2022-09-16 13:18:44 -03:00 committed by GitHub
parent 53025178ef
commit 36e7981ab7
109 changed files with 1101 additions and 662 deletions


@@ -3,11 +3,12 @@ package cli
 import (
     "context"
     "fmt"
-    "log"
     "time"
-    "github.com/pkg/errors"
     portainer "github.com/portainer/portainer/api"
+    "github.com/pkg/errors"
+    "github.com/rs/zerolog/log"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -78,11 +79,11 @@ func (kcl *KubeClient) CreateUserShellPod(ctx context.Context, serviceAccountNam
     go func() {
         select {
         case <-time.After(portainer.WebSocketKeepAlive):
-            log.Println("[DEBUG] [internal,kubernetes/pod] [message: pod removal schedule duration exceeded]")
+            log.Debug().Msg("pod removal schedule duration exceeded")
             kcl.cli.CoreV1().Pods(portainerNamespace).Delete(context.TODO(), shellPod.Name, metav1.DeleteOptions{})
         case <-ctx.Done():
             err := ctx.Err()
-            log.Printf("[DEBUG] [internal,kubernetes/pod] [message: context error: err=%s ]\n", err)
+            log.Debug().Err(err).Msg("context error")
             kcl.cli.CoreV1().Pods(portainerNamespace).Delete(context.TODO(), shellPod.Name, metav1.DeleteOptions{})
         }
     }()
@@ -93,7 +94,7 @@ func (kcl *KubeClient) CreateUserShellPod(ctx context.Context, serviceAccountNam
 // waitForPodStatus will wait until duration d (from now) for a pod to reach defined phase/status.
 // The pod status will be polled at specified delay until the pod reaches ready state.
 func (kcl *KubeClient) waitForPodStatus(ctx context.Context, phase v1.PodPhase, pod *v1.Pod) error {
-    log.Printf("[DEBUG] [internal,kubernetes/pod] [message: waiting for pod ready: pod=%s... ]\n", pod.Name)
+    log.Debug().Str("pod", pod.Name).Msg("waiting for pod ready")
     pollDelay := 500 * time.Millisecond
     for {

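The hunks above show the core of the migration: standard library log.Println/log.Printf calls, which packed the level and key/value context into a bracketed format string, become chained events on zerolog's global logger (github.com/rs/zerolog/log), with the level as the constructor (Debug()), context as typed fields (Str, Err), and the human-readable text passed to Msg. The following standalone sketch illustrates that pattern outside the commit; the logger setup and the podName value are illustrative assumptions, not code from Portainer.

package main

import (
    "errors"
    "os"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    // The global logger writes JSON to stderr by default; ConsoleWriter is an
    // optional human-readable format for local runs (assumed setup, not from the commit).
    log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
    zerolog.SetGlobalLevel(zerolog.DebugLevel)

    // Before: log.Printf("[DEBUG] [internal,kubernetes/pod] [message: waiting for pod ready: pod=%s ]\n", podName)
    // After: the pod name becomes a typed field and the message text stays constant.
    podName := "example-shell-pod" // illustrative value
    log.Debug().Str("pod", podName).Msg("waiting for pod ready")

    // Errors are attached with Err, which records them under zerolog's "error" key.
    err := errors.New("context canceled")
    log.Debug().Err(err).Msg("context error")
}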

@@ -6,12 +6,12 @@ import (
     "encoding/pem"
     "fmt"
     "io/ioutil"
-    "log"
     "strings"
-    "github.com/pkg/errors"
     portainer "github.com/portainer/portainer/api"
-    "github.com/sirupsen/logrus"
+    "github.com/pkg/errors"
+    "github.com/rs/zerolog/log"
 )
 // KubeClusterAccessService represents a service that is responsible for centralizing kube cluster access data
@@ -45,7 +45,7 @@ var (
 func NewKubeClusterAccessService(baseURL, httpsBindAddr, tlsCertPath string) KubeClusterAccessService {
     certificateAuthorityData, err := getCertificateAuthorityData(tlsCertPath)
     if err != nil {
-        log.Printf("[DEBUG] [internal,kubeconfig] [message: %s, generated KubeConfig will be insecure]", err.Error())
+        log.Debug().Err(err).Msg("generated KubeConfig will be insecure")
     }
     return &kubeClusterAccessService{
@@ -106,7 +106,11 @@ func (service *kubeClusterAccessService) GetData(hostURL string, endpointID port
         baseURL = fmt.Sprintf("/%s/", strings.Trim(baseURL, "/"))
     }
-    logrus.Infof("[kubeconfig] [hostURL: %s, httpsBindAddr: %s, baseURL: %s]", hostURL, service.httpsBindAddr, baseURL)
+    log.Info().
+        Str("host_URL", hostURL).
+        Str("HTTPS_bind_address", service.httpsBindAddr).
+        Str("base_URL", baseURL).
+        Msg("kubeconfig")
     clusterURL := hostURL + baseURL

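Where logrus.Infof interpolated several values into one format string, the zerolog version spreads them across named Str fields on a single Info event and ends the chain with Msg, which is what actually emits the entry. A minimal sketch of that conversion, with illustrative values standing in for the service's hostURL, httpsBindAddr, and baseURL:

package main

import "github.com/rs/zerolog/log"

func main() {
    // Illustrative values; in the commit these come from the method arguments and receiver.
    hostURL := "https://portainer.example.com"
    httpsBindAddr := ":9443"
    baseURL := "/api/"

    // Before: logrus.Infof("[kubeconfig] [hostURL: %s, httpsBindAddr: %s, baseURL: %s]", hostURL, httpsBindAddr, baseURL)
    // After: each value is a named field; nothing is written until Msg is called.
    log.Info().
        Str("host_URL", hostURL).
        Str("HTTPS_bind_address", httpsBindAddr).
        Str("base_URL", baseURL).
        Msg("kubeconfig")
}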

@@ -2,11 +2,12 @@ package kubernetes
 import (
     "context"
-    "log"
     "time"
     portainer "github.com/portainer/portainer/api"
     "github.com/portainer/portainer/api/kubernetes/cli"
+    "github.com/rs/zerolog/log"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
 )
@@ -42,12 +43,12 @@ func snapshot(cli *kubernetes.Clientset, endpoint *portainer.Endpoint) (*portain
     err := snapshotVersion(snapshot, cli)
     if err != nil {
-        log.Printf("[WARN] [kubernetes,snapshot] [message: unable to snapshot cluster version] [endpoint: %s] [err: %s]", endpoint.Name, err)
+        log.Warn().Str("endpoint", endpoint.Name).Err(err).Msg("unable to snapshot cluster version")
     }
     err = snapshotNodes(snapshot, cli)
     if err != nil {
-        log.Printf("[WARN] [kubernetes,snapshot] [message: unable to snapshot cluster nodes] [endpoint: %s] [err: %s]", endpoint.Name, err)
+        log.Warn().Str("endpoint", endpoint.Name).Err(err).Msg("unable to snapshot cluster nodes")
     }
     snapshot.Time = time.Now().Unix()
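
The snapshot changes follow the same shape at the warning level: the endpoint name moves from a %s placeholder into a Str field and the error into Err. A short sketch under assumed names (snapshotVersionStub and the endpoint value are stand-ins, not Portainer code):

package main

import (
    "errors"

    "github.com/rs/zerolog/log"
)

// snapshotVersionStub stands in for the real snapshotVersion helper; it always
// fails so the warning path below runs.
func snapshotVersionStub() error {
    return errors.New("connection refused")
}

func main() {
    endpointName := "local-k8s" // illustrative endpoint name
    if err := snapshotVersionStub(); err != nil {
        // Err stores the error under zerolog's "error" key; Msg emits the event.
        log.Warn().Str("endpoint", endpointName).Err(err).Msg("unable to snapshot cluster version")
    }
}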