feat(support): collect system info bundle to assist support troubleshooting [r8s-157] (#154)
This commit is contained in: parent 17648d12fe, commit 783ab253af
17 changed files with 1367 additions and 440 deletions
pkg/snapshot/docker.go (new file, 372 additions)
@@ -0,0 +1,372 @@
package snapshot

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/segmentio/encoding/json"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	// Aliased second import of the container package: the plain name is
	// shadowed by the `container` loop variable in dockerSnapshotContainers.
	_container "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/docker/consts"
	edgeutils "github.com/portainer/portainer/pkg/edge"
	networkingutils "github.com/portainer/portainer/pkg/networking"
	"github.com/rs/zerolog/log"
)

func CreateDockerSnapshot(cli *client.Client) (*portainer.DockerSnapshot, error) {
	if _, err := cli.Ping(context.Background()); err != nil {
		return nil, err
	}

	dockerSnapshot := &portainer.DockerSnapshot{
		StackCount: 0,
	}

	err := dockerSnapshotInfo(dockerSnapshot, cli)
	if err != nil {
		log.Warn().Err(err).Msg("unable to snapshot engine information")
	}

	if dockerSnapshot.Swarm {
		err = dockerSnapshotSwarmServices(dockerSnapshot, cli)
		if err != nil {
			log.Warn().Err(err).Msg("unable to snapshot Swarm services")
		}

		err = dockerSnapshotNodes(dockerSnapshot, cli)
		if err != nil {
			log.Warn().Err(err).Msg("unable to snapshot Swarm nodes")
		}
	}

	err = dockerSnapshotContainers(dockerSnapshot, cli)
	if err != nil {
		log.Warn().Err(err).Msg("unable to snapshot containers")
	}

	err = dockerSnapshotImages(dockerSnapshot, cli)
	if err != nil {
		log.Warn().Err(err).Msg("unable to snapshot images")
	}

	err = dockerSnapshotVolumes(dockerSnapshot, cli)
	if err != nil {
		log.Warn().Err(err).Msg("unable to snapshot volumes")
	}

	err = dockerSnapshotNetworks(dockerSnapshot, cli)
	if err != nil {
		log.Warn().Err(err).Msg("unable to snapshot networks")
	}

	err = dockerSnapshotVersion(dockerSnapshot, cli)
	if err != nil {
		log.Warn().Err(err).Msg("unable to snapshot engine version")
	}

	dockerSnapshot.Time = time.Now().Unix()

	return dockerSnapshot, nil
}

func dockerSnapshotInfo(snapshot *portainer.DockerSnapshot, cli *client.Client) error {
	info, err := cli.Info(context.Background())
	if err != nil {
		return err
	}

	snapshot.Swarm = info.Swarm.ControlAvailable
	snapshot.DockerVersion = info.ServerVersion
	snapshot.TotalCPU = info.NCPU
	snapshot.TotalMemory = info.MemTotal
	snapshot.SnapshotRaw.Info = info

	return nil
}

func dockerSnapshotNodes(snapshot *portainer.DockerSnapshot, cli *client.Client) error {
	nodes, err := cli.NodeList(context.Background(), types.NodeListOptions{})
	if err != nil {
		return err
	}

	var nanoCpus int64
	var totalMem int64

	for _, node := range nodes {
		nanoCpus += node.Description.Resources.NanoCPUs
		totalMem += node.Description.Resources.MemoryBytes
	}

	snapshot.TotalCPU = int(nanoCpus / 1e9)
	snapshot.TotalMemory = totalMem
	snapshot.NodeCount = len(nodes)

	return nil
}

func dockerSnapshotSwarmServices(snapshot *portainer.DockerSnapshot, cli *client.Client) error {
	stacks := make(map[string]struct{})

	services, err := cli.ServiceList(context.Background(), types.ServiceListOptions{})
	if err != nil {
		return err
	}

	for _, service := range services {
		for k, v := range service.Spec.Labels {
			if k == "com.docker.stack.namespace" {
				stacks[v] = struct{}{}
			}
		}
	}

	snapshot.ServiceCount = len(services)
	snapshot.StackCount += len(stacks)

	return nil
}

func dockerSnapshotContainers(snapshot *portainer.DockerSnapshot, cli *client.Client) error {
	containers, err := cli.ContainerList(context.Background(), container.ListOptions{All: true})
	if err != nil {
		return err
	}

	stacks := make(map[string]struct{})
	gpuUseSet := make(map[string]struct{})
	gpuUseAll := false

	for _, container := range containers {
		if container.State == "running" {
			// Snapshot GPUs
			response, err := cli.ContainerInspect(context.Background(), container.ID)
			if err != nil {
				// Inspecting a container fails when it runs on a different
				// Swarm node, so when Swarm mode is enabled it is better to
				// log the error than to return it.
				if !snapshot.Swarm {
					return err
				}

				if !strings.Contains(err.Error(), "No such container") {
					return err
				}

				// It is common to have containers running on other Swarm
				// nodes, so the error is only logged at the debug level.
				log.Debug().Str("container", container.ID).Err(err).Msg("unable to inspect container on other Swarm nodes")
			} else {
				var gpuOptions *_container.DeviceRequest
				requests := response.HostConfig.Resources.DeviceRequests
				for i, deviceRequest := range requests {
					if deviceRequest.Driver == "nvidia" ||
						(len(deviceRequest.Capabilities) > 0 && len(deviceRequest.Capabilities[0]) > 0 && deviceRequest.Capabilities[0][0] == "gpu") {
						// Take the address of the slice element rather than of
						// the loop variable, which is rewritten each iteration.
						gpuOptions = &requests[i]
					}
				}

				if gpuOptions != nil {
					if gpuOptions.Count == -1 {
						gpuUseAll = true
					}

					for _, id := range gpuOptions.DeviceIDs {
						gpuUseSet[id] = struct{}{}
					}
				}
			}
		}

		for k, v := range container.Labels {
			if k == consts.ComposeStackNameLabel {
				stacks[v] = struct{}{}
			}
		}
	}

	gpuUseList := make([]string, 0, len(gpuUseSet))
	for gpuUse := range gpuUseSet {
		gpuUseList = append(gpuUseList, gpuUse)
	}

	snapshot.GpuUseAll = gpuUseAll
	snapshot.GpuUseList = gpuUseList

	stats := calculateContainerStats(containers)

	snapshot.ContainerCount = stats.Total
	snapshot.RunningContainerCount = stats.Running
	snapshot.StoppedContainerCount = stats.Stopped
	snapshot.HealthyContainerCount = stats.Healthy
	snapshot.UnhealthyContainerCount = stats.Unhealthy
	snapshot.StackCount += len(stacks)

	for _, container := range containers {
		snapshot.SnapshotRaw.Containers = append(snapshot.SnapshotRaw.Containers, portainer.DockerContainerSnapshot{Container: container})
	}

	return nil
}

func dockerSnapshotImages(snapshot *portainer.DockerSnapshot, cli *client.Client) error {
	images, err := cli.ImageList(context.Background(), image.ListOptions{})
	if err != nil {
		return err
	}

	snapshot.ImageCount = len(images)
	snapshot.SnapshotRaw.Images = images

	return nil
}

func dockerSnapshotVolumes(snapshot *portainer.DockerSnapshot, cli *client.Client) error {
	volumes, err := cli.VolumeList(context.Background(), volume.ListOptions{})
	if err != nil {
		return err
	}

	snapshot.VolumeCount = len(volumes.Volumes)
	snapshot.SnapshotRaw.Volumes = volumes

	return nil
}

func dockerSnapshotNetworks(snapshot *portainer.DockerSnapshot, cli *client.Client) error {
	networks, err := cli.NetworkList(context.Background(), types.NetworkListOptions{})
	if err != nil {
		return err
	}

	snapshot.SnapshotRaw.Networks = networks

	return nil
}

func dockerSnapshotVersion(snapshot *portainer.DockerSnapshot, cli *client.Client) error {
	version, err := cli.ServerVersion(context.Background())
	if err != nil {
		return err
	}

	snapshot.SnapshotRaw.Version = version
	snapshot.IsPodman = isPodman(version)
	return nil
}

// DockerSnapshotDiagnostics returns the diagnostics data for the agent.
func DockerSnapshotDiagnostics(cli *client.Client, edgeKey string) (*portainer.DiagnosticsData, error) {
	containerID := os.Getenv("HOSTNAME")
	snapshot := &portainer.DockerSnapshot{
		DiagnosticsData: &portainer.DiagnosticsData{
			DNS:    make(map[string]string),
			Telnet: make(map[string]string),
		},
	}

	err := dockerSnapshotContainerErrorLogs(snapshot, cli, containerID)
	if err != nil {
		return nil, err
	}

	if edgeKey != "" {
		url, err := edgeutils.GetPortainerURLFromEdgeKey(edgeKey)
		if err != nil {
			return nil, fmt.Errorf("failed to get portainer URL from edge key: %w", err)
		}

		snapshot.DiagnosticsData.DNS["edge-to-portainer"] = networkingutils.ProbeDNSConnection(url)
		snapshot.DiagnosticsData.Telnet["edge-to-portainer"] = networkingutils.ProbeTelnetConnection(url)
	}

	return snapshot.DiagnosticsData, nil
}

// dockerSnapshotContainerErrorLogs collects the 5 most recent stderr log
// lines of the agent container; primarily used for the agent snapshot.
func dockerSnapshotContainerErrorLogs(snapshot *portainer.DockerSnapshot, cli *client.Client, containerId string) error {
	if containerId == "" {
		return nil
	}

	rd, err := cli.ContainerLogs(context.Background(), containerId, container.LogsOptions{
		ShowStdout: false,
		ShowStderr: true,
		Tail:       "5",
		Timestamps: true,
	})
	if err != nil {
		return fmt.Errorf("failed to get container logs: %w", err)
	}
	defer rd.Close()

	// stdcopy.StdCopy demultiplexes the stream; the stdout destination comes
	// first, then the stderr destination.
	var stdOut, stdErr bytes.Buffer
	_, err = stdcopy.StdCopy(&stdOut, &stdErr, rd)
	if err != nil {
		return fmt.Errorf("failed to copy error logs: %w", err)
	}

	// Keep only the lines that look like errors, reusing the shared helper
	// defined in kubernetes.go in this package.
	logs := filterLogsByPattern(stdErr.Bytes(), []string{"error", "err", "level=error", "exception", "fatal", "panic"})

	jsonLogs, err := json.Marshal(logs)
	if err != nil {
		return fmt.Errorf("failed to marshal logs to JSON: %w", err)
	}

	snapshot.DiagnosticsData.Log = string(jsonLogs)

	return nil
}

// isPodman reports whether the version information belongs to Podman by
// checking whether any component name contains "podman"; for Podman, a
// component should be named "Podman Engine".
func isPodman(version types.Version) bool {
	for _, component := range version.Components {
		if strings.Contains(strings.ToLower(component.Name), "podman") {
			return true
		}
	}
	return false
}

type ContainerStats struct {
	Running   int
	Stopped   int
	Healthy   int
	Unhealthy int
	Total     int
}

func calculateContainerStats(containers []types.Container) ContainerStats {
	var running, stopped, healthy, unhealthy int
	for _, container := range containers {
		switch container.State {
		case "running":
			running++
		case "healthy":
			running++
			healthy++
		case "unhealthy":
			running++
			unhealthy++
		case "exited", "stopped":
			stopped++
		}
	}

	return ContainerStats{
		Running:   running,
		Stopped:   stopped,
		Healthy:   healthy,
		Unhealthy: unhealthy,
		Total:     len(containers),
	}
}
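For orientation, a minimal, hypothetical caller of CreateDockerSnapshot might look like the sketch below; the client wiring (client.NewClientWithOpts with client.FromEnv and client.WithAPIVersionNegotiation) is standard Docker SDK usage and not part of this diff, and the printed fields are the counters this file populates.

package main

import (
	"fmt"

	"github.com/docker/docker/client"
	"github.com/portainer/portainer/pkg/snapshot"
)

func main() {
	// Build a Docker client from the local environment (DOCKER_HOST etc.).
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	snap, err := snapshot.CreateDockerSnapshot(cli)
	if err != nil {
		panic(err)
	}

	// These counters are filled in by dockerSnapshotContainers/Images/Volumes.
	fmt.Printf("containers: %d (running %d, stopped %d), images: %d, volumes: %d\n",
		snap.ContainerCount, snap.RunningContainerCount, snap.StoppedContainerCount,
		snap.ImageCount, snap.VolumeCount)
}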
pkg/snapshot/docker_test.go (new file, 1 addition)
@@ -0,0 +1 @@
package snapshot
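The test file is only a package stub so far. A small test for calculateContainerStats that could live here, with the state values mirroring the switch in docker.go:

package snapshot

import (
	"testing"

	"github.com/docker/docker/api/types"
)

func TestCalculateContainerStats(t *testing.T) {
	containers := []types.Container{
		{State: "running"},
		{State: "healthy"},
		{State: "unhealthy"},
		{State: "exited"},
		{State: "stopped"},
	}

	stats := calculateContainerStats(containers)

	// "healthy" and "unhealthy" also count as running in docker.go.
	if stats.Total != 5 || stats.Running != 3 || stats.Stopped != 2 {
		t.Fatalf("unexpected counts: %+v", stats)
	}
	if stats.Healthy != 1 || stats.Unhealthy != 1 {
		t.Fatalf("unexpected health counts: %+v", stats)
	}
}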
pkg/snapshot/kubernetes.go (new file, 150 additions)
@@ -0,0 +1,150 @@
package snapshot

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/segmentio/encoding/json"

	"github.com/aws/smithy-go/ptr"
	portainer "github.com/portainer/portainer/api"
	edgeutils "github.com/portainer/portainer/pkg/edge"
	networkingutils "github.com/portainer/portainer/pkg/networking"
	"github.com/rs/zerolog/log"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func CreateKubernetesSnapshot(cli *kubernetes.Clientset) (*portainer.KubernetesSnapshot, error) {
	kubernetesSnapshot := &portainer.KubernetesSnapshot{}

	err := kubernetesSnapshotVersion(kubernetesSnapshot, cli)
	if err != nil {
		log.Warn().Err(err).Msg("unable to snapshot cluster version")
	}

	err = kubernetesSnapshotNodes(kubernetesSnapshot, cli)
	if err != nil {
		log.Warn().Err(err).Msg("unable to snapshot cluster nodes")
	}

	kubernetesSnapshot.Time = time.Now().Unix()
	return kubernetesSnapshot, nil
}

func kubernetesSnapshotVersion(snapshot *portainer.KubernetesSnapshot, cli *kubernetes.Clientset) error {
	versionInfo, err := cli.ServerVersion()
	if err != nil {
		return err
	}

	snapshot.KubernetesVersion = versionInfo.GitVersion
	return nil
}

func kubernetesSnapshotNodes(snapshot *portainer.KubernetesSnapshot, cli *kubernetes.Clientset) error {
	nodeList, err := cli.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}

	var totalCPUs, totalMemory int64
	for _, node := range nodeList.Items {
		totalCPUs += node.Status.Capacity.Cpu().Value()
		totalMemory += node.Status.Capacity.Memory().Value()
	}

	snapshot.TotalCPU = totalCPUs
	snapshot.TotalMemory = totalMemory
	snapshot.NodeCount = len(nodeList.Items)
	return nil
}

// KubernetesSnapshotDiagnostics returns the diagnostics data for the agent.
func KubernetesSnapshotDiagnostics(cli *kubernetes.Clientset, edgeKey string) (*portainer.DiagnosticsData, error) {
	podID := os.Getenv("HOSTNAME")
	snapshot := &portainer.KubernetesSnapshot{
		DiagnosticsData: &portainer.DiagnosticsData{
			DNS:    make(map[string]string),
			Telnet: make(map[string]string),
		},
	}

	err := kubernetesSnapshotPodErrorLogs(snapshot, cli, "portainer", podID)
	if err != nil {
		return nil, fmt.Errorf("failed to snapshot pod error logs: %w", err)
	}

	if edgeKey != "" {
		url, err := edgeutils.GetPortainerURLFromEdgeKey(edgeKey)
		if err != nil {
			return nil, fmt.Errorf("failed to get portainer URL from edge key: %w", err)
		}

		snapshot.DiagnosticsData.DNS["edge-to-portainer"] = networkingutils.ProbeDNSConnection(url)
		snapshot.DiagnosticsData.Telnet["edge-to-portainer"] = networkingutils.ProbeTelnetConnection(url)
	}

	return snapshot.DiagnosticsData, nil
}

// kubernetesSnapshotPodErrorLogs returns up to 10 of the most recent error
// log lines of the agent container; primarily used for the agent snapshot.
func kubernetesSnapshotPodErrorLogs(snapshot *portainer.KubernetesSnapshot, cli *kubernetes.Clientset, namespace, podID string) error {
	if namespace == "" || podID == "" {
		return errors.New("both namespace and podID are required to capture pod error logs in the snapshot")
	}

	logsStream, err := cli.CoreV1().Pods(namespace).GetLogs(podID, &corev1.PodLogOptions{TailLines: ptr.Int64(10), Timestamps: true}).Stream(context.TODO())
	if err != nil {
		return fmt.Errorf("failed to stream logs: %w", err)
	}
	defer logsStream.Close()

	logBytes, err := io.ReadAll(logsStream)
	if err != nil {
		return fmt.Errorf("failed to read error logs: %w", err)
	}

	logs := filterLogsByPattern(logBytes, []string{"error", "err", "level=error", "exception", "fatal", "panic"})

	jsonLogs, err := json.Marshal(logs)
	if err != nil {
		return fmt.Errorf("failed to marshal logs: %w", err)
	}
	snapshot.DiagnosticsData.Log = string(jsonLogs)

	return nil
}

// filterLogsByPattern filters the logs by the given patterns and returns the
// matching entries as a list of maps with the keys "timestamp" and "message".
func filterLogsByPattern(logBytes []byte, patterns []string) []map[string]string {
	logs := []map[string]string{}
	for _, line := range strings.Split(strings.TrimSpace(string(logBytes)), "\n") {
		if line == "" {
			continue
		}

		// Each line starts with a timestamp because the logs are requested
		// with Timestamps: true; split it off from the message.
		if parts := strings.SplitN(line, " ", 2); len(parts) == 2 {
			messageLower := strings.ToLower(parts[1])
			for _, pattern := range patterns {
				if strings.Contains(messageLower, pattern) {
					logs = append(logs, map[string]string{
						"timestamp": parts[0],
						"message":   parts[1],
					})
					break
				}
			}
		}
	}

	return logs
}
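As on the Docker side, a minimal hypothetical caller; rest.InClusterConfig and kubernetes.NewForConfig are standard client-go calls and an assumption about the wiring, not part of this diff:

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	"github.com/portainer/portainer/pkg/snapshot"
)

func main() {
	// Assumes the caller runs inside the cluster, as the agent does.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cli, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	snap, err := snapshot.CreateKubernetesSnapshot(cli)
	if err != nil {
		panic(err)
	}

	fmt.Printf("nodes: %d, total CPU: %d, total memory: %d bytes\n",
		snap.NodeCount, snap.TotalCPU, snap.TotalMemory)
}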
pkg/snapshot/kubernetes_test.go (new file, 1 addition)
@@ -0,0 +1 @@
package snapshot
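This stub likewise leaves filterLogsByPattern uncovered; a minimal test sketch against the behaviour defined above (timestamped lines, case-insensitive pattern match):

package snapshot

import "testing"

func TestFilterLogsByPattern(t *testing.T) {
	raw := []byte(
		"2025-01-01T00:00:00Z level=error msg=\"boom\"\n" +
			"2025-01-01T00:00:01Z level=info msg=\"all good\"\n")

	logs := filterLogsByPattern(raw, []string{"level=error"})

	if len(logs) != 1 {
		t.Fatalf("expected 1 matching entry, got %d", len(logs))
	}
	if logs[0]["timestamp"] != "2025-01-01T00:00:00Z" {
		t.Fatalf("unexpected timestamp: %q", logs[0]["timestamp"])
	}
}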