Mirror of https://github.com/portainer/portainer.git (synced 2025-08-07 06:45:23 +02:00)

Compare commits


214 commits

Author SHA1 Message Date
Devon Steenberg
84b4b30f21 fix(rand): Use crypto/rand instead of math/rand in FIPS mode [BE-12071] (#961)
Co-authored-by: codecov-ai[bot] <156709835+codecov-ai[bot]@users.noreply.github.com>
2025-08-06 10:19:15 +12:00
andres-portainer
6c47598cd9 fix(apikey): use HMAC-SHA256 for FIPS mode API keys BE-11936 (#980) 2025-08-05 13:09:35 -03:00
andres-portainer
d00d71ecbf fix(linter): add linter rules to reduce the chance for invalid FIPS settings BE-11979 (#975) 2025-08-05 09:23:07 -03:00
Ali
dc273b2d63 fix(helm): don't block install with dry-run errors [r8s-454] (#976)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-05 18:53:41 +12:00
James Carppe
497b16e942 Chore update readme graphic (#963)
Co-authored-by: Phil Calder <4473109+predlac@users.noreply.github.com>
2025-08-05 17:14:54 +12:00
LP B
a472de1919 fix(app/edge-jobs): edge job results page crash at scale (#954) 2025-08-04 17:10:46 +02:00
Malcolm Lockyer
d306d7a983 fix(encryption): replace encryption related methods for fips mode [be-11933] (#919)
Co-authored-by: andres-portainer <andres-portainer@users.noreply.github.com>
2025-08-04 17:04:03 +12:00
andres-portainer
163aa57e5c fix(tls): centralize the TLS configuration to ensure FIPS compliance BE-11979 (#960) 2025-08-01 22:23:59 -03:00
andres-portainer
3eab294908 fix(linters): add the bodyclose linter BE-12112 (#959) 2025-07-30 11:35:30 -03:00
Viktor Pettersson
da30780ac2 feat(autopatch): implement patch finder for retrieving latest patches from GitHub (#957) BE-12085 2025-07-30 15:57:32 +12:00
Ali
ef53354193 fix(snapshot): show snapshot stats [r8s-432] (#952) 2025-07-29 22:51:05 +12:00
andres-portainer
e9ce3d2213 fix(endpointedge): optimize buildSchedules() BE-12099 (#955) 2025-07-28 19:19:07 -03:00
andres-portainer
a46db61c4c fix(endpointrelation): optimize updateEdgeStacksAfterRelationChange() BE-12092 (#941) 2025-07-28 13:19:05 -03:00
Steven Kang
5e271fd4a4 feat(ui): reordered kubernetes create from code options [R8S-429] (#951) 2025-07-28 15:41:12 +12:00
James Player
6481483074 fix(app/sidebar): Custom logo UI issue [r8s-435] (#939) 2025-07-25 15:29:06 +12:00
James Player
7bcb37c761 feat(app/kubernetes): Popout kubectl shell into new window [r8s-307] (#922) 2025-07-25 15:24:32 +12:00
LP B
e7d97d7a2b fix(app/edge-configs): high numbers UI overlap (#931) 2025-07-24 16:37:07 +02:00
James Carppe
1afae99345 Updates for release 2.32.0 (#936) 2025-07-24 14:30:37 +12:00
Steven Kang
bdb2e2f417 fix(transport): portainer generated kubeconfig causes kubectl exec fail [R8S-430] (#929) 2025-07-24 13:11:13 +12:00
andres-portainer
bba3751268 fix(roar): return empty slices instead of nil for easier API compatibility BE-12053 (#932) 2025-07-23 14:06:20 -03:00
Ali
60bc04bc33 feat(helm): show manifest previews/changes when installing and upgrading a helm chart [r8s-405] (#898) 2025-07-23 10:52:58 +12:00
andres-portainer
a4cff13531 fix(bouncer): add missing domain to CSP header BE-12067 (#916) 2025-07-21 21:32:50 -03:00
andres-portainer
937456596a fix(edgegroups): convert the related endpoint IDs to roaring bitmaps to increase performance BE-12053 (#903) 2025-07-21 21:31:13 -03:00
Devon Steenberg
caf382b64c feat(git): support bearer token auth for git [BE-11770] (#879) 2025-07-22 08:36:08 +12:00
Ali
55cc250d2e fix(pods): represent pod container statuses correctly [r8s-416] (#910) 2025-07-21 15:05:08 +12:00
Ali
eaa2be017d fix(helm): ensure the form is not 'dirty', when the values are unchanged [r8s-421] (#901) 2025-07-17 12:07:11 +12:00
James Player
4e4c5ffdb6 fix(app/kubernetes): Fix listing of secrets and configmaps with same name [r8s-288] (#897) 2025-07-16 16:37:59 +12:00
James Player
383bcc4113 fix(docker/images): Fix image detail actions icon colours [be-12044] (#892) 2025-07-15 13:57:43 +12:00
James Player
9f906b7417 refactor(app/tests): Make createMockUsers more deterministic [r8s-406] (#887) 2025-07-14 17:16:33 +12:00
Cara Ryan
db2e168540 chore: bump version to 2.32.0 (#884) 2025-07-14 10:23:05 +12:00
Ali
2697d6c5d7 feat(oci): oci helm support [r8s-361] (#787) 2025-07-13 10:37:43 +12:00
andres-portainer
b6a6ce9aaf fix(endpointedge): fix a deadlock in createAsyncEdgeAgentEndpoint() BE-12039 (#883) 2025-07-11 18:54:05 -03:00
Ali
89f6a94bd8 chore(select): show data-cy react select [r8s-402] (#881) 2025-07-11 20:06:41 +12:00
Steven Kang
96f2d69ae5 feat(observability): alerting experimental feature (#801)
Co-authored-by: JamesPlayer <james.player@portainer.io>
2025-07-11 16:55:23 +12:00
Cara Ryan
b7e906701a fix(kubernetes): Namespace access permission changes role bindings not created [R8S-366] (#826) 2025-07-11 14:55:48 +12:00
Steven Kang
150d986179 fix: CVE-2025-53547 (#880) 2025-07-11 13:57:21 +12:00
James Player
ef10ea2a7d fix(ui): Fixed TagsDatatable name column link (#847) 2025-07-11 11:01:37 +12:00
Viktor Pettersson
3bf84e8b0c fix(tags): reconcile edge relations prior to deletion [BE-11969] (#867) 2025-07-10 10:52:12 +12:00
andres-portainer
ea4b334c7e feat(csp): enable CSP by default BE-11961 (#872) 2025-07-09 16:15:43 -03:00
Oscar Zhou
4d11aa8655 fix(tag): ignore "environment not found" when deleting tag [BE-11944] (#869) 2025-07-09 09:55:59 -03:00
andres-portainer
302deb8299 chore(dataservices): enhance ReadAll() so it takes predicates for filtering results BE-12016 (#866) 2025-07-07 14:29:56 -03:00
Viktor Pettersson
0c80b1067d fix(styles): update datetime picker styles for improved dark mode support [BE-11672] (#863) 2025-07-07 20:54:44 +12:00
Steven Kang
0a36d4fbfd fix: kubectl sdk - capture fatal error and return instead of exiting 1 [r7s-371] (#841) 2025-07-07 11:29:29 +12:00
Oscar Zhou
c20a8b5a68 fix(template): app template v3 error [BE-11998] (#854) 2025-07-04 11:49:33 -03:00
Devon Steenberg
8ffe4e284a fix(tls): set insecureSkipVerify to false in FIPS mode [BE-11932] (#849) 2025-07-04 10:48:54 +12:00
Steven Kang
1332f718ae feat: add warning events count next to the status badge (#828) 2025-07-04 10:07:57 +12:00
James Player
f4df51884c fix(tests): Fix ServicesDatatable tests - r8s-395 (#860) 2025-07-03 16:01:08 +12:00
James Carppe
ce86129478 Updates for release 2.31.3 (#859) 2025-07-03 15:17:50 +12:00
andres-portainer
097b125e3a fix(boltdb): change some options to increase performance BE-12002 (#848) 2025-07-02 18:17:19 -03:00
andres-portainer
5c6b53922a feat(go): upgrade to Go v1.24.4 BE-11774 (#855) 2025-07-02 18:14:29 -03:00
James Carppe
e1b9f23f73 Updates for release 2.27.9 (#853) 2025-07-02 17:45:59 +12:00
LP B
e1c480d3c3 feat(app/edge-stacks): summarize the edge stack statuses in the backend (#818) 2025-07-01 15:04:10 +02:00
Steven Kang
363a62d885 fix: bump the docker binary version to v28.3.0 [r8s-390] (#837) 2025-07-01 20:10:39 +12:00
James Player
c6ee9a5a52 feat(ui): Rebranding - r8s-374 (#840) 2025-07-01 12:58:31 +12:00
andres-portainer
cf5990ccba fix(edgestackstatus): improve error handling BE-11963 (#844) 2025-06-30 20:54:16 -03:00
Oscar Zhou
b6f3682a62 refactor(edge): init endpoint relation when endpoint is created [BE-11928] (#814) 2025-06-30 15:15:56 -03:00
LP B
b43f864511 fix(api/endpoints): filter out waiting room environments for non admins (#810) 2025-06-30 15:35:51 +02:00
Oscar Zhou
0556ffb4a1 feat(csrf): add trusted origins cli flags [BE-11972] (#836) 2025-06-27 17:41:10 -03:00
Ali
303047656e fix(k8s-services): avoid rerendering services table [r8s-387] (#832) 2025-06-27 22:48:40 +12:00
Steven Kang
8d29b5ae71 fix: kubeconfig download button inconsistency between http and https (#829) 2025-06-27 09:38:04 +12:00
James Carppe
7d7ae24351 Updates for release 2.31.2 (#834) 2025-06-26 15:41:23 +12:00
James Carppe
97838e614d Updates for release 2.27.8 (#827) 2025-06-25 17:11:58 +12:00
Steven Kang
c897baad20 fix: fetching values from both install and upgrade views - develop [R8S-368] (#820) 2025-06-24 15:46:10 +12:00
andres-portainer
d51e9205d9 fix(endpointrelation): use a read-write transaction for mutations BE-11964 (#819) 2025-06-20 20:03:35 -03:00
James Carppe
e051c86bb5 Updates for release 2.31.1 (#816) 2025-06-19 14:07:18 +12:00
Steven Kang
c2b48cd003 feat(k8s): CloudNativePG in applications list and details - [R8S-357] (#777) 2025-06-19 09:03:52 +12:00
James Carppe
a7009eb8d5 Update bug report template for 2.27.7 (#805) 2025-06-17 12:52:12 +12:00
andres-portainer
036b87b649 fix(middlewares): fix data race in WithEndpoint() BE-11949 (#803) 2025-06-16 12:56:51 -03:00
Steven Kang
f07a3b1875 security: cve-2025-22874 & cve-2025-22871 bump go to 1.23.10 (#798) 2025-06-12 17:30:53 +12:00
Yajith Dayarathna
6e89ccc0ae fix(api-documentation): swagger document genration error (#795) 2025-06-12 13:39:34 +12:00
James Carppe
cc67612432 Update bug report template for 2.31.0 (#793) 2025-06-12 13:26:25 +12:00
Malcolm Lockyer
17ebe221bb chore: bump version to 2.31.0 (#789) 2025-06-10 16:47:17 +12:00
Ali
1963edda66 feat(helm): add registry dropdown [r8s-340] (#779) 2025-06-09 20:08:50 +12:00
Cara Ryan
c9e3717ce3 fix(kubernetes): Display more than 10 workloads under Helm expandable in the Applications view [R8S-339] (#781) 2025-06-09 15:12:24 +12:00
Oscar Zhou
9a85246631 fix(edgestack): display deploying status by default after creating edgestack [BE-11924] (#783) 2025-06-07 09:06:57 +12:00
andres-portainer
75f165d1ff feat(edgestackstatus): optimize the Edge Stack structures BE-11740 (#756) 2025-06-05 19:46:10 -03:00
Viktor Pettersson
eaf0deb2f6 feat(update-schedules): new update schedules view [BE-11754, BE-11887] (#686) 2025-06-05 17:03:43 +12:00
Ali
a9061e5258 feat(helm): enhance helm chart install [r8s-341] (#766) 2025-06-05 13:13:45 +12:00
James Player
caac45b834 feat(UI): Add repository url to Helm chart installation list items (#769) 2025-06-05 10:14:39 +12:00
LP B
24ff7a7911 chore(deps): upgrade docker/cli to v28.2.1 | docker/docker to v28.2.1 | docker/compose to v2.36.2 (#758) 2025-05-30 09:12:27 +02:00
Devon Steenberg
b767dcb27e fix(proxy): whitelist headers for proxy to forward [BE-11819] (#665) 2025-05-30 11:49:23 +12:00
Cara Ryan
731afbee46 feat(helm): filter on chart versions at API level [R8S-324] (#754) 2025-05-27 15:20:28 +12:00
Cara Ryan
07dfd981a2 fix(kubernetes): events api to call the backend [R8S-243] (#563) 2025-05-27 13:55:31 +12:00
Cara Ryan
32ef208278 Revert "feat(helm): filter on chart versions at API level [R8S-324]" (#753) 2025-05-26 16:58:53 +12:00
Cara Ryan
a80b185e10 feat(helm): filter on chart versions at API level [R8S-324] (#747) 2025-05-26 14:10:38 +12:00
Malcolm Lockyer
b96328e098 fix(async-perf): In async poll snapshot handling, reduce redundant json marshal [be-11861] (#726) 2025-05-23 12:42:45 +12:00
Devon Steenberg
45471ce86d fix(docker): check len of device capabilities [BE-11898] (#750) 2025-05-22 14:27:14 +12:00
Viktor Pettersson
1bc91d0c7c fix(edge-update): set edge stack status to EdgeStackStatusError to avoid redeployment of portainer-updater [BE-11855] (#714) 2025-05-20 08:28:40 +02:00
James Carppe
799325d9f8 Update bug report template for 2.30.1 (#749) 2025-05-20 14:40:43 +12:00
James Carppe
b540709e03 Update bug report template for 2.30.0 (#737) 2025-05-15 12:09:28 +12:00
Oscar Zhou
44daab04ac fix(libclient): option to disable external http request [BE-11696] (#719) 2025-05-15 09:54:35 +12:00
Ali
ee65223ee7 chore: bump version to 2.30.0 (#735) 2025-05-14 17:35:05 +12:00
Ali
d49fcd8f3e feat(helm): make the atomic flag optional [r8s-314] (#733) 2025-05-14 16:31:42 +12:00
Ali
4ee349bd6b feat(helm): helm actions [r8s-259] (#715)
Co-authored-by: James Player <james.player@portainer.io>
Co-authored-by: Cara Ryan <cara.ryan@portainer.io>
Co-authored-by: stevensbkang <skan070@gmail.com>
2025-05-13 22:15:04 +12:00
Steven Kang
dfa32b6755 chore: add KaaS deprecation notice (#727)
Co-authored-by: testA113 <aliharriss1995@gmail.com>
2025-05-13 16:33:14 +12:00
Ali
0b69729173 chrore(microk8s): add deprecation notice [r8s-320] (#728) 2025-05-13 14:28:42 +12:00
Steven Kang
3b313b9308 fix(kubectl): rollout restart [r8s-322] (#729) 2025-05-13 11:35:44 +12:00
Devon Steenberg
1abdf42f99 feat(libstack): expose env vars with PORTAINER_ prefix [BE-11661] (#687) 2025-05-12 11:18:04 +12:00
andres-portainer
9fdc535d6b fix(csrf): skip the trusted origins check for plain-text HTTP requests BE-11832 (#710)
Co-authored-by: andres-portainer <andres-portainer@users.noreply.github.com>
Co-authored-by: oscarzhou <oscar.zhou@portainer.io>
2025-05-09 14:39:29 +12:00
James Carppe
b9b734ceda Update bug report template for 2.27.6 (#721) 2025-05-09 14:39:15 +12:00
Viktor Pettersson
3b05505527 fix(update-schedules): display enriched error logs for agent updates [BE-11756] (#693) 2025-05-08 10:24:20 +02:00
Steven Kang
bc29419c17 refactor: replace the kubectl binary with the upstream sdk (#524) 2025-05-07 20:40:38 +12:00
James Carppe
4d4360b86b Update bug report template for 2.27.5 (#705) 2025-05-02 13:14:39 +12:00
James Carppe
8cc28761d7 Update bug report template for 2.29.2 (#692) 2025-04-24 16:47:31 +12:00
Viktor Pettersson
24b3499c70 fix(dependencies): downgrade gorilla/csrf to v1.7.2 (#684) 2025-04-24 12:13:45 +12:00
Devon Steenberg
4e4fd5a4b4 fix(validate): refactor validate functions [BE-11574] (#683) 2025-04-24 08:59:44 +12:00
Devon Steenberg
1a3df54c04 fix(govalidator): replace govalidator dependency [BE-11574] (#673) 2025-04-23 13:59:51 +12:00
James Carppe
3edacee59b Update bug report template for 2.29.1 (#682) 2025-04-23 13:35:20 +12:00
andres-portainer
f25d31b92b fix(code): remove dead code and reduce duplication BE-11826 (#680) 2025-04-22 18:09:36 -03:00
Ali
c91c8a6467 feat(helm): rollback helm chart [r8s-287] (#660) 2025-04-23 08:58:34 +12:00
Ali
61d6ac035d feat(helm): auto refresh helm resources [r8s-298] (#672) 2025-04-23 08:58:21 +12:00
Oscar Zhou
9a9373dd0f fix: cve-2025-22871 [BE-11825] (#678) 2025-04-22 21:29:39 +12:00
andres-portainer
e319a7a5ae fix(linter): enable ineffassign BE-10204 (#669) 2025-04-21 19:27:14 -03:00
andres-portainer
342549b546 fix(validate): remove dead code BE-11824 (#671) 2025-04-21 18:59:51 -03:00
Ali
bbe94f55b6 feat(helm): uninstall helm app from details view [r8s-285] (#648) 2025-04-22 09:52:52 +12:00
andres-portainer
6fcf1893d3 fix(code): remove duplicated code BE-11821 (#667) 2025-04-18 17:34:34 -03:00
Ali
01afe34df7 fix(namespaces): fix service not found error [r8s-296] (#664) 2025-04-17 12:29:37 +12:00
Devon Steenberg
be3e8e3332 fix(proxy): don't forward sensitive headers [BE-11819] (#654) 2025-04-16 15:30:56 +12:00
James Carppe
cf31700903 Update bug report template for 2.29.0 (#655) 2025-04-16 13:34:38 +12:00
andres-portainer
66dee6fd06 fix(codemirror): optimize the autocompletion performance R8S-294 (#650)
Co-authored-by: andres-portainer <andres-portainer@users.noreply.github.com>
2025-04-16 12:27:30 +12:00
andres-portainer
bfa55f8c67 fix(logs): remove duplicated code BE-11821 (#653) 2025-04-15 17:16:04 -03:00
James Carppe
5a2318d01f Update bug report template for 2.27.4 (#646) 2025-04-15 13:50:14 +12:00
Steven Kang
7de037029f security: cve-2025-30204 and other low ones - develop [BE-11781] (#638) 2025-04-15 09:58:55 +12:00
andres-portainer
730c1115ce fix(proxy): remove code duplication BE-11627 (#644) 2025-04-14 17:46:40 -03:00
Oscar Zhou
2c37f32fa6 version: bump version to 2.29.0 (#637) 2025-04-14 13:13:38 +12:00
LP B
7aa9f8b1c3 Revert "feat(app): 1s staleTime to avoid sending repeated requests" (#639) 2025-04-14 11:12:11 +12:00
LP B
c331ada086 feat(app): 1s staleTime to avoid sending repeated requests (#607) 2025-04-14 09:05:48 +12:00
Oscar Zhou
ebc25e45d3 fix(edge): redeploy edge stack doesn't apply to std agents [BE-11766] (#633) 2025-04-12 10:24:23 +12:00
andres-portainer
f82921d2a1 fix(edgestacks): fix edge stack update when using Git BE-11766 (#629) 2025-04-10 20:12:27 -03:00
Ali
d68fe42918 fix(apps): better align sub tables [r8s-255] (#617) 2025-04-11 08:39:39 +12:00
Oscar Zhou
823f2a7991 fix(edge): missing env var in async agent docker snapshot [BE-11709] (#625) 2025-04-11 08:26:11 +12:00
Ali
0ca9321db1 feat(helm): update helm view [r8s-256] (#582)
Co-authored-by: Cara Ryan <cara.ryan@portainer.io>
Co-authored-by: James Player <james.player@portainer.io>
Co-authored-by: stevensbkang <skan070@gmail.com>
2025-04-10 16:08:24 +12:00
James Player
46eddbe7b9 fix(UI): Make sure localStorage.getUserId actually returns user id R8S-290 (#623) 2025-04-09 09:09:07 +12:00
James Player
64c796a8c3 fix(kubernetes): Config maps and secrets show as unused BE-11684 (#596)
Co-authored-by: stevensbkang <skan070@gmail.com>
2025-04-08 12:52:21 +12:00
James Player
264ff5457b chore(kubernetes): Migrate Helm Templates View to React R8S-239 (#587) 2025-04-08 12:51:36 +12:00
LP B
ad89df4d0d refactor(app): reword docker security features (#608) 2025-04-07 17:14:51 +02:00
Anthony Lapenna
0f10b8ba2b api: update TeamInspect doc (#618) 2025-04-07 11:25:23 +12:00
Oscar Zhou
940bf990f9 fix(edgeconfig): add edge config file interpolation info message on edge stack page [BE-11741] (#606) 2025-04-04 11:56:42 +13:00
Devon Steenberg
1b8fbbe7d7 fix(libstack): compose project working directory [BE-11751] (#600) 2025-04-04 09:07:35 +13:00
James Player
f6f07f4690 improvement(kubernetes): right align tags in datatables R8S-250 (#601)
Co-authored-by: testA113 <aliharriss1995@gmail.com>
2025-04-03 14:18:31 +13:00
Anthony Lapenna
3800249921 api: use response code 200 (#604) 2025-04-03 11:12:24 +13:00
Oscar Zhou
a5d857d5e7 feat(docker): add --pull-limit-check-disabled cli flag [BE-11739] (#581) 2025-04-03 09:13:01 +13:00
Devon Steenberg
4c1e80ff58 fix(axios): correctly encode urls [BE-11648] (#517)
fix(edgegroup): nil pointer defer
2025-04-02 08:51:58 +13:00
Oscar Zhou
7e5db1f55e refactor(edgegroup): optimize edge group search performance [BE-11716] (#579) 2025-04-01 14:05:56 +13:00
Anthony Lapenna
1edc56c0ce api: remove name from edgegroupupdate payload validation (#588) 2025-04-01 13:25:09 +13:00
Anthony Lapenna
4066a70ea5 api: fix typo in operation name (#585) 2025-04-01 13:24:55 +13:00
andres-portainer
a0d36cf87a fix(server): add panic logging middleware BE-11750 (#599) 2025-03-31 18:58:20 -03:00
Viktor Pettersson
1d12011eb5 fix(edge groups): make large edge groups editable [BE-11720] (#558) 2025-03-28 15:16:05 +01:00
Steven Kang
7c01f84a5c fix: improve the node view for detecting roles - develop (#354) 2025-03-28 10:52:59 +13:00
Ali
81c5f4acc3 feat(editor): provide yaml validation for docker compose in the portainer web editor [BE-11697] (#526) 2025-03-27 17:11:55 +13:00
Ali
0ebfe047d1 feat(helm): use helm upgrade for install [r8s-258] (#568) 2025-03-26 11:32:26 +13:00
samdulam
e68bd53e30 Update bug_report template with 2.27.3 (#572) 2025-03-25 08:40:15 +05:30
andres-portainer
cdd9851f72 fix(stubs): clean up the stubs and mocks BE-11722 (#557) 2025-03-24 19:56:08 -03:00
andres-portainer
995c3ef81b feat(snapshots): avoid parsing raw snapshots when possible BE-11724 (#560) 2025-03-24 19:33:05 -03:00
James Player
0dfde1374d fix(kubernetes): Cluster reservation CPU not showing R8S-268 (#569) 2025-03-25 10:59:28 +13:00
Devon Steenberg
34235199dd fix(libstack): correctly load COMPOSE_* env vars [BE-11474] (#536) 2025-03-25 08:57:23 +13:00
Anthony Lapenna
5d1cd670e9 docs: review TeamMembershipCreate API operation (#565) 2025-03-24 09:55:33 +13:00
Anthony Lapenna
1d8ea7b0ee docs: review TeamUpdate API operation (#564) 2025-03-21 16:45:43 +13:00
Oscar Zhou
4b218553c3 fix(libstack): data loss for stack with relative path [FR-437] (#548) 2025-03-21 09:19:25 +13:00
Viktor Pettersson
a61c1004d3 fix(agent-updates): fix remote agent updates cannot be scheduled properly for large edge groups [BE-11691] (#528) 2025-03-20 10:05:15 +01:00
James Carppe
5d1b42b314 Update bug report template for 2.28.1 (#549) 2025-03-20 15:54:53 +13:00
Oscar Zhou
4b992c6f3e fix(k8s/config): force insecure-skip-tls-verify option for internal use [BE-11706] (#537) 2025-03-20 08:49:27 +13:00
Viktor Pettersson
38562f9560 fix(api): remove duplicated /users/me route [BE-11689] (#516) 2025-03-19 13:08:03 +01:00
James Carppe
c01f0271fe Update bug report template for 2.27.2 (#539) 2025-03-19 17:41:36 +13:00
andres-portainer
0296998fae fix(users): optimize the /users/me API endpoint BE-11688 (#515)
Co-authored-by: andres-portainer <andres-portainer@users.noreply.github.com>
Co-authored-by: LP B <xAt0mZ@users.noreply.github.com>
Co-authored-by: JamesPlayer <james.player@portainer.io>
2025-03-18 17:55:53 -03:00
James Player
a67b917bdd Bump version to 2.28.0 (#523) 2025-03-17 16:00:33 +13:00
Steven Kang
2791bd123c fix: cve-2025-22869 develop (#511) 2025-03-17 12:24:39 +13:00
andres-portainer
e1f9b69cd5 feat(edgestack): improve the structure to make JSON operations faster BE-11668 (#475) 2025-03-15 10:10:17 -03:00
andres-portainer
2c05496962 feat(edgeconfigs): parse .env config files for interpolation BE-11673 (#514) 2025-03-15 10:09:22 -03:00
Oscar Zhou
66bcf9223a fix(k8s/config): avoid hardcoded "insecure-skip-tls-verify" in kubeconfig [BE-11651] (#500) 2025-03-14 11:20:41 +13:00
James Player
993f69db37 chore(app): Migrate helm templates list to react (#492) 2025-03-14 10:37:14 +13:00
Ali
58317edb6d fix(namespaces): only show namespaces with access [r8s-251] (#501) 2025-03-14 07:57:06 +13:00
Steven Kang
417891675d fix: ensure no non-admin users have access to system namespaces (#499) 2025-03-13 16:43:56 +13:00
Steven Kang
8b7aef883a fix: display unscheduled applications (#496)
Co-authored-by: JamesPlayer <james.player@portainer.io>
2025-03-13 14:13:18 +13:00
Ali
b5961d79f8 refactor(helm): helm binary to sdk refactor [r8s-229] (#463)
Co-authored-by: stevensbkang <skan070@gmail.com>
2025-03-13 12:20:16 +13:00
LP B
0d25f3f430 fix(app): restore gitops update options (#419) 2025-03-12 14:00:31 +01:00
Steven Kang
798fa2396a feat: kubernets service - display external hostname (#486) 2025-03-12 22:34:00 +13:00
James Player
28b222fffa fix(app): Make sure empty tables don't have select all rows checkbox checked (#489) 2025-03-12 10:34:07 +13:00
James Player
b57855f20d fix(app): datatable global checkbox doesn't reflect the selected state (#470) 2025-03-10 09:21:20 +13:00
Cara Ryan
438b1f9815 fix(helm): Remove duplicate helm instructions in CE [BE-11670] (#482) 2025-03-06 09:35:31 +13:00
LP B
2bccb3589e fix(app/images): nodeName on images list links (#484) 2025-03-05 16:04:16 +01:00
James Player
52bb06eb7b chore(helm): Convert helm details view to react (#476) 2025-03-03 11:29:58 +13:00
Malcolm Lockyer
8e6d0e7d42 perf(endpointrelation): Part 2 of fixing endpointrelation perf [be-11616] (#471) 2025-02-28 14:41:54 +13:00
Steven Kang
5526fd8296 chore: bump 2.27.1 - develop (#468) 2025-02-27 11:02:25 +13:00
Anthony Lapenna
a554a8c49f api: remove server-ce swagger.json (#467) 2025-02-26 16:10:02 +13:00
James Player
7759d762ab chore(react): Convert cluster details to react CE (#466) 2025-02-26 14:13:50 +13:00
Oscar Zhou
dd98097897 fix(libstack): miss to read default .env file [BE-11638] (#458) 2025-02-26 13:00:25 +13:00
Steven Kang
cc73b7831f fix: cve-2024-50338 - develop (#461) 2025-02-25 12:55:44 +13:00
James Carppe
9c243cc8dd Update bug report template for 2.27.0 (#450) 2025-02-20 13:38:26 +13:00
Oscar Zhou
5d568a3f32 fix(edge): edge stack pending when yaml file is under same root folder of edge configs [BE-11620] (#447) 2025-02-20 12:09:26 +13:00
Steven Kang
1b83542d41 chore: bump version to 2.27.0 - develop (#445) 2025-02-20 09:42:52 +13:00
LP B
cf95d91db3 fix(swarm): keep swarm stack stop command attached (#444) 2025-02-19 19:25:28 +01:00
Viktor Pettersson
41c1d88615 fix(edge): configure persisted mTLS certificates on start-up [BE-11622] (#437)
Co-authored-by: andres-portainer <andres-portainer@users.noreply.github.com>
Co-authored-by: oscarzhou <oscar.zhou@portainer.io>
Co-authored-by: Oscar Zhou <100548325+oscarzhou-portainer@users.noreply.github.com>
2025-02-19 14:46:39 +13:00
Steven Kang
df8673ba40 version: bump version to 2.27.0-rc3 - develop (#426) 2025-02-14 08:39:02 +13:00
andres-portainer
96b1869a0c fix(swarm): fix the Host field when listing images BE-10827 (#352)
Co-authored-by: andres-portainer <andres-portainer@users.noreply.github.com>
Co-authored-by: LP B <xAt0mZ@users.noreply.github.com>
2025-02-12 00:47:45 +01:00
Oscar Zhou
e45b852c09 fix(platform): remove error log when local env is not found [BE-11353] (#364) 2025-02-12 09:23:52 +13:00
Steven Kang
2d3e5c3499 workaround: leave the globally set helm repo to empty and add disclaimer - develop (#409) 2025-02-11 15:36:29 +13:00
Oscar Zhou
b25bf1e341 fix(podman): missing filter in homepage [BE-11502] (#404) 2025-02-10 21:08:27 +13:00
Oscar Zhou
4bb80d3e3a fix(setting): failed to persist edge computer setting [BE-11403] (#395) 2025-02-10 21:05:15 +13:00
Steven Kang
03575186a7 remove deprecated api endpoints - develop [BE-11510] (#399) 2025-02-10 10:46:36 +13:00
Steven Kang
935c7dd496 feat: improve diagnostics stability - develop (#355) 2025-02-10 10:45:47 +13:00
Steven Kang
1b2dc6a133 version: bump version to 2.27.0-rc2 - develop (#402) 2025-02-07 14:47:49 +13:00
Steven Kang
d4e2b2188e chore: bump go version to 1.23.5 develop (#392) 2025-02-07 08:48:19 +13:00
viktigpetterr
9658f757c2 fix(endpoints): use the post method for batch delete API operations [BE-11573] (#394) 2025-02-06 18:14:43 +01:00
Ali
371e84d9a5 fix(podman): create new image from a container in podman [r8s-90] (#347) 2025-02-05 20:22:33 +13:00
Steven Kang
5423a2f1b9 security: cve-2025-21613 develop (#390) 2025-02-05 15:56:30 +13:00
Oscar Zhou
7001f8e088 fix(edge): check all endpoint_relation db query logic [BE-11602] (#378) 2025-02-05 15:20:20 +13:00
Steven Kang
678cd54553 security: cve-2024-45338 develop (#386) 2025-02-05 15:03:39 +13:00
Oscar Zhou
bc19d6592f fix(libstack): cannot open std edge stack log page [BE-11603] (#384) 2025-02-05 12:17:51 +13:00
James Player
5af0859f67 fix(datatables): "Select all" should select only elements of the current page (#376) 2025-02-04 15:34:33 +13:00
Oscar Zhou
379711951c fix(edgegroup): failed to associate env to static edge group [BE-11599] (#368) 2025-02-04 09:41:24 +13:00
LP B
a50a9c5617 fix(app/edge): edge stacks webhooks cannot be disabled once created (#372) 2025-02-03 20:50:24 +01:00
LP B
c0d30a455f fix(api/edge): backend panic on edge stack removal (#371) 2025-02-03 20:25:25 +01:00
LP B
9a3f6b21d2 feat(app/service-details): hide view while loading data (#348) 2025-02-03 14:20:35 +01:00
877 changed files with 40040 additions and 13703 deletions

View file

@@ -2,18 +2,17 @@ name: Bug Report
description: Create a report to help us improve.
labels: kind/bug,bug/need-confirmation
body:
- type: markdown
attributes:
value: |
# Welcome!
The issue tracker is for reporting bugs. If you have an [idea for a new feature](https://github.com/orgs/portainer/discussions/categories/ideas) or a [general question about Portainer](https://github.com/orgs/portainer/discussions/categories/help) please post in our [GitHub Discussions](https://github.com/orgs/portainer/discussions).
You can also ask for help in our [community Slack channel](https://join.slack.com/t/portainer/shared_invite/zt-txh3ljab-52QHTyjCqbe5RibC2lcjKA).
Please note that we only provide support for current versions of Portainer. You can find a list of supported versions in our [lifecycle policy](https://docs.portainer.io/start/lifecycle).
**DO NOT FILE ISSUES FOR GENERAL SUPPORT QUESTIONS**.
- type: checkboxes
@@ -45,7 +44,7 @@
- type: textarea
attributes:
label: Problem Description
description: A clear and concise description of what the bug is.
validations:
required: true
@@ -71,7 +70,7 @@
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
validations:
required: true
@@ -92,9 +91,31 @@
- type: dropdown
attributes:
label: Portainer version
- description: We only provide support for current versions of Portainer as per the lifecycle policy linked above. If you are on an older version of Portainer we recommend [upgrading first](https://docs.portainer.io/start/upgrade) in case your bug has already been fixed.
+ description: We only provide support for current versions of Portainer as per the lifecycle policy linked above. If you are on an older version of Portainer we recommend [updating first](https://docs.portainer.io/start/upgrade) in case your bug has already been fixed.
multiple: false
options:
+ - '2.32.0'
+ - '2.31.3'
+ - '2.31.2'
+ - '2.31.1'
+ - '2.31.0'
+ - '2.30.1'
+ - '2.30.0'
+ - '2.29.2'
+ - '2.29.1'
+ - '2.29.0'
+ - '2.28.1'
+ - '2.28.0'
+ - '2.27.9'
+ - '2.27.8'
+ - '2.27.7'
+ - '2.27.6'
+ - '2.27.5'
+ - '2.27.4'
+ - '2.27.3'
+ - '2.27.2'
+ - '2.27.1'
+ - '2.27.0'
- '2.26.1'
- '2.26.0'
- '2.25.1'
@@ -109,20 +130,6 @@
- '2.21.2'
- '2.21.1'
- '2.21.0'
- - '2.20.3'
- - '2.20.2'
- - '2.20.1'
- - '2.20.0'
- - '2.19.5'
- - '2.19.4'
- - '2.19.3'
- - '2.19.2'
- - '2.19.1'
- - '2.19.0'
- - '2.18.4'
- - '2.18.3'
- - '2.18.2'
- - '2.18.1'
validations:
required: true
@@ -160,7 +167,7 @@
- type: input
attributes:
label: Browser
description: |
Enter your browser and version. Example: Google Chrome 114.0
validations:
required: false

View file

@@ -12,8 +12,18 @@ linters:
- copyloopvar
- intrange
- perfsprint
+ - ineffassign
+ - bodyclose
+ - forbidigo
linters-settings:
+ forbidigo:
+ analyze-types: true
+ forbid:
+ - p: ^tls\.Config$
+ msg: 'Use crypto.CreateTLSConfiguration() instead'
+ - p: ^tls\.Config\.(InsecureSkipVerify|MinVersion|MaxVersion|CipherSuites|CurvePreferences)$
+ msg: 'Do not set this field directly, use crypto.CreateTLSConfiguration() instead'
depguard:
rules:
main:
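
For orientation, here is a minimal sketch of the usage pattern these forbidigo rules push code towards: obtaining TLS settings from the crypto.CreateTLSConfiguration() helper named in the linter messages rather than constructing a tls.Config by hand. The import path and the helper's return value are assumptions for illustration, not the actual Portainer API.

package example

import (
	"net/http"

	"github.com/portainer/portainer/api/crypto" // assumed location of the helper named by the linter message
)

// newHTTPClient builds an HTTP client without constructing tls.Config directly,
// which is exactly the usage the forbidigo patterns above would flag.
func newHTTPClient() *http.Client {
	// Assumption: CreateTLSConfiguration() returns a *tls.Config carrying the
	// centrally managed (FIPS-compliant) defaults.
	tlsConfig := crypto.CreateTLSConfiguration()

	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsConfig},
	}
}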

View file

@@ -8,9 +8,9 @@ Portainer consists of a single container that can run on any cluster. It can be
**Portainer Business Edition** builds on the open-source base and includes a range of advanced features and functions (like RBAC and Support) that are specific to the needs of business users.
- - [Compare Portainer CE and Compare Portainer BE](https://portainer.io/products)
+ - [Compare Portainer CE and Compare Portainer BE](https://www.portainer.io/features)
- [Take3 get 3 free nodes of Portainer Business for as long as you want them](https://www.portainer.io/take-3)
- - [Portainer BE install guide](https://install.portainer.io)
+ - [Portainer BE install guide](https://academy.portainer.io/install/)
## Latest Version
@@ -20,22 +20,19 @@ Portainer CE is updated regularly. We aim to do an update release every couple o
## Getting started
- - [Deploy Portainer](https://docs.portainer.io/start/install)
+ - [Deploy Portainer](https://docs.portainer.io/start/install-ce)
- [Documentation](https://docs.portainer.io)
- [Contribute to the project](https://docs.portainer.io/contribute/contribute)
## Features & Functions
- View [this](https://www.portainer.io/products) table to see all of the Portainer CE functionality and compare to Portainer Business.
+ View [this](https://www.portainer.io/features) table to see all of the Portainer CE functionality and compare to Portainer Business.
- - [Portainer CE for Docker / Docker Swarm](https://www.portainer.io/solutions/docker)
- - [Portainer CE for Kubernetes](https://www.portainer.io/solutions/kubernetes-ui)
## Getting help
Portainer CE is an open source project and is supported by the community. You can buy a supported version of Portainer at portainer.io
- Learn more about Portainer's community support channels [here.](https://www.portainer.io/get-support-for-portainer)
+ Learn more about Portainer's community support channels [here.](https://www.portainer.io/resources/get-help/get-support)
- Issues: https://github.com/portainer/portainer/issues
- Slack (chat): [https://portainer.io/slack](https://portainer.io/slack)
@@ -53,13 +50,13 @@ You can join the Portainer Community by visiting [https://www.portainer.io/join-
## Work for us
- If you are a developer, and our code in this repo makes sense to you, we would love to hear from you. We are always on the hunt for awesome devs, either freelance or employed. Drop us a line to info@portainer.io with your details and/or visit our [careers page](https://portainer.io/careers).
+ If you are a developer, and our code in this repo makes sense to you, we would love to hear from you. We are always on the hunt for awesome devs, either freelance or employed. Drop us a line to success@portainer.io with your details and/or visit our [careers page](https://apply.workable.com/portainer/).
## Privacy
**To make sure we focus our development effort in the right places we need to know which features get used most often. To give us this information we use [Matomo Analytics](https://matomo.org/), which is hosted in Germany and is fully GDPR compliant.**
- When Portainer first starts, you are given the option to DISABLE analytics. If you **don't** choose to disable it, we collect anonymous usage as per [our privacy policy](https://www.portainer.io/privacy-policy). **Please note**, there is no personally identifiable information sent or stored at any time and we only use the data to help us improve Portainer.
+ When Portainer first starts, you are given the option to DISABLE analytics. If you **don't** choose to disable it, we collect anonymous usage as per [our privacy policy](https://www.portainer.io/legal/privacy-policy). **Please note**, there is no personally identifiable information sent or stored at any time and we only use the data to help us improve Portainer.
## Limitations

View file

@@ -16,7 +16,7 @@ import (
// GetAgentVersionAndPlatform returns the agent version and platform
//
// it sends a ping to the agent and parses the version and platform from the headers
- func GetAgentVersionAndPlatform(endpointUrl string, tlsConfig *tls.Config) (portainer.AgentPlatform, string, error) {
+ func GetAgentVersionAndPlatform(endpointUrl string, tlsConfig *tls.Config) (portainer.AgentPlatform, string, error) { //nolint:forbidigo
httpCli := &http.Client{
Timeout: 3 * time.Second,
}

View file

@@ -2,7 +2,6 @@ package archive
import (
"archive/zip"
- "bytes"
"fmt"
"io"
"os"
@@ -12,50 +11,6 @@ import (
"github.com/pkg/errors"
)
- // UnzipArchive will unzip an archive from bytes into the dest destination folder on disk
- func UnzipArchive(archiveData []byte, dest string) error {
- zipReader, err := zip.NewReader(bytes.NewReader(archiveData), int64(len(archiveData)))
- if err != nil {
- return err
- }
- for _, zipFile := range zipReader.File {
- err := extractFileFromArchive(zipFile, dest)
- if err != nil {
- return err
- }
- }
- return nil
- }
- func extractFileFromArchive(file *zip.File, dest string) error {
- f, err := file.Open()
- if err != nil {
- return err
- }
- defer f.Close()
- data, err := io.ReadAll(f)
- if err != nil {
- return err
- }
- fpath := filepath.Join(dest, file.Name)
- outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
- if err != nil {
- return err
- }
- _, err = io.Copy(outFile, bytes.NewReader(data))
- if err != nil {
- return err
- }
- return outFile.Close()
- }
// UnzipFile will decompress a zip archive, moving all files and folders
// within the zip file (parameter 1) to an output directory (parameter 2).
func UnzipFile(src string, dest string) error {
@@ -76,11 +31,11 @@ func UnzipFile(src string, dest string) error {
if f.FileInfo().IsDir() {
// Make Folder
os.MkdirAll(p, os.ModePerm)
continue
}
- err = unzipFile(f, p)
- if err != nil {
+ if err := unzipFile(f, p); err != nil {
return err
}
}
@@ -93,20 +48,20 @@ func unzipFile(f *zip.File, p string) error {
if err := os.MkdirAll(filepath.Dir(p), os.ModePerm); err != nil {
return errors.Wrapf(err, "unzipFile: can't make a path %s", p)
}
outFile, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return errors.Wrapf(err, "unzipFile: can't create file %s", p)
}
defer outFile.Close()
rc, err := f.Open()
if err != nil {
return errors.Wrapf(err, "unzipFile: can't open zip file %s in the archive", f.Name)
}
defer rc.Close()
- _, err = io.Copy(outFile, rc)
- if err != nil {
+ if _, err = io.Copy(outFile, rc); err != nil {
return errors.Wrapf(err, "unzipFile: can't copy an archived file content")
}

View file

@@ -9,10 +9,15 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
+ "github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/require"
)
+ func init() {
+ fips.InitFIPS(false)
+ }
func TestPingAgentPanic(t *testing.T) {
endpoint := &portainer.Endpoint{
ID: 1,

View file

@@ -4,7 +4,6 @@ import (
"encoding/base64"
"errors"
"fmt"
- "math/rand"
"net"
"strings"
"time"
@@ -14,6 +13,7 @@ import (
"github.com/portainer/portainer/api/internal/edge/cache"
"github.com/portainer/portainer/api/internal/endpointutils"
"github.com/portainer/portainer/pkg/libcrypto"
+ "github.com/portainer/portainer/pkg/librand"
"github.com/dchest/uniuri"
"github.com/rs/zerolog/log"
@@ -200,7 +200,9 @@ func (service *Service) getUnusedPort() int {
conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
if err == nil {
- conn.Close()
+ if err := conn.Close(); err != nil {
+ log.Warn().Msg("failed to close tcp connection that checks if port is free")
+ }
log.Debug().
Int("port", port).
@@ -213,7 +215,7 @@
}
func randomInt(min, max int) int {
- return min + rand.Intn(max-min)
+ return min + librand.Intn(max-min)
}
func generateRandomCredentials() (string, string) {
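
For context, librand.Intn in the change above stands in for math/rand so random port selection stays acceptable under FIPS, per the crypto/rand commit at the top of this list. A minimal sketch of how such a helper could be built on top of crypto/rand follows; this is an illustrative assumption, not the actual pkg/librand code.

package librand

import (
	"crypto/rand"
	"math/big"
)

// Intn returns a uniformly distributed int in [0, n), sourced from
// crypto/rand so it remains usable in FIPS mode. It panics for n <= 0,
// mirroring math/rand.Intn.
func Intn(n int) int {
	if n <= 0 {
		panic("librand: invalid argument to Intn")
	}
	v, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		// Failures reading from crypto/rand are not recoverable here.
		panic(err)
	}
	return int(v.Int64())
}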

api/chisel/tunnel_test.go (new file, 79 lines)
View file

@@ -0,0 +1,79 @@
package chisel
import (
"net"
"strings"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
type testSettingsService struct {
dataservices.SettingsService
}
func (s *testSettingsService) Settings() (*portainer.Settings, error) {
return &portainer.Settings{
EdgeAgentCheckinInterval: 1,
}, nil
}
type testStore struct {
dataservices.DataStore
}
func (s *testStore) Settings() dataservices.SettingsService {
return &testSettingsService{}
}
func TestGetUnusedPort(t *testing.T) {
testCases := []struct {
name string
existingTunnels map[portainer.EndpointID]*portainer.TunnelDetails
expectedError error
}{
{
name: "simple case",
},
{
name: "existing tunnels",
existingTunnels: map[portainer.EndpointID]*portainer.TunnelDetails{
portainer.EndpointID(1): {
Port: 53072,
},
portainer.EndpointID(2): {
Port: 63072,
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
store := &testStore{}
s := NewService(store, nil, nil)
s.activeTunnels = tc.existingTunnels
port := s.getUnusedPort()
if port < 49152 || port > 65535 {
t.Fatalf("Expected port to be inbetween 49152 and 65535 but got %d", port)
}
for _, tun := range tc.existingTunnels {
if tun.Port == port {
t.Fatalf("returned port %d already has an existing tunnel", port)
}
}
conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
if err == nil {
// Ignore error
_ = conn.Close()
t.Fatalf("expected port %d to be unused", port)
} else if !strings.Contains(err.Error(), "connection refused") {
t.Fatalf("unexpected error: %v", err)
}
})
}
}

View file

@@ -60,6 +60,9 @@ func CLIFlags() *portainer.CLIFlags {
LogLevel: kingpin.Flag("log-level", "Set the minimum logging level to show").Default("INFO").Enum("DEBUG", "INFO", "WARN", "ERROR"),
LogMode: kingpin.Flag("log-mode", "Set the logging output mode").Default("PRETTY").Enum("NOCOLOR", "PRETTY", "JSON"),
KubectlShellImage: kingpin.Flag("kubectl-shell-image", "Kubectl shell image").Envar(portainer.KubectlShellImageEnvVar).Default(portainer.DefaultKubectlShellImage).String(),
+ PullLimitCheckDisabled: kingpin.Flag("pull-limit-check-disabled", "Pull limit check").Envar(portainer.PullLimitCheckDisabledEnvVar).Default(defaultPullLimitCheckDisabled).Bool(),
+ TrustedOrigins: kingpin.Flag("trusted-origins", "List of trusted origins for CSRF protection. Separate multiple origins with a comma.").Envar(portainer.TrustedOriginsEnvVar).String(),
+ CSP: kingpin.Flag("csp", "Content Security Policy (CSP) header").Envar(portainer.CSPEnvVar).Default("true").Bool(),
}
}

View file

@@ -4,20 +4,21 @@
package cli
const (
defaultBindAddress = ":9000"
defaultHTTPSBindAddress = ":9443"
defaultTunnelServerAddress = "0.0.0.0"
defaultTunnelServerPort = "8000"
defaultDataDirectory = "/data"
defaultAssetsDirectory = "./"
defaultTLS = "false"
defaultTLSSkipVerify = "false"
defaultTLSCACertPath = "/certs/ca.pem"
defaultTLSCertPath = "/certs/cert.pem"
defaultTLSKeyPath = "/certs/key.pem"
defaultHTTPDisabled = "false"
defaultHTTPEnabled = "false"
defaultSSL = "false"
defaultBaseURL = "/"
defaultSecretKeyName = "portainer"
+ defaultPullLimitCheckDisabled = "false"
)

View file

@@ -1,21 +1,22 @@
package cli
const (
defaultBindAddress = ":9000"
defaultHTTPSBindAddress = ":9443"
defaultTunnelServerAddress = "0.0.0.0"
defaultTunnelServerPort = "8000"
defaultDataDirectory = "C:\\data"
defaultAssetsDirectory = "./"
defaultTLS = "false"
defaultTLSSkipVerify = "false"
defaultTLSCACertPath = "C:\\certs\\ca.pem"
defaultTLSCertPath = "C:\\certs\\cert.pem"
defaultTLSKeyPath = "C:\\certs\\key.pem"
defaultHTTPDisabled = "false"
defaultHTTPEnabled = "false"
defaultSSL = "false"
defaultSnapshotInterval = "5m"
defaultBaseURL = "/"
defaultSecretKeyName = "portainer"
+ defaultPullLimitCheckDisabled = "false"
)

View file

@@ -1,45 +0,0 @@
package cli
import (
"strings"
portainer "github.com/portainer/portainer/api"
"gopkg.in/alecthomas/kingpin.v2"
)
type pairListBool []portainer.Pair
// Set implementation for a list of portainer.Pair
func (l *pairListBool) Set(value string) error {
p := new(portainer.Pair)
// default to true. example setting=true is equivalent to setting
parts := strings.SplitN(value, "=", 2)
if len(parts) != 2 {
p.Name = parts[0]
p.Value = "true"
} else {
p.Name = parts[0]
p.Value = parts[1]
}
*l = append(*l, *p)
return nil
}
// String implementation for a list of pair
func (l *pairListBool) String() string {
return ""
}
// IsCumulative implementation for a list of pair
func (l *pairListBool) IsCumulative() bool {
return true
}
func BoolPairs(s kingpin.Settings) (target *[]portainer.Pair) {
target = new([]portainer.Pair)
s.SetValue((*pairListBool)(target))
return
}

View file

@@ -39,6 +39,7 @@ import (
"github.com/portainer/portainer/api/kubernetes"
kubecli "github.com/portainer/portainer/api/kubernetes/cli"
"github.com/portainer/portainer/api/ldap"
+ "github.com/portainer/portainer/api/logs"
"github.com/portainer/portainer/api/oauth"
"github.com/portainer/portainer/api/pendingactions"
"github.com/portainer/portainer/api/pendingactions/actions"
@@ -48,8 +49,11 @@ import (
"github.com/portainer/portainer/api/stacks/deployments"
"github.com/portainer/portainer/pkg/build"
"github.com/portainer/portainer/pkg/featureflags"
+ "github.com/portainer/portainer/pkg/fips"
"github.com/portainer/portainer/pkg/libhelm"
+ libhelmtypes "github.com/portainer/portainer/pkg/libhelm/types"
"github.com/portainer/portainer/pkg/libstack/compose"
+ "github.com/portainer/portainer/pkg/validate"
"github.com/gofrs/uuid"
"github.com/rs/zerolog/log"
@@ -165,12 +169,12 @@ func checkDBSchemaServerVersionMatch(dbStore dataservices.DataStore, serverVersi
return v.SchemaVersion == serverVersion && v.Edition == serverEdition
}
- func initKubernetesDeployer(kubernetesTokenCacheManager *kubeproxy.TokenCacheManager, kubernetesClientFactory *kubecli.ClientFactory, dataStore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager, assetsPath string) portainer.KubernetesDeployer {
+ func initKubernetesDeployer(kubernetesTokenCacheManager *kubeproxy.TokenCacheManager, kubernetesClientFactory *kubecli.ClientFactory, dataStore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager) portainer.KubernetesDeployer {
- return exec.NewKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager, assetsPath)
+ return exec.NewKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager)
}
- func initHelmPackageManager(assetsPath string) (libhelm.HelmPackageManager, error) {
+ func initHelmPackageManager() (libhelmtypes.HelmPackageManager, error) {
- return libhelm.NewHelmPackageManager(libhelm.HelmConfig{BinaryPath: assetsPath})
+ return libhelm.NewHelmPackageManager()
}
func initAPIKeyService(datastore dataservices.DataStore) apikey.APIKeyService {
@@ -238,10 +242,10 @@ func updateSettingsFromFlags(dataStore dataservices.DataStore, flags *portainer.
return err
}
- settings.SnapshotInterval = *cmp.Or(flags.SnapshotInterval, &settings.SnapshotInterval)
+ settings.SnapshotInterval = cmp.Or(*flags.SnapshotInterval, settings.SnapshotInterval)
- settings.LogoURL = *cmp.Or(flags.Logo, &settings.LogoURL)
+ settings.LogoURL = cmp.Or(*flags.Logo, settings.LogoURL)
- settings.EnableEdgeComputeFeatures = *cmp.Or(flags.EnableEdgeComputeFeatures, &settings.EnableEdgeComputeFeatures)
+ settings.EnableEdgeComputeFeatures = cmp.Or(*flags.EnableEdgeComputeFeatures, settings.EnableEdgeComputeFeatures)
- settings.TemplatesURL = *cmp.Or(flags.Templates, &settings.TemplatesURL)
+ settings.TemplatesURL = cmp.Or(*flags.Templates, settings.TemplatesURL)
if *flags.Labels != nil {
settings.BlackListedLabels = *flags.Labels
@@ -328,6 +332,21 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
featureflags.Parse(*flags.FeatureFlags, portainer.SupportedFeatureFlags)
}
+ trustedOrigins := []string{}
+ if *flags.TrustedOrigins != "" {
+ // validate if the trusted origins are valid urls
+ for _, origin := range strings.Split(*flags.TrustedOrigins, ",") {
+ if !validate.IsTrustedOrigin(origin) {
+ log.Fatal().Str("trusted_origin", origin).Msg("invalid url for trusted origin. Please check the trusted origins flag.")
+ }
+ trustedOrigins = append(trustedOrigins, origin)
+ }
+ }
+ // -ce can not ever be run in FIPS mode
+ fips.InitFIPS(false)
fileService := initFileService(*flags.Data)
encryptionKey := loadEncryptionSecretKey(*flags.SecretKeyName)
if encryptionKey == nil {
@@ -368,7 +387,8 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
gitService := git.NewService(shutdownCtx)
- openAMTService := openamt.NewService()
+ // Setting insecureSkipVerify to true to preserve the old behaviour.
+ openAMTService := openamt.NewService(true)
cryptoService := &crypto.Service{}
@@ -421,7 +441,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
log.Fatal().Err(err).Msg("failed initializing swarm stack manager")
}
- kubernetesDeployer := initKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager, *flags.Assets)
+ kubernetesDeployer := initKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager)
pendingActionsService := pendingactions.NewService(dataStore, kubernetesClientFactory)
pendingActionsService.RegisterHandler(actions.CleanNAPWithOverridePolicies, handlers.NewHandlerCleanNAPWithOverridePolicies(authorizationService, dataStore))
@@ -435,9 +455,9 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
snapshotService.Start()
- proxyManager.NewProxyFactory(dataStore, signatureService, reverseTunnelService, dockerClientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService)
+ proxyManager.NewProxyFactory(dataStore, signatureService, reverseTunnelService, dockerClientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService, jwtService)
- helmPackageManager, err := initHelmPackageManager(*flags.Assets)
+ helmPackageManager, err := initHelmPackageManager()
if err != nil {
log.Fatal().Err(err).Msg("failed initializing helm package manager")
}
@@ -543,6 +563,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
Status: applicationStatus,
BindAddress: *flags.Addr,
BindAddressHTTPS: *flags.AddrHTTPS,
+ CSP: *flags.CSP,
HTTPEnabled: sslDBSettings.HTTPEnabled,
AssetsPath: *flags.Assets,
DataStore: dataStore,
@@ -575,17 +596,19 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
AdminCreationDone: adminCreationDone,
PendingActionsService: pendingActionsService,
PlatformService: platformService,
+ PullLimitCheckDisabled: *flags.PullLimitCheckDisabled,
+ TrustedOrigins: trustedOrigins,
}
}
func main() {
- configureLogger()
+ logs.ConfigureLogger()
- setLoggingMode("PRETTY")
+ logs.SetLoggingMode("PRETTY")
flags := initCLI()
- setLoggingLevel(*flags.LogLevel)
+ logs.SetLoggingLevel(*flags.LogLevel)
- setLoggingMode(*flags.LogMode)
+ logs.SetLoggingMode(*flags.LogMode)
for {
server := buildServer(flags)

View file

@@ -6,8 +6,10 @@ import (
type ReadTransaction interface {
GetObject(bucketName string, key []byte, object any) error
+ GetRawBytes(bucketName string, key []byte) ([]byte, error)
GetAll(bucketName string, obj any, append func(o any) (any, error)) error
GetAllWithKeyPrefix(bucketName string, keyPrefix []byte, obj any, append func(o any) (any, error)) error
+ KeyExists(bucketName string, key []byte) (bool, error)
}
type Transaction interface {


@@ -6,11 +6,15 @@ import (
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"crypto/rand" "crypto/rand"
"crypto/sha256"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"strings"
"github.com/portainer/portainer/pkg/fips"
"golang.org/x/crypto/argon2" "golang.org/x/crypto/argon2"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt" "golang.org/x/crypto/scrypt"
) )
@@ -19,20 +23,32 @@ const (
aesGcmHeader = "AES256-GCM" // The encrypted file header aesGcmHeader = "AES256-GCM" // The encrypted file header
aesGcmBlockSize = 1024 * 1024 // 1MB block for aes gcm aesGcmBlockSize = 1024 * 1024 // 1MB block for aes gcm
aesGcmFIPSHeader = "FIPS-AES256-GCM"
aesGcmFIPSBlockSize = 16 * 1024 * 1024 // 16MB block for aes gcm
// Argon2 settings // Argon2 settings
// Recommded settings lower memory hardware according to current OWASP recommendations // Recommended settings lower memory hardware according to current OWASP recommendations
// Considering some people run portainer on a NAS I think it's prudent not to assume we're on server grade hardware // Considering some people run portainer on a NAS I think it's prudent not to assume we're on server grade hardware
// https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#argon2id // https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#argon2id
argon2MemoryCost = 12 * 1024 argon2MemoryCost = 12 * 1024
argon2TimeCost = 3 argon2TimeCost = 3
argon2Threads = 1 argon2Threads = 1
argon2KeyLength = 32 argon2KeyLength = 32
pbkdf2Iterations = 600_000 // use recommended iterations from https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 a little overkill for this use
pbkdf2SaltLength = 32
) )
// AesEncrypt reads from input, encrypts with AES-256 and writes to output. passphrase is used to generate an encryption key // AesEncrypt reads from input, encrypts with AES-256 and writes to output. passphrase is used to generate an encryption key
func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error { func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error {
if err := aesEncryptGCM(input, output, passphrase); err != nil { if fips.FIPSMode() {
return fmt.Errorf("error encrypting file: %w", err) if err := aesEncryptGCMFIPS(input, output, passphrase); err != nil {
return fmt.Errorf("error encrypting file: %w", err)
}
} else {
if err := aesEncryptGCM(input, output, passphrase); err != nil {
return fmt.Errorf("error encrypting file: %w", err)
}
} }
return nil return nil
@@ -40,14 +56,36 @@ func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error {
// AesDecrypt reads from input, decrypts with AES-256 and returns the reader to read the decrypted content from // AesDecrypt reads from input, decrypts with AES-256 and returns the reader to read the decrypted content from
func AesDecrypt(input io.Reader, passphrase []byte) (io.Reader, error) { func AesDecrypt(input io.Reader, passphrase []byte) (io.Reader, error) {
fipsMode := fips.FIPSMode()
return aesDecrypt(input, passphrase, fipsMode)
}
func aesDecrypt(input io.Reader, passphrase []byte, fipsMode bool) (io.Reader, error) {
// Read file header to determine how it was encrypted // Read file header to determine how it was encrypted
inputReader := bufio.NewReader(input) inputReader := bufio.NewReader(input)
header, err := inputReader.Peek(len(aesGcmHeader)) header, err := inputReader.Peek(len(aesGcmFIPSHeader))
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading encrypted backup file header: %w", err) return nil, fmt.Errorf("error reading encrypted backup file header: %w", err)
} }
if string(header) == aesGcmHeader { if strings.HasPrefix(string(header), aesGcmFIPSHeader) {
if !fipsMode {
return nil, errors.New("fips encrypted file detected but fips mode is not enabled")
}
reader, err := aesDecryptGCMFIPS(inputReader, passphrase)
if err != nil {
return nil, fmt.Errorf("error decrypting file: %w", err)
}
return reader, nil
}
if strings.HasPrefix(string(header), aesGcmHeader) {
if fipsMode {
return nil, errors.New("fips mode is enabled but non-fips encrypted file detected")
}
reader, err := aesDecryptGCM(inputReader, passphrase) reader, err := aesDecryptGCM(inputReader, passphrase)
if err != nil { if err != nil {
return nil, fmt.Errorf("error decrypting file: %w", err) return nil, fmt.Errorf("error decrypting file: %w", err)
@@ -203,6 +241,126 @@ func aesDecryptGCM(input io.Reader, passphrase []byte) (io.Reader, error) {
return &buf, nil return &buf, nil
} }
// aesEncryptGCMFIPS reads from input, encrypts with AES-256 in a fips compliant
// way and writes to output. passphrase is used to generate an encryption key.
func aesEncryptGCMFIPS(input io.Reader, output io.Writer, passphrase []byte) error {
salt := make([]byte, pbkdf2SaltLength)
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
return err
}
key := pbkdf2.Key(passphrase, salt, pbkdf2Iterations, 32, sha256.New)
block, err := aes.NewCipher(key)
if err != nil {
return err
}
// write the header
if _, err := output.Write([]byte(aesGcmFIPSHeader)); err != nil {
return err
}
// Write the salt to the output file (a fresh random nonce is generated for each block below)
if _, err := output.Write(salt); err != nil {
return err
}
// Buffer for reading plaintext blocks
buf := make([]byte, aesGcmFIPSBlockSize)
// Encrypt plaintext in blocks
for {
// new random nonce for each block
aesgcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil {
return fmt.Errorf("error creating gcm: %w", err)
}
n, err := io.ReadFull(input, buf)
if n == 0 {
break // end of plaintext input
}
if err != nil && !(errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
return err
}
// Seal encrypts the plaintext
ciphertext := aesgcm.Seal(nil, nil, buf[:n], nil)
_, err = output.Write(ciphertext)
if err != nil {
return err
}
}
return nil
}
// aesDecryptGCMFIPS reads from input, decrypts with AES-256 in a fips compliant
// way and returns the reader to read the decrypted content from.
func aesDecryptGCMFIPS(input io.Reader, passphrase []byte) (io.Reader, error) {
// Read & verify the header
header := make([]byte, len(aesGcmFIPSHeader))
if _, err := io.ReadFull(input, header); err != nil {
return nil, err
}
if string(header) != aesGcmFIPSHeader {
return nil, errors.New("invalid header")
}
// Read salt
salt := make([]byte, pbkdf2SaltLength)
if _, err := io.ReadFull(input, salt); err != nil {
return nil, err
}
key := pbkdf2.Key(passphrase, salt, pbkdf2Iterations, 32, sha256.New)
// Initialize AES cipher block
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
// Initialize a buffer to store decrypted data
buf := bytes.Buffer{}
// Decrypt the ciphertext in blocks
for {
// Create GCM mode with the cipher block
aesgcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil {
return nil, err
}
// Read a block of ciphertext from the input reader
ciphertextBlock := make([]byte, aesGcmFIPSBlockSize+aesgcm.Overhead())
n, err := io.ReadFull(input, ciphertextBlock)
if n == 0 {
break // end of ciphertext
}
if err != nil && !(errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
return nil, err
}
// Decrypt the block of ciphertext
plaintext, err := aesgcm.Open(nil, nil, ciphertextBlock[:n], nil)
if err != nil {
return nil, err
}
if _, err := buf.Write(plaintext); err != nil {
return nil, err
}
}
return &buf, nil
}
// aesDecryptOFB reads from input, decrypts with AES-256 and returns the reader to a read decrypted content from. // aesDecryptOFB reads from input, decrypts with AES-256 and returns the reader to a read decrypted content from.
// passphrase is used to generate an encryption key. // passphrase is used to generate an encryption key.
// note: This function used to decrypt files that were encrypted without a header i.e. old archives // note: This function used to decrypt files that were encrypted without a header i.e. old archives
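A minimal usage sketch of the public entry points above (not part of the diff; it assumes these helpers live in the api/crypto package, that fips.InitFIPS has already been called during startup, and the file paths are illustrative):
package main
import (
	"log"
	"os"
	"github.com/portainer/portainer/api/crypto"
)
func main() {
	in, _ := os.Open("backup.tar")        // illustrative input path
	defer in.Close()
	out, _ := os.Create("backup.tar.enc") // illustrative output path
	// AesEncrypt selects the FIPS path (PBKDF2 + 16MB GCM blocks) or the regular
	// path (argon2id + 1MB GCM blocks) based on fips.FIPSMode() and writes the
	// matching header, so AesDecrypt can later refuse a mismatched mode.
	if err := crypto.AesEncrypt(in, out, []byte("passphrase")); err != nil {
		log.Fatal(err)
	}
	out.Close()
	enc, _ := os.Open("backup.tar.enc")
	defer enc.Close()
	plain, err := crypto.AesDecrypt(enc, []byte("passphrase"))
	if err != nil {
		log.Fatal(err) // e.g. a FIPS-encrypted file opened in non-FIPS mode
	}
	_ = plain // io.Reader over the decrypted content
}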


@@ -7,9 +7,15 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func init() {
fips.InitFIPS(false)
}
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
func randBytes(n int) []byte { func randBytes(n int) []byte {
@@ -20,198 +26,296 @@ func randBytes(n int) []byte {
return b return b
} }
type encryptFunc func(input io.Reader, output io.Writer, passphrase []byte) error
type decryptFunc func(input io.Reader, passphrase []byte) (io.Reader, error)
func Test_encryptAndDecrypt_withTheSamePassword(t *testing.T) { func Test_encryptAndDecrypt_withTheSamePassword(t *testing.T) {
const passphrase = "passphrase" const passphrase = "passphrase"
tmpdir := t.TempDir() testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc, decryptShouldSucceed bool) {
tmpdir := t.TempDir()
var ( var (
originFilePath = filepath.Join(tmpdir, "origin") originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted") encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted") decryptedFilePath = filepath.Join(tmpdir, "decrypted")
) )
content := randBytes(1024*1024*100 + 523) content := randBytes(1024*1024*100 + 523)
os.WriteFile(originFilePath, content, 0600) os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath) originFile, _ := os.Open(originFilePath)
defer originFile.Close() defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath) encryptedFileWriter, _ := os.Create(encryptedFilePath)
err := AesEncrypt(originFile, encryptedFileWriter, []byte(passphrase)) err := encrypt(originFile, encryptedFileWriter, []byte(passphrase))
assert.Nil(t, err, "Failed to encrypt a file") require.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close() encryptedFileWriter.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath) encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file") require.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath) encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close() defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath) decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close() defer decryptedFileWriter.Close()
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(passphrase)) decryptedReader, err := decrypt(encryptedFileReader, []byte(passphrase))
assert.Nil(t, err, "Failed to decrypt file") if !decryptShouldSucceed {
require.Error(t, err, "Failed to decrypt file as indicated by decryptShouldSucceed")
} else {
require.NoError(t, err, "Failed to decrypt file indicated by decryptShouldSucceed")
io.Copy(decryptedFileWriter, decryptedReader) io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath) decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match") assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS, true)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM, true)
})
t.Run("system_fips_mode_public_entry_points", func(t *testing.T) {
// use the init mode, public entry points
testFunc(t, AesEncrypt, AesDecrypt, true)
})
t.Run("fips_encrypted_file_header_fails_in_non_fips_mode", func(t *testing.T) {
// use aesDecrypt which checks the header, confirm that it fails
decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) {
return aesDecrypt(input, passphrase, false)
}
testFunc(t, aesEncryptGCMFIPS, decrypt, false)
})
t.Run("non_fips_encrypted_file_header_fails_in_fips_mode", func(t *testing.T) {
// use aesDecrypt which checks the header, confirm that it fails
decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) {
return aesDecrypt(input, passphrase, true)
}
testFunc(t, aesEncryptGCM, decrypt, false)
})
t.Run("fips_encrypted_file_fails_in_non_fips_mode", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCM, false)
})
t.Run("non_fips_encrypted_file_with_fips_mode_should_fail", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCMFIPS, false)
})
t.Run("fips_with_base_aesDecrypt", func(t *testing.T) {
// maximize coverage, use the base aesDecrypt function with valid fips mode
decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) {
return aesDecrypt(input, passphrase, true)
}
testFunc(t, aesEncryptGCMFIPS, decrypt, true)
})
} }
func Test_encryptAndDecrypt_withStrongPassphrase(t *testing.T) { func Test_encryptAndDecrypt_withStrongPassphrase(t *testing.T) {
const passphrase = "A strong passphrase with special characters: !@#$%^&*()_+" const passphrase = "A strong passphrase with special characters: !@#$%^&*()_+"
tmpdir := t.TempDir()
var ( testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
originFilePath = filepath.Join(tmpdir, "origin2") tmpdir := t.TempDir()
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
content := randBytes(500) var (
os.WriteFile(originFilePath, content, 0600) originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
originFile, _ := os.Open(originFilePath) content := randBytes(500)
defer originFile.Close() os.WriteFile(originFilePath, content, 0600)
encryptedFileWriter, _ := os.Create(encryptedFilePath) originFile, _ := os.Open(originFilePath)
defer originFile.Close()
err := AesEncrypt(originFile, encryptedFileWriter, []byte(passphrase)) encryptedFileWriter, _ := os.Create(encryptedFilePath)
assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath) err := encrypt(originFile, encryptedFileWriter, []byte(passphrase))
assert.Nil(t, err, "Couldn't read encrypted file") assert.Nil(t, err, "Failed to encrypt a file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") encryptedFileWriter.Close()
encryptedFileReader, _ := os.Open(encryptedFilePath) encryptedContent, err := os.ReadFile(encryptedFilePath)
defer encryptedFileReader.Close() assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
decryptedFileWriter, _ := os.Create(decryptedFilePath) encryptedFileReader, _ := os.Open(encryptedFilePath)
defer decryptedFileWriter.Close() defer encryptedFileReader.Close()
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(passphrase)) decryptedFileWriter, _ := os.Create(decryptedFilePath)
assert.Nil(t, err, "Failed to decrypt file") defer decryptedFileWriter.Close()
io.Copy(decryptedFileWriter, decryptedReader) decryptedReader, err := decrypt(encryptedFileReader, []byte(passphrase))
assert.Nil(t, err, "Failed to decrypt file")
decryptedContent, _ := os.ReadFile(decryptedFilePath) io.Copy(decryptedFileWriter, decryptedReader)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
} }
func Test_encryptAndDecrypt_withTheSamePasswordSmallFile(t *testing.T) { func Test_encryptAndDecrypt_withTheSamePasswordSmallFile(t *testing.T) {
tmpdir := t.TempDir() testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
var ( var (
originFilePath = filepath.Join(tmpdir, "origin2") originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2") encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2") decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
) )
content := randBytes(500) content := randBytes(500)
os.WriteFile(originFilePath, content, 0600) os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath) originFile, _ := os.Open(originFilePath)
defer originFile.Close() defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath) encryptedFileWriter, _ := os.Create(encryptedFilePath)
err := AesEncrypt(originFile, encryptedFileWriter, []byte("passphrase")) err := encrypt(originFile, encryptedFileWriter, []byte("passphrase"))
assert.Nil(t, err, "Failed to encrypt a file") assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close() encryptedFileWriter.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath) encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file") assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath) encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close() defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath) decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close() defer decryptedFileWriter.Close()
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte("passphrase")) decryptedReader, err := decrypt(encryptedFileReader, []byte("passphrase"))
assert.Nil(t, err, "Failed to decrypt file") assert.Nil(t, err, "Failed to decrypt file")
io.Copy(decryptedFileWriter, decryptedReader) io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath) decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match") assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
} }
func Test_encryptAndDecrypt_withEmptyPassword(t *testing.T) { func Test_encryptAndDecrypt_withEmptyPassword(t *testing.T) {
tmpdir := t.TempDir() testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
var ( var (
originFilePath = filepath.Join(tmpdir, "origin") originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted") encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted") decryptedFilePath = filepath.Join(tmpdir, "decrypted")
) )
content := randBytes(1024 * 50) content := randBytes(1024 * 50)
os.WriteFile(originFilePath, content, 0600) os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath) originFile, _ := os.Open(originFilePath)
defer originFile.Close() defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath) encryptedFileWriter, _ := os.Create(encryptedFilePath)
defer encryptedFileWriter.Close() defer encryptedFileWriter.Close()
err := AesEncrypt(originFile, encryptedFileWriter, []byte("")) err := encrypt(originFile, encryptedFileWriter, []byte(""))
assert.Nil(t, err, "Failed to encrypt a file") assert.Nil(t, err, "Failed to encrypt a file")
encryptedContent, err := os.ReadFile(encryptedFilePath) encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file") assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath) encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close() defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath) decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close() defer decryptedFileWriter.Close()
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte("")) decryptedReader, err := decrypt(encryptedFileReader, []byte(""))
assert.Nil(t, err, "Failed to decrypt file") assert.Nil(t, err, "Failed to decrypt file")
io.Copy(decryptedFileWriter, decryptedReader) io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath) decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match") assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
} }
func Test_decryptWithDifferentPassphrase_shouldProduceWrongResult(t *testing.T) { func Test_decryptWithDifferentPassphrase_shouldProduceWrongResult(t *testing.T) {
tmpdir := t.TempDir() testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
var ( var (
originFilePath = filepath.Join(tmpdir, "origin") originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted") encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted") decryptedFilePath = filepath.Join(tmpdir, "decrypted")
) )
content := randBytes(1034) content := randBytes(1034)
os.WriteFile(originFilePath, content, 0600) os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath) originFile, _ := os.Open(originFilePath)
defer originFile.Close() defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath) encryptedFileWriter, _ := os.Create(encryptedFilePath)
defer encryptedFileWriter.Close() defer encryptedFileWriter.Close()
err := AesEncrypt(originFile, encryptedFileWriter, []byte("passphrase")) err := encrypt(originFile, encryptedFileWriter, []byte("passphrase"))
assert.Nil(t, err, "Failed to encrypt a file") assert.Nil(t, err, "Failed to encrypt a file")
encryptedContent, err := os.ReadFile(encryptedFilePath) encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file") assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath) encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close() defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath) decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close() defer decryptedFileWriter.Close()
_, err = AesDecrypt(encryptedFileReader, []byte("garbage")) _, err = decrypt(encryptedFileReader, []byte("garbage"))
assert.NotNil(t, err, "Should not allow decrypt with wrong passphrase") assert.NotNil(t, err, "Should not allow decrypt with wrong passphrase")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
} }


@@ -112,7 +112,7 @@ func (service *ECDSAService) CreateSignature(message string) (string, error) {
message = service.secret message = service.secret
} }
hash := libcrypto.HashFromBytes([]byte(message)) hash := libcrypto.InsecureHashFromBytes([]byte(message))
r, s, err := ecdsa.Sign(rand.Reader, service.privateKey, hash) r, s, err := ecdsa.Sign(rand.Reader, service.privateKey, hash)
if err != nil { if err != nil {

api/crypto/ecdsa_test.go (new file, 22 lines)

@@ -0,0 +1,22 @@
package crypto
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestCreateSignature(t *testing.T) {
var s = NewECDSAService("secret")
privKey, pubKey, err := s.GenerateKeyPair()
require.NoError(t, err)
require.Greater(t, len(privKey), 0)
require.Greater(t, len(pubKey), 0)
m := "test message"
r, err := s.CreateSignature(m)
require.NoError(t, err)
require.NotEqual(t, r, m)
require.Greater(t, len(r), 0)
}


@@ -15,7 +15,7 @@ func NewNonce(size int) *Nonce {
} }
// NewRandomNonce generates a new initial nonce with the lower byte set to a random value // NewRandomNonce generates a new initial nonce with the lower byte set to a random value
// This ensures there are plenty of nonce values availble before rolling over // This ensures there are plenty of nonce values available before rolling over
// Based on ideas from the Secure Programming Cookbook for C and C++ by John Viega, Matt Messier // Based on ideas from the Secure Programming Cookbook for C and C++ by John Viega, Matt Messier
// https://www.oreilly.com/library/view/secure-programming-cookbook/0596003943/ch04s09.html // https://www.oreilly.com/library/view/secure-programming-cookbook/0596003943/ch04s09.html
func NewRandomNonce(size int) (*Nonce, error) { func NewRandomNonce(size int) (*Nonce, error) {


@@ -1,14 +1,36 @@
package crypto package crypto
import ( import (
"crypto/fips140"
"crypto/tls" "crypto/tls"
"crypto/x509" "crypto/x509"
"os" "os"
portainer "github.com/portainer/portainer/api"
) )
// CreateTLSConfiguration creates a basic tls.Config with recommended TLS settings // CreateTLSConfiguration creates a basic tls.Config with recommended TLS settings
func CreateTLSConfiguration() *tls.Config { func CreateTLSConfiguration(insecureSkipVerify bool) *tls.Config { //nolint:forbidigo
return &tls.Config{ // TODO: use fips.FIPSMode() instead
return createTLSConfiguration(fips140.Enabled(), insecureSkipVerify)
}
func createTLSConfiguration(fipsEnabled bool, insecureSkipVerify bool) *tls.Config { //nolint:forbidigo
if fipsEnabled {
return &tls.Config{ //nolint:forbidigo
MinVersion: tls.VersionTLS12,
MaxVersion: tls.VersionTLS13,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
},
CurvePreferences: []tls.CurveID{tls.CurveP256, tls.CurveP384, tls.CurveP521},
}
}
return &tls.Config{ //nolint:forbidigo
MinVersion: tls.VersionTLS12, MinVersion: tls.VersionTLS12,
CipherSuites: []uint16{ CipherSuites: []uint16{
tls.TLS_AES_128_GCM_SHA256, tls.TLS_AES_128_GCM_SHA256,
@@ -29,24 +51,34 @@ func CreateTLSConfiguration() *tls.Config {
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
}, },
InsecureSkipVerify: insecureSkipVerify, //nolint:forbidigo
} }
} }
// CreateTLSConfigurationFromBytes initializes a tls.Config using a CA certificate, a certificate and a key // CreateTLSConfigurationFromBytes initializes a tls.Config using a CA certificate, a certificate and a key
// loaded from memory. // loaded from memory.
func CreateTLSConfigurationFromBytes(caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) { func CreateTLSConfigurationFromBytes(useTLS bool, caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) { //nolint:forbidigo
config := CreateTLSConfiguration() // TODO: use fips.FIPSMode() instead
config.InsecureSkipVerify = skipServerVerification return createTLSConfigurationFromBytes(fips140.Enabled(), useTLS, caCert, cert, key, skipClientVerification, skipServerVerification)
}
if !skipClientVerification { func createTLSConfigurationFromBytes(fipsEnabled, useTLS bool, caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) { //nolint:forbidigo
if !useTLS {
return nil, nil
}
config := createTLSConfiguration(fipsEnabled, skipServerVerification)
if !skipClientVerification || fipsEnabled {
certificate, err := tls.X509KeyPair(cert, key) certificate, err := tls.X509KeyPair(cert, key)
if err != nil { if err != nil {
return nil, err return nil, err
} }
config.Certificates = []tls.Certificate{certificate} config.Certificates = []tls.Certificate{certificate}
} }
if !skipServerVerification { if !skipServerVerification || fipsEnabled {
caCertPool := x509.NewCertPool() caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert) caCertPool.AppendCertsFromPEM(caCert)
config.RootCAs = caCertPool config.RootCAs = caCertPool
@@ -57,29 +89,37 @@ func CreateTLSConfigurationFromBytes(caCert, cert, key []byte, skipClientVerific
// CreateTLSConfigurationFromDisk initializes a tls.Config using a CA certificate, a certificate and a key // CreateTLSConfigurationFromDisk initializes a tls.Config using a CA certificate, a certificate and a key
// loaded from disk. // loaded from disk.
func CreateTLSConfigurationFromDisk(caCertPath, certPath, keyPath string, skipServerVerification bool) (*tls.Config, error) { func CreateTLSConfigurationFromDisk(config portainer.TLSConfiguration) (*tls.Config, error) { //nolint:forbidigo
config := CreateTLSConfiguration() // TODO: use fips.FIPSMode() instead
config.InsecureSkipVerify = skipServerVerification return createTLSConfigurationFromDisk(fips140.Enabled(), config)
}
if certPath != "" && keyPath != "" { func createTLSConfigurationFromDisk(fipsEnabled bool, config portainer.TLSConfiguration) (*tls.Config, error) { //nolint:forbidigo
cert, err := tls.LoadX509KeyPair(certPath, keyPath) if !config.TLS {
return nil, nil
}
tlsConfig := createTLSConfiguration(fipsEnabled, config.TLSSkipVerify)
if config.TLSCertPath != "" && config.TLSKeyPath != "" {
cert, err := tls.LoadX509KeyPair(config.TLSCertPath, config.TLSKeyPath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
config.Certificates = []tls.Certificate{cert} tlsConfig.Certificates = []tls.Certificate{cert}
} }
if !skipServerVerification && caCertPath != "" { if !tlsConfig.InsecureSkipVerify && config.TLSCACertPath != "" { //nolint:forbidigo
caCert, err := os.ReadFile(caCertPath) caCert, err := os.ReadFile(config.TLSCACertPath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
caCertPool := x509.NewCertPool() caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert) caCertPool.AppendCertsFromPEM(caCert)
config.RootCAs = caCertPool tlsConfig.RootCAs = caCertPool
} }
return config, nil return tlsConfig, nil
} }
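Illustrative only (not from the diff): how a caller might consume the centralized helper so that the FIPS/non-FIPS decision stays inside the crypto package; the HTTP client wiring and timeout are assumptions.
package example
import (
	"net/http"
	"time"
	"github.com/portainer/portainer/api/crypto"
)
func newHTTPClient(skipVerify bool) *http.Client {
	// All TLS settings (min version, cipher suites, curves) come from the
	// shared helper; callers only decide whether server verification is skipped.
	tlsConfig := crypto.CreateTLSConfiguration(skipVerify)
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsConfig},
		Timeout:   30 * time.Second,
	}
}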

api/crypto/tls_test.go (new file, 87 lines)

@@ -0,0 +1,87 @@
package crypto
import (
"crypto/tls"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/require"
)
func TestCreateTLSConfiguration(t *testing.T) {
// InsecureSkipVerify = false
config := CreateTLSConfiguration(false)
require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo
require.False(t, config.InsecureSkipVerify) //nolint:forbidigo
// InsecureSkipVerify = true
config = CreateTLSConfiguration(true)
require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo
require.True(t, config.InsecureSkipVerify) //nolint:forbidigo
}
func TestCreateTLSConfigurationFIPS(t *testing.T) {
fips := true
fipsCipherSuites := []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
}
fipsCurvePreferences := []tls.CurveID{tls.CurveP256, tls.CurveP384, tls.CurveP521}
config := createTLSConfiguration(fips, false)
require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo
require.Equal(t, config.MaxVersion, uint16(tls.VersionTLS13)) //nolint:forbidigo
require.Equal(t, config.CipherSuites, fipsCipherSuites) //nolint:forbidigo
require.Equal(t, config.CurvePreferences, fipsCurvePreferences) //nolint:forbidigo
require.False(t, config.InsecureSkipVerify) //nolint:forbidigo
}
func TestCreateTLSConfigurationFromBytes(t *testing.T) {
// No TLS
config, err := CreateTLSConfigurationFromBytes(false, nil, nil, nil, false, false)
require.Nil(t, err)
require.Nil(t, config)
// Skip TLS client/server verifications
config, err = CreateTLSConfigurationFromBytes(true, nil, nil, nil, true, true)
require.NoError(t, err)
require.NotNil(t, config)
// Empty TLS
config, err = CreateTLSConfigurationFromBytes(true, nil, nil, nil, false, false)
require.Error(t, err)
require.Nil(t, config)
}
func TestCreateTLSConfigurationFromDisk(t *testing.T) {
// No TLS
config, err := CreateTLSConfigurationFromDisk(portainer.TLSConfiguration{})
require.Nil(t, err)
require.Nil(t, config)
// Skip TLS verifications
config, err = CreateTLSConfigurationFromDisk(portainer.TLSConfiguration{
TLS: true,
TLSSkipVerify: true,
})
require.NoError(t, err)
require.NotNil(t, config)
}
func TestCreateTLSConfigurationFromDiskFIPS(t *testing.T) {
fips := true
// TLS verification cannot be skipped in FIPS mode
config, err := createTLSConfigurationFromDisk(fips, portainer.TLSConfiguration{
TLS: true,
TLSSkipVerify: true,
})
require.NoError(t, err)
require.NotNil(t, config)
require.False(t, config.InsecureSkipVerify) //nolint:forbidigo
}


@@ -138,6 +138,8 @@ func (connection *DbConnection) Open() error {
db, err := bolt.Open(databasePath, 0600, &bolt.Options{ db, err := bolt.Open(databasePath, 0600, &bolt.Options{
Timeout: 1 * time.Second, Timeout: 1 * time.Second,
InitialMmapSize: connection.InitialMmapSize, InitialMmapSize: connection.InitialMmapSize,
FreelistType: bolt.FreelistMapType,
NoFreelistSync: true,
}) })
if err != nil { if err != nil {
return err return err
@@ -244,6 +246,32 @@ func (connection *DbConnection) GetObject(bucketName string, key []byte, object
}) })
} }
func (connection *DbConnection) GetRawBytes(bucketName string, key []byte) ([]byte, error) {
var value []byte
err := connection.ViewTx(func(tx portainer.Transaction) error {
var err error
value, err = tx.GetRawBytes(bucketName, key)
return err
})
return value, err
}
func (connection *DbConnection) KeyExists(bucketName string, key []byte) (bool, error) {
var exists bool
err := connection.ViewTx(func(tx portainer.Transaction) error {
var err error
exists, err = tx.KeyExists(bucketName, key)
return err
})
return exists, err
}
func (connection *DbConnection) getEncryptionKey() []byte { func (connection *DbConnection) getEncryptionKey() []byte {
if !connection.isEncrypted { if !connection.isEncrypted {
return nil return nil
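A sketch of how the new connection-level helpers might be used (not from the diff; the bucket and key names are purely illustrative):
package example
import "github.com/portainer/portainer/api/database/boltdb"
// exportRawSettings returns the stored bytes for a key without unmarshalling,
// first checking that the key exists. conn is assumed to be an opened
// *boltdb.DbConnection; "settings"/"SETTINGS" are illustrative names.
func exportRawSettings(conn *boltdb.DbConnection) ([]byte, error) {
	exists, err := conn.KeyExists("settings", []byte("SETTINGS"))
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, nil
	}
	// GetRawBytes transparently decrypts the value when the database is encrypted.
	return conn.GetRawBytes("settings", []byte("SETTINGS"))
}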


@@ -4,8 +4,6 @@ import (
"bytes" "bytes"
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"crypto/rand"
"io"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/segmentio/encoding/json" "github.com/segmentio/encoding/json"
@@ -65,18 +63,18 @@ func (connection *DbConnection) UnmarshalObject(data []byte, object any) error {
// https://gist.github.com/atoponce/07d8d4c833873be2f68c34f9afc5a78a#symmetric-encryption // https://gist.github.com/atoponce/07d8d4c833873be2f68c34f9afc5a78a#symmetric-encryption
func encrypt(plaintext []byte, passphrase []byte) (encrypted []byte, err error) { func encrypt(plaintext []byte, passphrase []byte) (encrypted []byte, err error) {
block, _ := aes.NewCipher(passphrase) block, err := aes.NewCipher(passphrase)
gcm, err := cipher.NewGCM(block)
if err != nil { if err != nil {
return encrypted, err return encrypted, err
} }
nonce := make([]byte, gcm.NonceSize()) // NewGCMWithRandomNonce in go 1.24 handles setting up the nonce and adding it to the encrypted output
if _, err := io.ReadFull(rand.Reader, nonce); err != nil { gcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil {
return encrypted, err return encrypted, err
} }
return gcm.Seal(nonce, nonce, plaintext, nil), nil return gcm.Seal(nil, nil, plaintext, nil), nil
} }
func decrypt(encrypted []byte, passphrase []byte) (plaintextByte []byte, err error) { func decrypt(encrypted []byte, passphrase []byte) (plaintextByte []byte, err error) {
@@ -89,19 +87,17 @@ func decrypt(encrypted []byte, passphrase []byte) (plaintextByte []byte, err err
return encrypted, errors.Wrap(err, "Error creating cypher block") return encrypted, errors.Wrap(err, "Error creating cypher block")
} }
gcm, err := cipher.NewGCM(block) // NewGCMWithRandomNonce in go 1.24 handles reading the nonce from the encrypted input for us
gcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil { if err != nil {
return encrypted, errors.Wrap(err, "Error creating GCM") return encrypted, errors.Wrap(err, "Error creating GCM")
} }
nonceSize := gcm.NonceSize() if len(encrypted) < gcm.NonceSize() {
if len(encrypted) < nonceSize {
return encrypted, errEncryptedStringTooShort return encrypted, errEncryptedStringTooShort
} }
nonce, ciphertextByteClean := encrypted[:nonceSize], encrypted[nonceSize:] plaintextByte, err = gcm.Open(nil, nil, encrypted, nil)
plaintextByte, err = gcm.Open(nil, nonce, ciphertextByteClean, nil)
if err != nil { if err != nil {
return encrypted, errors.Wrap(err, "Error decrypting text") return encrypted, errors.Wrap(err, "Error decrypting text")
} }


@@ -1,12 +1,19 @@
package boltdb package boltdb
import ( import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256" "crypto/sha256"
"encoding/base64"
"fmt" "fmt"
"io"
"testing" "testing"
"github.com/gofrs/uuid" "github.com/gofrs/uuid"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
const ( const (
@@ -160,7 +167,7 @@ func Test_ObjectMarshallingEncrypted(t *testing.T) {
} }
key := secretToEncryptionKey(passphrase) key := secretToEncryptionKey(passphrase)
conn := DbConnection{EncryptionKey: key} conn := DbConnection{EncryptionKey: key, isEncrypted: true}
for _, test := range tests { for _, test := range tests {
t.Run(fmt.Sprintf("%s -> %s", test.object, test.expected), func(t *testing.T) { t.Run(fmt.Sprintf("%s -> %s", test.object, test.expected), func(t *testing.T) {
@@ -175,3 +182,94 @@ func Test_ObjectMarshallingEncrypted(t *testing.T) {
}) })
} }
} }
func Test_NonceSources(t *testing.T) {
// ensure that the new go 1.24 NewGCMWithRandomNonce works correctly with
// the old way of creating and including the nonce
encryptOldFn := func(plaintext []byte, passphrase []byte) (encrypted []byte, err error) {
block, _ := aes.NewCipher(passphrase)
gcm, err := cipher.NewGCM(block)
if err != nil {
return encrypted, err
}
nonce := make([]byte, gcm.NonceSize())
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
return encrypted, err
}
return gcm.Seal(nonce, nonce, plaintext, nil), nil
}
decryptOldFn := func(encrypted []byte, passphrase []byte) (plaintext []byte, err error) {
block, err := aes.NewCipher(passphrase)
if err != nil {
return encrypted, errors.Wrap(err, "Error creating cypher block")
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return encrypted, errors.Wrap(err, "Error creating GCM")
}
nonceSize := gcm.NonceSize()
if len(encrypted) < nonceSize {
return encrypted, errEncryptedStringTooShort
}
nonce, ciphertextByteClean := encrypted[:nonceSize], encrypted[nonceSize:]
plaintext, err = gcm.Open(nil, nonce, ciphertextByteClean, nil)
if err != nil {
return encrypted, errors.Wrap(err, "Error decrypting text")
}
return plaintext, err
}
encryptNewFn := encrypt
decryptNewFn := decrypt
passphrase := make([]byte, 32)
_, err := io.ReadFull(rand.Reader, passphrase)
require.NoError(t, err)
junk := make([]byte, 1024)
_, err = io.ReadFull(rand.Reader, junk)
require.NoError(t, err)
junkEnc := make([]byte, base64.StdEncoding.EncodedLen(len(junk)))
base64.StdEncoding.Encode(junkEnc, junk)
cases := [][]byte{
[]byte("test"),
[]byte("35"),
[]byte("9ca4a1dd-a439-4593-b386-a7dfdc2e9fc6"),
[]byte(jsonobject),
passphrase,
junk,
junkEnc,
}
for _, plain := range cases {
var enc, dec []byte
var err error
enc, err = encryptOldFn(plain, passphrase)
require.NoError(t, err)
dec, err = decryptNewFn(enc, passphrase)
require.NoError(t, err)
require.Equal(t, plain, dec)
enc, err = encryptNewFn(plain, passphrase)
require.NoError(t, err)
dec, err = decryptOldFn(enc, passphrase)
require.NoError(t, err)
require.Equal(t, plain, dec)
}
}


@@ -6,6 +6,7 @@ import (
dserrors "github.com/portainer/portainer/api/dataservices/errors" dserrors "github.com/portainer/portainer/api/dataservices/errors"
"github.com/pkg/errors"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
) )
@@ -31,6 +32,33 @@ func (tx *DbTransaction) GetObject(bucketName string, key []byte, object any) er
return tx.conn.UnmarshalObject(value, object) return tx.conn.UnmarshalObject(value, object)
} }
func (tx *DbTransaction) GetRawBytes(bucketName string, key []byte) ([]byte, error) {
bucket := tx.tx.Bucket([]byte(bucketName))
value := bucket.Get(key)
if value == nil {
return nil, fmt.Errorf("%w (bucket=%s, key=%s)", dserrors.ErrObjectNotFound, bucketName, keyToString(key))
}
if tx.conn.getEncryptionKey() != nil {
var err error
if value, err = decrypt(value, tx.conn.getEncryptionKey()); err != nil {
return value, errors.Wrap(err, "Failed decrypting object")
}
}
return value, nil
}
func (tx *DbTransaction) KeyExists(bucketName string, key []byte) (bool, error) {
bucket := tx.tx.Bucket([]byte(bucketName))
value := bucket.Get(key)
return value != nil, nil
}
func (tx *DbTransaction) UpdateObject(bucketName string, key []byte, object any) error { func (tx *DbTransaction) UpdateObject(bucketName string, key []byte, object any) error {
data, err := tx.conn.MarshalObject(object) data, err := tx.conn.MarshalObject(object)
if err != nil { if err != nil {


@@ -9,7 +9,8 @@ import (
type BaseCRUD[T any, I constraints.Integer] interface { type BaseCRUD[T any, I constraints.Integer] interface {
Create(element *T) error Create(element *T) error
Read(ID I) (*T, error) Read(ID I) (*T, error)
ReadAll() ([]T, error) Exists(ID I) (bool, error)
ReadAll(predicates ...func(T) bool) ([]T, error)
Update(ID I, element *T) error Update(ID I, element *T) error
Delete(ID I) error Delete(ID I) error
} }
@@ -42,12 +43,26 @@ func (service BaseDataService[T, I]) Read(ID I) (*T, error) {
}) })
} }
func (service BaseDataService[T, I]) ReadAll() ([]T, error) { func (service BaseDataService[T, I]) Exists(ID I) (bool, error) {
var exists bool
err := service.Connection.ViewTx(func(tx portainer.Transaction) error {
var err error
exists, err = service.Tx(tx).Exists(ID)
return err
})
return exists, err
}
// ReadAll retrieves all the elements that satisfy all the provided predicates.
func (service BaseDataService[T, I]) ReadAll(predicates ...func(T) bool) ([]T, error) {
var collection = make([]T, 0) var collection = make([]T, 0)
return collection, service.Connection.ViewTx(func(tx portainer.Transaction) error { return collection, service.Connection.ViewTx(func(tx portainer.Transaction) error {
var err error var err error
collection, err = service.Tx(tx).ReadAll() collection, err = service.Tx(tx).ReadAll(predicates...)
return err return err
}) })


@@ -0,0 +1,92 @@
package dataservices
import (
"strconv"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/slicesx"
"github.com/stretchr/testify/require"
)
type testObject struct {
ID int
Value int
}
type mockConnection struct {
store map[int]testObject
portainer.Connection
}
func (m mockConnection) UpdateObject(bucket string, key []byte, value interface{}) error {
obj := value.(*testObject)
m.store[obj.ID] = *obj
return nil
}
func (m mockConnection) GetAll(bucketName string, obj any, appendFn func(o any) (any, error)) error {
for _, v := range m.store {
if _, err := appendFn(&v); err != nil {
return err
}
}
return nil
}
func (m mockConnection) UpdateTx(fn func(portainer.Transaction) error) error {
return fn(m)
}
func (m mockConnection) ViewTx(fn func(portainer.Transaction) error) error {
return fn(m)
}
func (m mockConnection) ConvertToKey(v int) []byte {
return []byte(strconv.Itoa(v))
}
func TestReadAll(t *testing.T) {
service := BaseDataService[testObject, int]{
Bucket: "testBucket",
Connection: mockConnection{store: make(map[int]testObject)},
}
data := []testObject{
{ID: 1, Value: 1},
{ID: 2, Value: 2},
{ID: 3, Value: 3},
{ID: 4, Value: 4},
{ID: 5, Value: 5},
}
for _, item := range data {
err := service.Update(item.ID, &item)
require.NoError(t, err)
}
// ReadAll without predicates
result, err := service.ReadAll()
require.NoError(t, err)
expected := append([]testObject{}, data...)
require.ElementsMatch(t, expected, result)
// ReadAll with predicates
hasLowID := func(obj testObject) bool { return obj.ID < 3 }
isEven := func(obj testObject) bool { return obj.Value%2 == 0 }
result, err = service.ReadAll(hasLowID, isEven)
require.NoError(t, err)
expected = slicesx.Filter(expected, hasLowID)
expected = slicesx.Filter(expected, isEven)
require.ElementsMatch(t, expected, result)
}


@@ -28,13 +28,38 @@ func (service BaseDataServiceTx[T, I]) Read(ID I) (*T, error) {
return &element, nil return &element, nil
} }
func (service BaseDataServiceTx[T, I]) ReadAll() ([]T, error) { func (service BaseDataServiceTx[T, I]) Exists(ID I) (bool, error) {
identifier := service.Connection.ConvertToKey(int(ID))
return service.Tx.KeyExists(service.Bucket, identifier)
}
// ReadAll retrieves all the elements that satisfy all the provided predicates.
func (service BaseDataServiceTx[T, I]) ReadAll(predicates ...func(T) bool) ([]T, error) {
var collection = make([]T, 0) var collection = make([]T, 0)
if len(predicates) == 0 {
return collection, service.Tx.GetAll(
service.Bucket,
new(T),
AppendFn(&collection),
)
}
filterFn := func(element T) bool {
for _, p := range predicates {
if !p(element) {
return false
}
}
return true
}
return collection, service.Tx.GetAll( return collection, service.Tx.GetAll(
service.Bucket, service.Bucket,
new(T), new(T),
AppendFn(&collection), FilterFn(&collection, filterFn),
) )
} }
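FilterFn itself is not shown in this diff; the following is a plausible shape, mirroring AppendFn but only collecting elements that pass the filter. It is an assumption for illustration, not the actual implementation.
package example
import "fmt"
// FilterFn appends an element to the collection only when filterFn accepts it.
// The returned callback matches the func(obj any) (any, error) signature that
// Transaction.GetAll expects.
func FilterFn[T any](collection *[]T, filterFn func(T) bool) func(obj any) (any, error) {
	return func(obj any) (any, error) {
		element, ok := obj.(*T)
		if !ok {
			return nil, fmt.Errorf("unexpected object type %T", obj)
		}
		if filterFn(*element) {
			*collection = append(*collection, *element)
		}
		return new(T), nil
	}
}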


@@ -17,11 +17,29 @@ func (service ServiceTx) UpdateEdgeGroupFunc(ID portainer.EdgeGroupID, updateFun
} }
func (service ServiceTx) Create(group *portainer.EdgeGroup) error { func (service ServiceTx) Create(group *portainer.EdgeGroup) error {
return service.Tx.CreateObject( es := group.Endpoints
group.Endpoints = nil // Clear deprecated field
err := service.Tx.CreateObject(
BucketName, BucketName,
func(id uint64) (int, any) { func(id uint64) (int, any) {
group.ID = portainer.EdgeGroupID(id) group.ID = portainer.EdgeGroupID(id)
return int(group.ID), group return int(group.ID), group
}, },
) )
group.Endpoints = es // Restore endpoints after create
return err
}
func (service ServiceTx) Update(ID portainer.EdgeGroupID, group *portainer.EdgeGroup) error {
es := group.Endpoints
group.Endpoints = nil // Clear deprecated field
err := service.BaseDataServiceTx.Update(ID, group)
group.Endpoints = es // Restore endpoints after update
return err
} }


@@ -0,0 +1,50 @@
package edgestack
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/database/boltdb"
"github.com/stretchr/testify/require"
)
func TestUpdate(t *testing.T) {
var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()}
err := conn.Open()
require.NoError(t, err)
defer conn.Close()
service, err := NewService(conn, func(portainer.Transaction, portainer.EdgeStackID) {})
require.NoError(t, err)
const edgeStackID = 1
edgeStack := &portainer.EdgeStack{
ID: edgeStackID,
Name: "Test Stack",
}
err = service.Create(edgeStackID, edgeStack)
require.NoError(t, err)
err = service.UpdateEdgeStackFunc(edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.Name = "Updated Stack"
})
require.NoError(t, err)
updatedStack, err := service.EdgeStack(edgeStackID)
require.NoError(t, err)
require.Equal(t, "Updated Stack", updatedStack.Name)
err = conn.UpdateTx(func(tx portainer.Transaction) error {
return service.UpdateEdgeStackFuncTx(tx, edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.Name = "Updated Stack Again"
})
})
require.NoError(t, err)
updatedStack, err = service.EdgeStack(edgeStackID)
require.NoError(t, err)
require.Equal(t, "Updated Stack Again", updatedStack.Name)
}


@@ -0,0 +1,89 @@
package edgestackstatus
import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
var _ dataservices.EdgeStackStatusService = &Service{}
const BucketName = "edge_stack_status"
type Service struct {
conn portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
}
func NewService(connection portainer.Connection) (*Service, error) {
if err := connection.SetServiceName(BucketName); err != nil {
return nil, err
}
return &Service{conn: connection}, nil
}
func (s *Service) Tx(tx portainer.Transaction) ServiceTx {
return ServiceTx{
service: s,
tx: tx,
}
}
func (s *Service) Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Create(edgeStackID, endpointID, status)
})
}
func (s *Service) Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error) {
var element *portainer.EdgeStackStatusForEnv
return element, s.conn.ViewTx(func(tx portainer.Transaction) error {
var err error
element, err = s.Tx(tx).Read(edgeStackID, endpointID)
return err
})
}
func (s *Service) ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error) {
var collection = make([]portainer.EdgeStackStatusForEnv, 0)
return collection, s.conn.ViewTx(func(tx portainer.Transaction) error {
var err error
collection, err = s.Tx(tx).ReadAll(edgeStackID)
return err
})
}
func (s *Service) Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Update(edgeStackID, endpointID, status)
})
}
func (s *Service) Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Delete(edgeStackID, endpointID)
})
}
func (s *Service) DeleteAll(edgeStackID portainer.EdgeStackID) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).DeleteAll(edgeStackID)
})
}
func (s *Service) Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Clear(edgeStackID, relatedEnvironmentsIDs)
})
}
func (s *Service) key(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) []byte {
return append(s.conn.ConvertToKey(int(edgeStackID)), s.conn.ConvertToKey(int(endpointID))...)
}


@@ -0,0 +1,95 @@
package edgestackstatus
import (
"fmt"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
var _ dataservices.EdgeStackStatusService = &Service{}
type ServiceTx struct {
service *Service
tx portainer.Transaction
}
func (service ServiceTx) Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
identifier := service.service.key(edgeStackID, endpointID)
return service.tx.CreateObjectWithStringId(BucketName, identifier, status)
}
func (s ServiceTx) Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error) {
var status portainer.EdgeStackStatusForEnv
identifier := s.service.key(edgeStackID, endpointID)
if err := s.tx.GetObject(BucketName, identifier, &status); err != nil {
return nil, err
}
return &status, nil
}
func (s ServiceTx) ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error) {
keyPrefix := s.service.conn.ConvertToKey(int(edgeStackID))
statuses := make([]portainer.EdgeStackStatusForEnv, 0)
if err := s.tx.GetAllWithKeyPrefix(BucketName, keyPrefix, &portainer.EdgeStackStatusForEnv{}, dataservices.AppendFn(&statuses)); err != nil {
return nil, fmt.Errorf("unable to retrieve EdgeStackStatus for EdgeStack %d: %w", edgeStackID, err)
}
return statuses, nil
}
func (s ServiceTx) Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
identifier := s.service.key(edgeStackID, endpointID)
return s.tx.UpdateObject(BucketName, identifier, status)
}
func (s ServiceTx) Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error {
identifier := s.service.key(edgeStackID, endpointID)
return s.tx.DeleteObject(BucketName, identifier)
}
func (s ServiceTx) DeleteAll(edgeStackID portainer.EdgeStackID) error {
keyPrefix := s.service.conn.ConvertToKey(int(edgeStackID))
statuses := make([]portainer.EdgeStackStatusForEnv, 0)
if err := s.tx.GetAllWithKeyPrefix(BucketName, keyPrefix, &portainer.EdgeStackStatusForEnv{}, dataservices.AppendFn(&statuses)); err != nil {
return fmt.Errorf("unable to retrieve EdgeStackStatus for EdgeStack %d: %w", edgeStackID, err)
}
for _, status := range statuses {
if err := s.tx.DeleteObject(BucketName, s.service.key(edgeStackID, status.EndpointID)); err != nil {
return fmt.Errorf("unable to delete EdgeStackStatus for EdgeStack %d and Endpoint %d: %w", edgeStackID, status.EndpointID, err)
}
}
return nil
}
func (s ServiceTx) Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error {
for _, envID := range relatedEnvironmentsIDs {
existingStatus, err := s.Read(edgeStackID, envID)
if err != nil && !dataservices.IsErrObjectNotFound(err) {
return fmt.Errorf("unable to retrieve status for environment %d: %w", envID, err)
}
var deploymentInfo portainer.StackDeploymentInfo
if existingStatus != nil {
deploymentInfo = existingStatus.DeploymentInfo
}
if err := s.Update(edgeStackID, envID, &portainer.EdgeStackStatusForEnv{
EndpointID: envID,
Status: []portainer.EdgeStackDeploymentStatus{},
DeploymentInfo: deploymentInfo,
}); err != nil {
return err
}
}
return nil
}
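For orientation, a usage sketch of the new per-environment status service (not from the diff; the service wiring and IDs are illustrative). Keys are composed as edgeStackID+endpointID, which is what makes the prefix-based ReadAll work.
package example
import (
	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/dataservices"
)
// recordDeployment stores one environment's status for an edge stack and then
// lists every status recorded for that stack.
func recordDeployment(store dataservices.EdgeStackStatusService, stackID portainer.EdgeStackID, envID portainer.EndpointID) ([]portainer.EdgeStackStatusForEnv, error) {
	status := &portainer.EdgeStackStatusForEnv{
		EndpointID: envID,
		Status:     []portainer.EdgeStackDeploymentStatus{},
	}
	if err := store.Create(stackID, envID, status); err != nil {
		return nil, err
	}
	return store.ReadAll(stackID)
}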


@@ -6,8 +6,6 @@ import (
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/internal/edge/cache" "github.com/portainer/portainer/api/internal/edge/cache"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. // BucketName represents the name of the bucket where this service stores data.
@@ -16,21 +14,20 @@ const BucketName = "endpoint_relations"
// Service represents a service for managing environment(endpoint) relation data. // Service represents a service for managing environment(endpoint) relation data.
type Service struct { type Service struct {
connection portainer.Connection connection portainer.Connection
updateStackFn func(ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error
updateStackFnTx func(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error updateStackFnTx func(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error
endpointRelationsCache []portainer.EndpointRelation endpointRelationsCache []portainer.EndpointRelation
mu sync.Mutex mu sync.Mutex
} }
var _ dataservices.EndpointRelationService = &Service{}
func (service *Service) BucketName() string { func (service *Service) BucketName() string {
return BucketName return BucketName
} }
func (service *Service) RegisterUpdateStackFunction( func (service *Service) RegisterUpdateStackFunction(
updateFunc func(portainer.EdgeStackID, func(*portainer.EdgeStack)) error,
updateFuncTx func(portainer.Transaction, portainer.EdgeStackID, func(*portainer.EdgeStack)) error, updateFuncTx func(portainer.Transaction, portainer.EdgeStackID, func(*portainer.EdgeStack)) error,
) { ) {
service.updateStackFn = updateFunc
service.updateStackFnTx = updateFuncTx service.updateStackFnTx = updateFuncTx
} }
@@ -89,94 +86,26 @@ func (service *Service) Create(endpointRelation *portainer.EndpointRelation) err
// UpdateEndpointRelation updates an Environment(Endpoint) relation object // UpdateEndpointRelation updates an Environment(Endpoint) relation object
func (service *Service) UpdateEndpointRelation(endpointID portainer.EndpointID, endpointRelation *portainer.EndpointRelation) error { func (service *Service) UpdateEndpointRelation(endpointID portainer.EndpointID, endpointRelation *portainer.EndpointRelation) error {
previousRelationState, _ := service.EndpointRelation(endpointID) return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).UpdateEndpointRelation(endpointID, endpointRelation)
})
}
identifier := service.connection.ConvertToKey(int(endpointID)) func (service *Service) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
err := service.connection.UpdateObject(BucketName, identifier, endpointRelation) return service.connection.UpdateTx(func(tx portainer.Transaction) error {
cache.Del(endpointID) return service.Tx(tx).AddEndpointRelationsForEdgeStack(endpointIDs, edgeStackID)
if err != nil { })
return err }
}
updatedRelationState, _ := service.EndpointRelation(endpointID) func (service *Service) RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
return service.connection.UpdateTx(func(tx portainer.Transaction) error {
service.mu.Lock() return service.Tx(tx).RemoveEndpointRelationsForEdgeStack(endpointIDs, edgeStackID)
service.endpointRelationsCache = nil })
service.mu.Unlock()
service.updateEdgeStacksAfterRelationChange(previousRelationState, updatedRelationState)
return nil
} }
// DeleteEndpointRelation deletes an Environment(Endpoint) relation object // DeleteEndpointRelation deletes an Environment(Endpoint) relation object
func (service *Service) DeleteEndpointRelation(endpointID portainer.EndpointID) error { func (service *Service) DeleteEndpointRelation(endpointID portainer.EndpointID) error {
deletedRelation, _ := service.EndpointRelation(endpointID) return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).DeleteEndpointRelation(endpointID)
identifier := service.connection.ConvertToKey(int(endpointID)) })
err := service.connection.DeleteObject(BucketName, identifier)
cache.Del(endpointID)
if err != nil {
return err
}
service.mu.Lock()
service.endpointRelationsCache = nil
service.mu.Unlock()
service.updateEdgeStacksAfterRelationChange(deletedRelation, nil)
return nil
}
func (service *Service) updateEdgeStacksAfterRelationChange(previousRelationState *portainer.EndpointRelation, updatedRelationState *portainer.EndpointRelation) {
relations, _ := service.EndpointRelations()
stacksToUpdate := map[portainer.EdgeStackID]bool{}
if previousRelationState != nil {
for stackId, enabled := range previousRelationState.EdgeStacks {
// flag stack for update if stack is not in the updated relation state
// = stack has been removed for this relation
// or this relation has been deleted
if enabled && (updatedRelationState == nil || !updatedRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true
}
}
}
if updatedRelationState != nil {
for stackId, enabled := range updatedRelationState.EdgeStacks {
// flag stack for update if stack is not in the previous relation state
// = stack has been added for this relation
if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true
}
}
}
// for each stack referenced by the updated relation
// list how many time this stack is referenced in all relations
// in order to update the stack deployments count
for refStackId, refStackEnabled := range stacksToUpdate {
if !refStackEnabled {
continue
}
numDeployments := 0
for _, r := range relations {
for sId, enabled := range r.EdgeStacks {
if enabled && sId == refStackId {
numDeployments += 1
}
}
}
if err := service.updateStackFn(refStackId, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments = numDeployments
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
}
} }


@ -0,0 +1,104 @@
package endpointrelation
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/database/boltdb"
"github.com/portainer/portainer/api/internal/edge/cache"
"github.com/stretchr/testify/require"
)
func TestUpdateRelation(t *testing.T) {
const endpointID = 1
const edgeStackID1 = 1
const edgeStackID2 = 2
var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()}
err := conn.Open()
require.NoError(t, err)
defer conn.Close()
service, err := NewService(conn)
require.NoError(t, err)
updateStackFnTxCalled := false
edgeStacks := make(map[portainer.EdgeStackID]portainer.EdgeStack)
edgeStacks[edgeStackID1] = portainer.EdgeStack{ID: edgeStackID1}
edgeStacks[edgeStackID2] = portainer.EdgeStack{ID: edgeStackID2}
service.RegisterUpdateStackFunction(func(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error {
updateStackFnTxCalled = true
s, ok := edgeStacks[ID]
require.True(t, ok)
updateFunc(&s)
edgeStacks[ID] = s
return nil
})
// Nil relation
cache.Set(endpointID, []byte("value"))
err = service.UpdateEndpointRelation(endpointID, nil)
_, cacheKeyExists := cache.Get(endpointID)
require.NoError(t, err)
require.False(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
// Add a relation to two edge stacks
cache.Set(endpointID, []byte("value"))
err = service.UpdateEndpointRelation(endpointID, &portainer.EndpointRelation{
EndpointID: endpointID,
EdgeStacks: map[portainer.EdgeStackID]bool{
edgeStackID1: true,
edgeStackID2: true,
},
})
_, cacheKeyExists = cache.Get(endpointID)
require.NoError(t, err)
require.True(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
require.Equal(t, 1, edgeStacks[edgeStackID1].NumDeployments)
require.Equal(t, 1, edgeStacks[edgeStackID2].NumDeployments)
// Remove a relation to one edge stack
updateStackFnTxCalled = false
cache.Set(endpointID, []byte("value"))
err = service.UpdateEndpointRelation(endpointID, &portainer.EndpointRelation{
EndpointID: endpointID,
EdgeStacks: map[portainer.EdgeStackID]bool{
2: true,
},
})
_, cacheKeyExists = cache.Get(endpointID)
require.NoError(t, err)
require.True(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
require.Equal(t, 0, edgeStacks[edgeStackID1].NumDeployments)
require.Equal(t, 1, edgeStacks[edgeStackID2].NumDeployments)
// Delete the relation
updateStackFnTxCalled = false
cache.Set(endpointID, []byte("value"))
err = service.DeleteEndpointRelation(endpointID)
_, cacheKeyExists = cache.Get(endpointID)
require.NoError(t, err)
require.True(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
require.Equal(t, 0, edgeStacks[edgeStackID1].NumDeployments)
require.Equal(t, 0, edgeStacks[edgeStackID2].NumDeployments)
}


@ -13,6 +13,8 @@ type ServiceTx struct {
tx portainer.Transaction tx portainer.Transaction
} }
var _ dataservices.EndpointRelationService = &ServiceTx{}
func (service ServiceTx) BucketName() string { func (service ServiceTx) BucketName() string {
return BucketName return BucketName
} }
@ -74,6 +76,66 @@ func (service ServiceTx) UpdateEndpointRelation(endpointID portainer.EndpointID,
return nil return nil
} }
func (service ServiceTx) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
for _, endpointID := range endpointIDs {
rel, err := service.EndpointRelation(endpointID)
if err != nil {
return err
}
rel.EdgeStacks[edgeStackID] = true
identifier := service.service.connection.ConvertToKey(int(endpointID))
err = service.tx.UpdateObject(BucketName, identifier, rel)
cache.Del(endpointID)
if err != nil {
return err
}
}
service.service.mu.Lock()
service.service.endpointRelationsCache = nil
service.service.mu.Unlock()
if err := service.service.updateStackFnTx(service.tx, edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments += len(endpointIDs)
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
return nil
}
func (service ServiceTx) RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
for _, endpointID := range endpointIDs {
rel, err := service.EndpointRelation(endpointID)
if err != nil {
return err
}
delete(rel.EdgeStacks, edgeStackID)
identifier := service.service.connection.ConvertToKey(int(endpointID))
err = service.tx.UpdateObject(BucketName, identifier, rel)
cache.Del(endpointID)
if err != nil {
return err
}
}
service.service.mu.Lock()
service.service.endpointRelationsCache = nil
service.service.mu.Unlock()
if err := service.service.updateStackFnTx(service.tx, edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments -= len(endpointIDs)
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
return nil
}
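
A rough sketch of how a caller already inside a transaction might use these bulk helpers when environments are attached to or detached from an edge stack (the wrapper names are hypothetical; the usual portainer and dataservices imports are assumed):

func attachStackToEnvironments(tx dataservices.DataStoreTx, stackID portainer.EdgeStackID, envIDs []portainer.EndpointID) error {
    // Flags the stack in each relation and bumps NumDeployments once per
    // batch instead of recounting every relation afterwards.
    return tx.EndpointRelation().AddEndpointRelationsForEdgeStack(envIDs, stackID)
}

func detachStackFromEnvironments(tx dataservices.DataStoreTx, stackID portainer.EdgeStackID, envIDs []portainer.EndpointID) error {
    return tx.EndpointRelation().RemoveEndpointRelationsForEdgeStack(envIDs, stackID)
}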
// DeleteEndpointRelation deletes an Environment(Endpoint) relation object // DeleteEndpointRelation deletes an Environment(Endpoint) relation object
func (service ServiceTx) DeleteEndpointRelation(endpointID portainer.EndpointID) error { func (service ServiceTx) DeleteEndpointRelation(endpointID portainer.EndpointID) error {
deletedRelation, _ := service.EndpointRelation(endpointID) deletedRelation, _ := service.EndpointRelation(endpointID)
@ -124,53 +186,49 @@ func (service ServiceTx) cachedEndpointRelations() ([]portainer.EndpointRelation
} }
func (service ServiceTx) updateEdgeStacksAfterRelationChange(previousRelationState *portainer.EndpointRelation, updatedRelationState *portainer.EndpointRelation) { func (service ServiceTx) updateEdgeStacksAfterRelationChange(previousRelationState *portainer.EndpointRelation, updatedRelationState *portainer.EndpointRelation) {
relations, _ := service.EndpointRelations()
stacksToUpdate := map[portainer.EdgeStackID]bool{}
if previousRelationState != nil { if previousRelationState != nil {
for stackId, enabled := range previousRelationState.EdgeStacks { for stackId, enabled := range previousRelationState.EdgeStacks {
// flag stack for update if stack is not in the updated relation state // flag stack for update if stack is not in the updated relation state
// = stack has been removed for this relation // = stack has been removed for this relation
// or this relation has been deleted // or this relation has been deleted
if enabled && (updatedRelationState == nil || !updatedRelationState.EdgeStacks[stackId]) { if enabled && (updatedRelationState == nil || !updatedRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true if err := service.service.updateStackFnTx(service.tx, stackId, func(edgeStack *portainer.EdgeStack) {
} // Sanity check
} if edgeStack.NumDeployments <= 0 {
} log.Error().
Int("edgestack_id", int(edgeStack.ID)).
Int("endpoint_id", int(previousRelationState.EndpointID)).
Int("num_deployments", edgeStack.NumDeployments).
Msg("cannot decrement the number of deployments for an edge stack with zero deployments")
if updatedRelationState != nil { return
for stackId, enabled := range updatedRelationState.EdgeStacks { }
// flag stack for update if stack is not in the previous relation state
// = stack has been added for this relation
if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true
}
}
}
// for each stack referenced by the updated relation edgeStack.NumDeployments--
// list how many time this stack is referenced in all relations }); err != nil {
// in order to update the stack deployments count log.Error().Err(err).Msg("could not update the number of deployments")
for refStackId, refStackEnabled := range stacksToUpdate {
if !refStackEnabled {
continue
}
numDeployments := 0
for _, r := range relations {
for sId, enabled := range r.EdgeStacks {
if enabled && sId == refStackId {
numDeployments += 1
} }
cache.Del(previousRelationState.EndpointID)
} }
} }
}
if err := service.service.updateStackFnTx(service.tx, refStackId, func(edgeStack *portainer.EdgeStack) { if updatedRelationState == nil {
edgeStack.NumDeployments = numDeployments return
}); err != nil { }
log.Error().Err(err).Msg("could not update the number of deployments")
for stackId, enabled := range updatedRelationState.EdgeStacks {
// flag stack for update if stack is not in the previous relation state
// = stack has been added for this relation
if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) {
if err := service.service.updateStackFnTx(service.tx, stackId, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments++
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
cache.Del(updatedRelationState.EndpointID)
} }
} }
} }


@ -12,6 +12,7 @@ type (
        EdgeGroup() EdgeGroupService
        EdgeJob() EdgeJobService
        EdgeStack() EdgeStackService
+       EdgeStackStatus() EdgeStackStatusService
        Endpoint() EndpointService
        EndpointGroup() EndpointGroupService
        EndpointRelation() EndpointRelationService
@ -39,8 +40,8 @@ type (
        Open() (newStore bool, err error)
        Init() error
        Close() error
-       UpdateTx(func(DataStoreTx) error) error
-       ViewTx(func(DataStoreTx) error) error
+       UpdateTx(func(tx DataStoreTx) error) error
+       ViewTx(func(tx DataStoreTx) error) error
        MigrateData() error
        Rollback(force bool) error
        CheckCurrentEdition() error
@ -89,6 +90,16 @@ type (
        BucketName() string
    }

+   EdgeStackStatusService interface {
+       Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error
+       Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error)
+       ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error)
+       Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error
+       Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error
+       DeleteAll(edgeStackID portainer.EdgeStackID) error
+       Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error
+   }
+
    // EndpointService represents a service for managing environment(endpoint) data
    EndpointService interface {
        Endpoint(ID portainer.EndpointID) (*portainer.Endpoint, error)
@ -115,6 +126,8 @@ type (
        EndpointRelation(EndpointID portainer.EndpointID) (*portainer.EndpointRelation, error)
        Create(endpointRelation *portainer.EndpointRelation) error
        UpdateEndpointRelation(EndpointID portainer.EndpointID, endpointRelation *portainer.EndpointRelation) error
+       AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error
+       RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error
        DeleteEndpointRelation(EndpointID portainer.EndpointID) error
        BucketName() string
    }
@ -157,6 +170,7 @@ type (
    SnapshotService interface {
        BaseCRUD[portainer.Snapshot, portainer.EndpointID]
+       ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*portainer.Snapshot, error)
    }

    // SSLSettingsService represents a service for managing application settings


@ -38,3 +38,33 @@ func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
func (service *Service) Create(snapshot *portainer.Snapshot) error { func (service *Service) Create(snapshot *portainer.Snapshot) error {
return service.Connection.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot) return service.Connection.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
} }
func (service *Service) ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*portainer.Snapshot, error) {
var snapshot *portainer.Snapshot
err := service.Connection.ViewTx(func(tx portainer.Transaction) error {
var err error
snapshot, err = service.Tx(tx).ReadWithoutSnapshotRaw(ID)
return err
})
return snapshot, err
}
func (service *Service) ReadRawMessage(ID portainer.EndpointID) (*portainer.SnapshotRawMessage, error) {
var snapshot *portainer.SnapshotRawMessage
err := service.Connection.ViewTx(func(tx portainer.Transaction) error {
var err error
snapshot, err = service.Tx(tx).ReadRawMessage(ID)
return err
})
return snapshot, err
}
func (service *Service) CreateRawMessage(snapshot *portainer.SnapshotRawMessage) error {
return service.Connection.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
}
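
As an illustration only (assuming the data store exposes this service through a Snapshot() accessor, as it does for the other services listed in the interface above), a caller that only needs the snapshot counters could read the lightweight form:

func snapshotSummary(store dataservices.DataStore, endpointID portainer.EndpointID) (*portainer.Snapshot, error) {
    // Skips the raw Docker payload, keeping the read cheap when only
    // aggregate stats such as container and image counts are needed.
    return store.Snapshot().ReadWithoutSnapshotRaw(endpointID)
}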


@ -12,3 +12,42 @@ type ServiceTx struct {
func (service ServiceTx) Create(snapshot *portainer.Snapshot) error { func (service ServiceTx) Create(snapshot *portainer.Snapshot) error {
return service.Tx.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot) return service.Tx.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
} }
func (service ServiceTx) ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*portainer.Snapshot, error) {
var snapshot struct {
Docker *struct {
X struct{} `json:"DockerSnapshotRaw"`
*portainer.DockerSnapshot
} `json:"Docker"`
portainer.Snapshot
}
identifier := service.Connection.ConvertToKey(int(ID))
if err := service.Tx.GetObject(service.Bucket, identifier, &snapshot); err != nil {
return nil, err
}
if snapshot.Docker != nil {
snapshot.Snapshot.Docker = snapshot.Docker.DockerSnapshot
}
return &snapshot.Snapshot, nil
}
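
The anonymous wrapper above relies on a standard encoding/json behaviour: a field declared at the top level shadows a same-named field promoted from an embedded struct, so the heavy raw payload is matched by the empty X field and never decoded. A generic illustration of the trick (type and field names are made up for the example):

type dockerRaw struct {
    Everything map[string]any `json:"Everything"`
}

type full struct {
    Name string     `json:"Name"`
    Raw  *dockerRaw `json:"Raw"` // large nested object we want to skip
}

type withoutRaw struct {
    X struct{} `json:"Raw"` // shadows full.Raw, so the object is discarded on decode
    full
}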
func (service ServiceTx) ReadRawMessage(ID portainer.EndpointID) (*portainer.SnapshotRawMessage, error) {
var snapshot = portainer.SnapshotRawMessage{}
identifier := service.Connection.ConvertToKey(int(ID))
if err := service.Tx.GetObject(service.Bucket, identifier, &snapshot); err != nil {
return nil, err
}
return &snapshot, nil
}
func (service ServiceTx) CreateRawMessage(snapshot *portainer.SnapshotRawMessage) error {
return service.Tx.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
}


@ -40,13 +40,11 @@ func (store *Store) MigrateData() error {
    }

    // before we alter anything in the DB, create a backup
-   _, err = store.Backup("")
-   if err != nil {
+   if _, err := store.Backup(""); err != nil {
        return errors.Wrap(err, "while backing up database")
    }

-   err = store.FailSafeMigrate(migrator, version)
-   if err != nil {
+   if err := store.FailSafeMigrate(migrator, version); err != nil {
        err = errors.Wrap(err, "failed to migrate database")

        log.Warn().Err(err).Msg("migration failed, restoring database to previous version")
@ -85,7 +83,9 @@ func (store *Store) newMigratorParameters(version *models.Version, flags *portai
        DockerhubService:        store.DockerHubService,
        AuthorizationService:    authorization.NewService(store),
        EdgeStackService:        store.EdgeStackService,
+       EdgeStackStatusService:  store.EdgeStackStatusService,
        EdgeJobService:          store.EdgeJobService,
+       EdgeGroupService:        store.EdgeGroupService,
        TunnelServerService:     store.TunnelServerService,
        PendingActionsService:   store.PendingActionsService,
    }
@ -140,8 +140,7 @@ func (store *Store) connectionRollback(force bool) error {
        }
    }

-   err := store.Restore()
-   if err != nil {
+   if err := store.Restore(); err != nil {
        return err
    }


@ -0,0 +1,31 @@
package migrator
import portainer "github.com/portainer/portainer/api"
func (m *Migrator) migrateEdgeStacksStatuses_2_31_0() error {
edgeStacks, err := m.edgeStackService.EdgeStacks()
if err != nil {
return err
}
for _, edgeStack := range edgeStacks {
for envID, status := range edgeStack.Status {
if err := m.edgeStackStatusService.Create(edgeStack.ID, envID, &portainer.EdgeStackStatusForEnv{
EndpointID: envID,
Status: status.Status,
DeploymentInfo: status.DeploymentInfo,
ReadyRePullImage: status.ReadyRePullImage,
}); err != nil {
return err
}
}
edgeStack.Status = nil
if err := m.edgeStackService.UpdateEdgeStack(edgeStack.ID, &edgeStack); err != nil {
return err
}
}
return nil
}
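
After this migration, per-environment statuses live in their own bucket and are read back through the dedicated service. A small hedged sketch of a transactional reader (the helper name is illustrative):

func statusesAfterMigration(tx dataservices.DataStoreTx, stackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error) {
    // Returns every environment's status entry for the given edge stack
    // from the new edge stack status bucket.
    return tx.EdgeStackStatus().ReadAll(stackID)
}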


@ -0,0 +1,33 @@
package migrator
import (
"github.com/pkg/errors"
portainer "github.com/portainer/portainer/api"
perrors "github.com/portainer/portainer/api/dataservices/errors"
"github.com/portainer/portainer/api/internal/endpointutils"
)
func (m *Migrator) addEndpointRelationForEdgeAgents_2_32_0() error {
endpoints, err := m.endpointService.Endpoints()
if err != nil {
return err
}
for _, endpoint := range endpoints {
if endpointutils.IsEdgeEndpoint(&endpoint) {
_, err := m.endpointRelationService.EndpointRelation(endpoint.ID)
if err != nil && errors.Is(err, perrors.ErrObjectNotFound) {
relation := &portainer.EndpointRelation{
EndpointID: endpoint.ID,
EdgeStacks: make(map[portainer.EdgeStackID]bool),
}
if err := m.endpointRelationService.Create(relation); err != nil {
return err
}
}
}
}
return nil
}


@ -0,0 +1,23 @@
package migrator
import (
"github.com/portainer/portainer/api/roar"
)
func (m *Migrator) migrateEdgeGroupEndpointsToRoars_2_33_0() error {
egs, err := m.edgeGroupService.ReadAll()
if err != nil {
return err
}
for _, eg := range egs {
eg.EndpointIDs = roar.FromSlice(eg.Endpoints)
eg.Endpoints = nil
if err := m.edgeGroupService.Update(eg.ID, &eg); err != nil {
return err
}
}
return nil
}


@ -94,6 +94,10 @@ func (m *Migrator) updateEdgeStackStatusForDB100() error {
            continue
        }

+       if environmentStatus.Details == nil {
+           continue
+       }
+
        statusArray := []portainer.EdgeStackDeploymentStatus{}
        if environmentStatus.Details.Pending {
            statusArray = append(statusArray, portainer.EdgeStackDeploymentStatus{


@ -18,8 +18,7 @@ func (m *Migrator) updateResourceControlsToDBVersion22() error {
    for _, resourceControl := range legacyResourceControls {
        resourceControl.AdministratorsOnly = false

-       err := m.resourceControlService.Update(resourceControl.ID, &resourceControl)
-       if err != nil {
+       if err := m.resourceControlService.Update(resourceControl.ID, &resourceControl); err != nil {
            return err
        }
    }
@ -42,8 +41,8 @@ func (m *Migrator) updateUsersAndRolesToDBVersion22() error {
    for _, user := range legacyUsers {
        user.PortainerAuthorizations = authorization.DefaultPortainerAuthorizations()

-       err = m.userService.Update(user.ID, &user)
-       if err != nil {
+       if err := m.userService.Update(user.ID, &user); err != nil {
            return err
        }
    }
@ -52,38 +51,47 @@
    if err != nil {
        return err
    }

    endpointAdministratorRole.Priority = 1
    endpointAdministratorRole.Authorizations = authorization.DefaultEndpointAuthorizationsForEndpointAdministratorRole()

-   err = m.roleService.Update(endpointAdministratorRole.ID, endpointAdministratorRole)
+   if err := m.roleService.Update(endpointAdministratorRole.ID, endpointAdministratorRole); err != nil {
+       return err
+   }

    helpDeskRole, err := m.roleService.Read(portainer.RoleID(2))
    if err != nil {
        return err
    }

    helpDeskRole.Priority = 2
    helpDeskRole.Authorizations = authorization.DefaultEndpointAuthorizationsForHelpDeskRole(settings.AllowVolumeBrowserForRegularUsers)

-   err = m.roleService.Update(helpDeskRole.ID, helpDeskRole)
+   if err := m.roleService.Update(helpDeskRole.ID, helpDeskRole); err != nil {
+       return err
+   }

    standardUserRole, err := m.roleService.Read(portainer.RoleID(3))
    if err != nil {
        return err
    }

    standardUserRole.Priority = 3
    standardUserRole.Authorizations = authorization.DefaultEndpointAuthorizationsForStandardUserRole(settings.AllowVolumeBrowserForRegularUsers)

-   err = m.roleService.Update(standardUserRole.ID, standardUserRole)
+   if err := m.roleService.Update(standardUserRole.ID, standardUserRole); err != nil {
+       return err
+   }

    readOnlyUserRole, err := m.roleService.Read(portainer.RoleID(4))
    if err != nil {
        return err
    }

    readOnlyUserRole.Priority = 4
    readOnlyUserRole.Authorizations = authorization.DefaultEndpointAuthorizationsForReadOnlyUserRole(settings.AllowVolumeBrowserForRegularUsers)

-   err = m.roleService.Update(readOnlyUserRole.ID, readOnlyUserRole)
-   if err != nil {
+   if err := m.roleService.Update(readOnlyUserRole.ID, readOnlyUserRole); err != nil {
        return err
    }


@ -75,6 +75,10 @@ func (m *Migrator) updateEdgeStackStatusForDB80() error {
    for _, edgeStack := range edgeStacks {
        for endpointId, status := range edgeStack.Status {
+           if status.Details == nil {
+               status.Details = &portainer.EdgeStackStatusDetails{}
+           }
+
            switch status.Type {
            case portainer.EdgeStackStatusPending:
                status.Details.Pending = true
@ -93,10 +97,10 @@ func (m *Migrator) updateEdgeStackStatusForDB80() error {
            edgeStack.Status[endpointId] = status
        }

-       err = m.edgeStackService.UpdateEdgeStack(edgeStack.ID, &edgeStack)
-       if err != nil {
+       if err := m.edgeStackService.UpdateEdgeStack(edgeStack.ID, &edgeStack); err != nil {
            return err
        }
    }

    return nil
}


@ -3,12 +3,13 @@ package migrator
import ( import (
"errors" "errors"
"github.com/Masterminds/semver"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/database/models" "github.com/portainer/portainer/api/database/models"
"github.com/portainer/portainer/api/dataservices/dockerhub" "github.com/portainer/portainer/api/dataservices/dockerhub"
"github.com/portainer/portainer/api/dataservices/edgegroup"
"github.com/portainer/portainer/api/dataservices/edgejob" "github.com/portainer/portainer/api/dataservices/edgejob"
"github.com/portainer/portainer/api/dataservices/edgestack" "github.com/portainer/portainer/api/dataservices/edgestack"
"github.com/portainer/portainer/api/dataservices/edgestackstatus"
"github.com/portainer/portainer/api/dataservices/endpoint" "github.com/portainer/portainer/api/dataservices/endpoint"
"github.com/portainer/portainer/api/dataservices/endpointgroup" "github.com/portainer/portainer/api/dataservices/endpointgroup"
"github.com/portainer/portainer/api/dataservices/endpointrelation" "github.com/portainer/portainer/api/dataservices/endpointrelation"
@ -27,6 +28,8 @@ import (
"github.com/portainer/portainer/api/dataservices/user" "github.com/portainer/portainer/api/dataservices/user"
"github.com/portainer/portainer/api/dataservices/version" "github.com/portainer/portainer/api/dataservices/version"
"github.com/portainer/portainer/api/internal/authorization" "github.com/portainer/portainer/api/internal/authorization"
"github.com/Masterminds/semver"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
) )
@ -56,7 +59,9 @@ type (
authorizationService *authorization.Service authorizationService *authorization.Service
dockerhubService *dockerhub.Service dockerhubService *dockerhub.Service
edgeStackService *edgestack.Service edgeStackService *edgestack.Service
edgeStackStatusService *edgestackstatus.Service
edgeJobService *edgejob.Service edgeJobService *edgejob.Service
edgeGroupService *edgegroup.Service
TunnelServerService *tunnelserver.Service TunnelServerService *tunnelserver.Service
pendingActionsService *pendingactions.Service pendingActionsService *pendingactions.Service
} }
@ -84,7 +89,9 @@ type (
AuthorizationService *authorization.Service AuthorizationService *authorization.Service
DockerhubService *dockerhub.Service DockerhubService *dockerhub.Service
EdgeStackService *edgestack.Service EdgeStackService *edgestack.Service
EdgeStackStatusService *edgestackstatus.Service
EdgeJobService *edgejob.Service EdgeJobService *edgejob.Service
EdgeGroupService *edgegroup.Service
TunnelServerService *tunnelserver.Service TunnelServerService *tunnelserver.Service
PendingActionsService *pendingactions.Service PendingActionsService *pendingactions.Service
} }
@ -114,12 +121,15 @@ func NewMigrator(parameters *MigratorParameters) *Migrator {
authorizationService: parameters.AuthorizationService, authorizationService: parameters.AuthorizationService,
dockerhubService: parameters.DockerhubService, dockerhubService: parameters.DockerhubService,
edgeStackService: parameters.EdgeStackService, edgeStackService: parameters.EdgeStackService,
edgeStackStatusService: parameters.EdgeStackStatusService,
edgeJobService: parameters.EdgeJobService, edgeJobService: parameters.EdgeJobService,
edgeGroupService: parameters.EdgeGroupService,
TunnelServerService: parameters.TunnelServerService, TunnelServerService: parameters.TunnelServerService,
pendingActionsService: parameters.PendingActionsService, pendingActionsService: parameters.PendingActionsService,
} }
migrator.initMigrations() migrator.initMigrations()
return migrator return migrator
} }
@ -242,6 +252,12 @@ func (m *Migrator) initMigrations() {
m.migratePendingActionsDataForDB130, m.migratePendingActionsDataForDB130,
) )
m.addMigrations("2.31.0", m.migrateEdgeStacksStatuses_2_31_0)
m.addMigrations("2.32.0", m.addEndpointRelationForEdgeAgents_2_32_0)
m.addMigrations("2.33.0", m.migrateEdgeGroupEndpointsToRoars_2_33_0)
// Add new migrations above... // Add new migrations above...
// One function per migration, each versions migration funcs in the same file. // One function per migration, each versions migration funcs in the same file.
} }

View file

@ -13,6 +13,7 @@ import (
"github.com/portainer/portainer/api/dataservices/edgegroup" "github.com/portainer/portainer/api/dataservices/edgegroup"
"github.com/portainer/portainer/api/dataservices/edgejob" "github.com/portainer/portainer/api/dataservices/edgejob"
"github.com/portainer/portainer/api/dataservices/edgestack" "github.com/portainer/portainer/api/dataservices/edgestack"
"github.com/portainer/portainer/api/dataservices/edgestackstatus"
"github.com/portainer/portainer/api/dataservices/endpoint" "github.com/portainer/portainer/api/dataservices/endpoint"
"github.com/portainer/portainer/api/dataservices/endpointgroup" "github.com/portainer/portainer/api/dataservices/endpointgroup"
"github.com/portainer/portainer/api/dataservices/endpointrelation" "github.com/portainer/portainer/api/dataservices/endpointrelation"
@ -39,6 +40,8 @@ import (
"github.com/segmentio/encoding/json" "github.com/segmentio/encoding/json"
) )
var _ dataservices.DataStore = &Store{}
// Store defines the implementation of portainer.DataStore using // Store defines the implementation of portainer.DataStore using
// BoltDB as the storage system. // BoltDB as the storage system.
type Store struct { type Store struct {
@ -51,6 +54,7 @@ type Store struct {
EdgeGroupService *edgegroup.Service EdgeGroupService *edgegroup.Service
EdgeJobService *edgejob.Service EdgeJobService *edgejob.Service
EdgeStackService *edgestack.Service EdgeStackService *edgestack.Service
EdgeStackStatusService *edgestackstatus.Service
EndpointGroupService *endpointgroup.Service EndpointGroupService *endpointgroup.Service
EndpointService *endpoint.Service EndpointService *endpoint.Service
EndpointRelationService *endpointrelation.Service EndpointRelationService *endpointrelation.Service
@ -107,7 +111,13 @@ func (store *Store) initServices() error {
        return err
    }
    store.EdgeStackService = edgeStackService
-   endpointRelationService.RegisterUpdateStackFunction(edgeStackService.UpdateEdgeStackFunc, edgeStackService.UpdateEdgeStackFuncTx)
+   endpointRelationService.RegisterUpdateStackFunction(edgeStackService.UpdateEdgeStackFuncTx)
+
+   edgeStackStatusService, err := edgestackstatus.NewService(store.connection)
+   if err != nil {
+       return err
+   }
+   store.EdgeStackStatusService = edgeStackStatusService

    edgeGroupService, err := edgegroup.NewService(store.connection)
    if err != nil {
@ -269,6 +279,10 @@ func (store *Store) EdgeStack() dataservices.EdgeStackService {
    return store.EdgeStackService
}

+func (store *Store) EdgeStackStatus() dataservices.EdgeStackStatusService {
+   return store.EdgeStackStatusService
+}
+
// Environment(Endpoint) gives access to the Environment(Endpoint) data management layer
func (store *Store) Endpoint() dataservices.EndpointService {
    return store.EndpointService


@ -32,6 +32,10 @@ func (tx *StoreTx) EdgeStack() dataservices.EdgeStackService {
    return tx.store.EdgeStackService.Tx(tx.tx)
}

+func (tx *StoreTx) EdgeStackStatus() dataservices.EdgeStackStatusService {
+   return tx.store.EdgeStackStatusService.Tx(tx.tx)
+}
+
func (tx *StoreTx) Endpoint() dataservices.EndpointService {
    return tx.store.EndpointService.Tx(tx.tx)
}


@ -8,6 +8,7 @@
} }
], ],
"edge_stack": null, "edge_stack": null,
"edge_stack_status": null,
"edgegroups": null, "edgegroups": null,
"edgejobs": null, "edgejobs": null,
"endpoint_groups": [ "endpoint_groups": [
@ -120,6 +121,10 @@
"Ecr": { "Ecr": {
"Region": "" "Region": ""
}, },
"Github": {
"OrganisationName": "",
"UseOrganisation": false
},
"Gitlab": { "Gitlab": {
"InstanceURL": "", "InstanceURL": "",
"ProjectId": 0, "ProjectId": 0,
@ -610,7 +615,7 @@
"RequiredPasswordLength": 12 "RequiredPasswordLength": 12
}, },
"KubeconfigExpiry": "0", "KubeconfigExpiry": "0",
"KubectlShellImage": "portainer/kubectl-shell:2.27.0-rc1", "KubectlShellImage": "portainer/kubectl-shell:2.32.0",
"LDAPSettings": { "LDAPSettings": {
"AnonymousMode": true, "AnonymousMode": true,
"AutoCreateUsers": true, "AutoCreateUsers": true,
@ -678,14 +683,11 @@
"Images": null, "Images": null,
"Info": { "Info": {
"Architecture": "", "Architecture": "",
"BridgeNfIp6tables": false,
"BridgeNfIptables": false,
"CDISpecDirs": null, "CDISpecDirs": null,
"CPUSet": false, "CPUSet": false,
"CPUShares": false, "CPUShares": false,
"CgroupDriver": "", "CgroupDriver": "",
"ContainerdCommit": { "ContainerdCommit": {
"Expected": "",
"ID": "" "ID": ""
}, },
"Containers": 0, "Containers": 0,
@ -709,7 +711,6 @@
"IndexServerAddress": "", "IndexServerAddress": "",
"InitBinary": "", "InitBinary": "",
"InitCommit": { "InitCommit": {
"Expected": "",
"ID": "" "ID": ""
}, },
"Isolation": "", "Isolation": "",
@ -738,7 +739,6 @@
}, },
"RegistryConfig": null, "RegistryConfig": null,
"RuncCommit": { "RuncCommit": {
"Expected": "",
"ID": "" "ID": ""
}, },
"Runtimes": null, "Runtimes": null,
@ -780,6 +780,7 @@
"ImageCount": 9, "ImageCount": 9,
"IsPodman": false, "IsPodman": false,
"NodeCount": 0, "NodeCount": 0,
"PerformanceMetrics": null,
"RunningContainerCount": 5, "RunningContainerCount": 5,
"ServiceCount": 0, "ServiceCount": 0,
"StackCount": 2, "StackCount": 2,
@ -943,7 +944,7 @@
} }
], ],
"version": { "version": {
"VERSION": "{\"SchemaVersion\":\"2.27.0-rc1\",\"MigratorCount\":0,\"Edition\":1,\"InstanceID\":\"463d5c47-0ea5-4aca-85b1-405ceefee254\"}" "VERSION": "{\"SchemaVersion\":\"2.32.0\",\"MigratorCount\":1,\"Edition\":1,\"InstanceID\":\"463d5c47-0ea5-4aca-85b1-405ceefee254\"}"
}, },
"webhooks": null "webhooks": null
} }


@ -1,15 +0,0 @@
package validate
import (
"github.com/go-playground/validator/v10"
portainer "github.com/portainer/portainer/api"
)
var validate *validator.Validate
func ValidateLDAPSettings(ldp *portainer.LDAPSettings) error {
validate = validator.New()
registerValidationMethods(validate)
return validate.Struct(ldp)
}


@ -1,61 +0,0 @@
package validate
import (
"testing"
portainer "github.com/portainer/portainer/api"
)
func TestValidateLDAPSettings(t *testing.T) {
tests := []struct {
name string
ldap portainer.LDAPSettings
wantErr bool
}{
{
name: "Empty LDAP Settings",
ldap: portainer.LDAPSettings{},
wantErr: true,
},
{
name: "With URL",
ldap: portainer.LDAPSettings{
AnonymousMode: true,
URL: "192.168.0.1:323",
},
wantErr: false,
},
{
name: "Validate URL and URLs",
ldap: portainer.LDAPSettings{
AnonymousMode: true,
URL: "192.168.0.1:323",
},
wantErr: false,
},
{
name: "validate client ldap",
ldap: portainer.LDAPSettings{
AnonymousMode: false,
ReaderDN: "CN=LDAP API Service Account",
Password: "Qu**dfUUU**",
URL: "aukdc15.pgc.co:389",
TLSConfig: portainer.TLSConfiguration{
TLS: false,
TLSSkipVerify: false,
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := ValidateLDAPSettings(&tt.ldap)
if (err == nil) == tt.wantErr {
t.Errorf("No error expected but got %s", err)
}
})
}
}


@ -1,17 +0,0 @@
package validate
import (
"github.com/go-playground/validator/v10"
)
func registerValidationMethods(v *validator.Validate) {
v.RegisterValidation("validate_bool", ValidateBool)
}
/**
* Validation methods below are being used for custom validation
*/
func ValidateBool(fl validator.FieldLevel) bool {
_, ok := fl.Field().Interface().(bool)
return ok
}


@ -3,8 +3,8 @@ package client
import ( import (
"bytes" "bytes"
"errors" "errors"
"fmt"
"io" "io"
"maps"
"net/http" "net/http"
"strings" "strings"
"time" "time"
@ -73,19 +73,6 @@ func createLocalClient(endpoint *portainer.Endpoint) (*client.Client, error) {
) )
} }
func CreateClientFromEnv() (*client.Client, error) {
return client.NewClientWithOpts(
client.FromEnv,
client.WithAPIVersionNegotiation(),
)
}
func CreateSimpleClient() (*client.Client, error) {
return client.NewClientWithOpts(
client.WithAPIVersionNegotiation(),
)
}
func createTCPClient(endpoint *portainer.Endpoint, timeout *time.Duration) (*client.Client, error) { func createTCPClient(endpoint *portainer.Endpoint, timeout *time.Duration) (*client.Client, error) {
httpCli, err := httpClient(endpoint, timeout) httpCli, err := httpClient(endpoint, timeout)
if err != nil { if err != nil {
@ -141,7 +128,6 @@ func createAgentClient(endpoint *portainer.Endpoint, endpointURL string, signatu
type NodeNameTransport struct { type NodeNameTransport struct {
*http.Transport *http.Transport
nodeNames map[string]string
} }
func (t *NodeNameTransport) RoundTrip(req *http.Request) (*http.Response, error) { func (t *NodeNameTransport) RoundTrip(req *http.Request) (*http.Response, error) {
@ -176,28 +162,30 @@ func (t *NodeNameTransport) RoundTrip(req *http.Request) (*http.Response, error)
        return resp, nil
    }

-   t.nodeNames = make(map[string]string)
-   for _, r := range rs {
-       t.nodeNames[r.ID] = r.Portainer.Agent.NodeName
-   }
+   nodeNames, ok := req.Context().Value("nodeNames").(map[string]string)
+   if ok {
+       for idx, r := range rs {
+           // as there is no way to differentiate the same image available in multiple nodes only by their ID
+           // we append the index of the image in the payload response to match the node name later
+           // from the image.Summary[] list returned by docker's client.ImageList()
+           nodeNames[fmt.Sprintf("%s-%d", r.ID, idx)] = r.Portainer.Agent.NodeName
+       }
+   }

    return resp, err
}
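
A hedged caller-side sketch of the new context-based flow (the list call and option types are assumptions based on the Docker SDK, not part of this diff): the caller seeds the request context with a map, and the transport fills it while decoding the agent response.

func listImagesWithNodeNames(ctx context.Context, cli *client.Client) ([]image.Summary, map[string]string, error) {
    // The transport looks this map up via req.Context().Value("nodeNames").
    nodeNames := make(map[string]string)
    ctx = context.WithValue(ctx, "nodeNames", nodeNames)

    summaries, err := cli.ImageList(ctx, image.ListOptions{})
    if err != nil {
        return nil, nil, err
    }

    // Keys are "<imageID>-<index>", mirroring the order of the summaries.
    return summaries, nodeNames, nil
}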
func (t *NodeNameTransport) NodeNames() map[string]string {
return maps.Clone(t.nodeNames)
}
func httpClient(endpoint *portainer.Endpoint, timeout *time.Duration) (*http.Client, error) {
    transport := &NodeNameTransport{
        Transport: &http.Transport{},
    }

    if endpoint.TLSConfig.TLS {
-       tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig.TLSCACertPath, endpoint.TLSConfig.TLSCertPath, endpoint.TLSConfig.TLSKeyPath, endpoint.TLSConfig.TLSSkipVerify)
+       tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig)
        if err != nil {
            return nil, err
        }

        transport.TLSClientConfig = tlsConfig
    }


@ -0,0 +1,26 @@
package client
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/require"
)
func TestHttpClient(t *testing.T) {
// Valid TLS configuration
endpoint := &portainer.Endpoint{}
endpoint.TLSConfig = portainer.TLSConfiguration{TLS: true}
cli, err := httpClient(endpoint, nil)
require.NoError(t, err)
require.NotNil(t, cli)
// Invalid TLS configuration
endpoint.TLSConfig.TLSCertPath = "/invalid/path/client.crt"
endpoint.TLSConfig.TLSKeyPath = "/invalid/path/client.key"
cli, err = httpClient(endpoint, nil)
require.Error(t, err)
require.Nil(t, cli)
}


@ -38,10 +38,10 @@ func NewClientWithRegistry(registryClient *RegistryClient, clientFactory *docker
func (c *DigestClient) RemoteDigest(image Image) (digest.Digest, error) {
    ctx, cancel := c.timeoutContext()
    defer cancel()

    // Docker references with both a tag and digest are currently not supported
    if image.Tag != "" && image.Digest != "" {
-       err := image.trimDigest()
-       if err != nil {
+       if err := image.TrimDigest(); err != nil {
            return "", err
        }
    }
@ -69,7 +69,7 @@ func (c *DigestClient) RemoteDigest(image Image) (digest.Digest, error) {
    // Retrieve remote digest through HEAD request
    rmDigest, err := docker.GetDigest(ctx, sysCtx, rmRef)
    if err != nil {
-       // fallback to public registry for hub
+       // Fallback to public registry for hub
        if image.HubLink != "" {
            rmDigest, err = docker.GetDigest(ctx, c.sysCtx, rmRef)
            if err == nil {
@ -131,8 +131,7 @@ func ParseRepoDigests(repoDigests []string) []digest.Digest {
func ParseRepoTags(repoTags []string) []*Image {
    images := make([]*Image, 0)
    for _, repoTag := range repoTags {
-       image := ParseRepoTag(repoTag)
-       if image != nil {
+       if image := ParseRepoTag(repoTag); image != nil {
            images = append(images, image)
        }
    }
@ -147,7 +146,7 @@ func ParseRepoDigest(repoDigest string) digest.Digest {
    d, err := digest.Parse(strings.Split(repoDigest, "@")[1])
    if err != nil {
-       log.Warn().Msgf("Skip invalid repo digest item: %s [error: %v]", repoDigest, err)
+       log.Warn().Err(err).Str("digest", repoDigest).Msg("skip invalid repo item")
        return ""
    }


@ -26,7 +26,7 @@ type Image struct {
Digest digest.Digest Digest digest.Digest
HubLink string HubLink string
named reference.Named named reference.Named
opts ParseImageOptions Opts ParseImageOptions `json:"-"`
} }
// ParseImageOptions holds image options for parsing. // ParseImageOptions holds image options for parsing.
@ -43,9 +43,10 @@ func (i *Image) Name() string {
// FullName return the real full name may include Tag or Digest of the image, Tag first. // FullName return the real full name may include Tag or Digest of the image, Tag first.
func (i *Image) FullName() string { func (i *Image) FullName() string {
if i.Tag == "" { if i.Tag == "" {
return fmt.Sprintf("%s@%s", i.Name(), i.Digest) return i.Name() + "@" + i.Digest.String()
} }
return fmt.Sprintf("%s:%s", i.Name(), i.Tag)
return i.Name() + ":" + i.Tag
} }
// String returns the string representation of an image, including Tag and Digest if existed. // String returns the string representation of an image, including Tag and Digest if existed.
@ -66,22 +67,25 @@ func (i *Image) Reference() string {
func (i *Image) WithDigest(digest digest.Digest) (err error) { func (i *Image) WithDigest(digest digest.Digest) (err error) {
i.Digest = digest i.Digest = digest
i.named, err = reference.WithDigest(i.named, digest) i.named, err = reference.WithDigest(i.named, digest)
return err return err
} }
func (i *Image) WithTag(tag string) (err error) { func (i *Image) WithTag(tag string) (err error) {
i.Tag = tag i.Tag = tag
i.named, err = reference.WithTag(i.named, tag) i.named, err = reference.WithTag(i.named, tag)
return err return err
} }
func (i *Image) trimDigest() error { func (i *Image) TrimDigest() error {
i.Digest = "" i.Digest = ""
named, err := ParseImage(ParseImageOptions{Name: i.FullName()}) named, err := ParseImage(ParseImageOptions{Name: i.FullName()})
if err != nil { if err != nil {
return err return err
} }
i.named = &named i.named = &named
return nil return nil
} }
@ -92,11 +96,12 @@ func ParseImage(parseOpts ParseImageOptions) (Image, error) {
if err != nil { if err != nil {
return Image{}, errors.Wrapf(err, "parsing image %s failed", parseOpts.Name) return Image{}, errors.Wrapf(err, "parsing image %s failed", parseOpts.Name)
} }
// Add the latest tag if they did not provide one.
named = reference.TagNameOnly(named) named = reference.TagNameOnly(named)
i := Image{ i := Image{
opts: parseOpts, Opts: parseOpts,
named: named, named: named,
Domain: reference.Domain(named), Domain: reference.Domain(named),
Path: reference.Path(named), Path: reference.Path(named),
@ -122,15 +127,16 @@ func ParseImage(parseOpts ParseImageOptions) (Image, error) {
} }
func (i *Image) hubLink() (string, error) { func (i *Image) hubLink() (string, error) {
if i.opts.HubTpl != "" { if i.Opts.HubTpl != "" {
var out bytes.Buffer var out bytes.Buffer
tmpl, err := template.New("tmpl"). tmpl, err := template.New("tmpl").
Option("missingkey=error"). Option("missingkey=error").
Parse(i.opts.HubTpl) Parse(i.Opts.HubTpl)
if err != nil { if err != nil {
return "", err return "", err
} }
err = tmpl.Execute(&out, i) err = tmpl.Execute(&out, i)
return out.String(), err return out.String(), err
} }
@ -142,6 +148,7 @@ func (i *Image) hubLink() (string, error) {
prefix = "_" prefix = "_"
path = strings.Replace(i.Path, "library/", "", 1) path = strings.Replace(i.Path, "library/", "", 1)
} }
return "https://hub.docker.com/" + prefix + "/" + path, nil return "https://hub.docker.com/" + prefix + "/" + path, nil
case "docker.bintray.io", "jfrog-docker-reg2.bintray.io": case "docker.bintray.io", "jfrog-docker-reg2.bintray.io":
return "https://bintray.com/jfrog/reg2/" + strings.ReplaceAll(i.Path, "/", "%3A"), nil return "https://bintray.com/jfrog/reg2/" + strings.ReplaceAll(i.Path, "/", "%3A"), nil


@ -16,7 +16,7 @@ func TestImageParser(t *testing.T) {
}) })
is.NoError(err, "") is.NoError(err, "")
is.Equal("docker.io/portainer/portainer-ee:latest", image.FullName()) is.Equal("docker.io/portainer/portainer-ee:latest", image.FullName())
is.Equal("portainer/portainer-ee", image.opts.Name) is.Equal("portainer/portainer-ee", image.Opts.Name)
is.Equal("latest", image.Tag) is.Equal("latest", image.Tag)
is.Equal("portainer/portainer-ee", image.Path) is.Equal("portainer/portainer-ee", image.Path)
is.Equal("docker.io", image.Domain) is.Equal("docker.io", image.Domain)
@ -32,7 +32,7 @@ func TestImageParser(t *testing.T) {
}) })
is.NoError(err, "") is.NoError(err, "")
is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.FullName()) is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name) is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("", image.Tag) is.Equal("", image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path) is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain) is.Equal("gcr.io", image.Domain)
@ -49,7 +49,7 @@ func TestImageParser(t *testing.T) {
}) })
is.NoError(err, "") is.NoError(err, "")
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName()) is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name) is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("v0.0.30", image.Tag) is.Equal("v0.0.30", image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path) is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain) is.Equal("gcr.io", image.Domain)
@ -71,7 +71,7 @@ func TestUpdateParsedImage(t *testing.T) {
is.NoError(err, "") is.NoError(err, "")
_ = image.WithTag("v0.0.31") _ = image.WithTag("v0.0.31")
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.31", image.FullName()) is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.31", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name) is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("v0.0.31", image.Tag) is.Equal("v0.0.31", image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path) is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain) is.Equal("gcr.io", image.Domain)
@ -89,7 +89,7 @@ func TestUpdateParsedImage(t *testing.T) {
is.NoError(err, "") is.NoError(err, "")
_ = image.WithDigest("sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b3") _ = image.WithDigest("sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b3")
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName()) is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name) is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("v0.0.30", image.Tag) is.Equal("v0.0.30", image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path) is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain) is.Equal("gcr.io", image.Domain)
@ -105,9 +105,9 @@ func TestUpdateParsedImage(t *testing.T) {
Name: "gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", Name: "gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2",
}) })
is.NoError(err, "") is.NoError(err, "")
_ = image.trimDigest() _ = image.TrimDigest()
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName()) is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name) is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("v0.0.30", image.Tag) is.Equal("v0.0.30", image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path) is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain) is.Equal("gcr.io", image.Domain)


@ -29,7 +29,7 @@ func (c *RegistryClient) RegistryAuth(image Image) (string, string, error) {
return "", "", err return "", "", err
} }
registry, err := findBestMatchRegistry(image.opts.Name, registries) registry, err := findBestMatchRegistry(image.Opts.Name, registries)
if err != nil { if err != nil {
return "", "", err return "", "", err
} }
@ -59,7 +59,7 @@ func (c *RegistryClient) EncodedRegistryAuth(image Image) (string, error) {
return "", err return "", err
} }
registry, err := findBestMatchRegistry(image.opts.Name, registries) registry, err := findBestMatchRegistry(image.Opts.Name, registries)
if err != nil { if err != nil {
return "", err return "", err
} }


@ -4,10 +4,12 @@ import (
    portainer "github.com/portainer/portainer/api"
)

-type kubernetesMockDeployer struct{}
+type kubernetesMockDeployer struct {
+   portainer.KubernetesDeployer
+}

// NewKubernetesDeployer creates a mock kubernetes deployer
-func NewKubernetesDeployer() portainer.KubernetesDeployer {
+func NewKubernetesDeployer() *kubernetesMockDeployer {
    return &kubernetesMockDeployer{}
}

@ -18,3 +20,7 @@ func (deployer *kubernetesMockDeployer) Deploy(userID portainer.UserID, endpoint
func (deployer *kubernetesMockDeployer) Remove(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) {
    return "", nil
}
+
+func (deployer *kubernetesMockDeployer) Restart(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) {
+   return "", nil
+}


@@ -1,13 +1,8 @@
package exec
import (
-"bytes"
+"context"
"fmt"
-"os"
-"os/exec"
-"path"
-"runtime"
-"strings"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
@@ -15,13 +10,17 @@ import (
"github.com/portainer/portainer/api/http/proxy/factory"
"github.com/portainer/portainer/api/http/proxy/factory/kubernetes"
"github.com/portainer/portainer/api/kubernetes/cli"
+"github.com/portainer/portainer/pkg/libkubectl"
"github.com/pkg/errors"
)
+const (
+defaultServerURL = "https://kubernetes.default.svc"
+)
// KubernetesDeployer represents a service to deploy resources inside a Kubernetes environment(endpoint).
type KubernetesDeployer struct {
-binaryPath string
dataStore dataservices.DataStore
reverseTunnelService portainer.ReverseTunnelService
signatureService portainer.DigitalSignatureService
@@ -31,9 +30,8 @@ type KubernetesDeployer struct {
}
// NewKubernetesDeployer initializes a new KubernetesDeployer service.
-func NewKubernetesDeployer(kubernetesTokenCacheManager *kubernetes.TokenCacheManager, kubernetesClientFactory *cli.ClientFactory, datastore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager, binaryPath string) *KubernetesDeployer {
+func NewKubernetesDeployer(kubernetesTokenCacheManager *kubernetes.TokenCacheManager, kubernetesClientFactory *cli.ClientFactory, datastore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager) *KubernetesDeployer {
return &KubernetesDeployer{
-binaryPath: binaryPath,
dataStore: datastore,
reverseTunnelService: reverseTunnelService,
signatureService: signatureService,
@@ -78,63 +76,56 @@ func (deployer *KubernetesDeployer) getToken(userID portainer.UserID, endpoint *
}
// Deploy upserts Kubernetes resources defined in manifest(s)
-func (deployer *KubernetesDeployer) Deploy(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) {
-return deployer.command("apply", userID, endpoint, manifestFiles, namespace)
+func (deployer *KubernetesDeployer) Deploy(userID portainer.UserID, endpoint *portainer.Endpoint, resources []string, namespace string) (string, error) {
+return deployer.command("apply", userID, endpoint, resources, namespace)
}
// Remove deletes Kubernetes resources defined in manifest(s)
-func (deployer *KubernetesDeployer) Remove(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) {
-return deployer.command("delete", userID, endpoint, manifestFiles, namespace)
+func (deployer *KubernetesDeployer) Remove(userID portainer.UserID, endpoint *portainer.Endpoint, resources []string, namespace string) (string, error) {
+return deployer.command("delete", userID, endpoint, resources, namespace)
}
-func (deployer *KubernetesDeployer) command(operation string, userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) {
+func (deployer *KubernetesDeployer) command(operation string, userID portainer.UserID, endpoint *portainer.Endpoint, resources []string, namespace string) (string, error) {
token, err := deployer.getToken(userID, endpoint, endpoint.Type == portainer.KubernetesLocalEnvironment)
if err != nil {
return "", errors.Wrap(err, "failed generating a user token")
}
-command := path.Join(deployer.binaryPath, "kubectl")
-if runtime.GOOS == "windows" {
-command = path.Join(deployer.binaryPath, "kubectl.exe")
-}
-args := []string{"--token", token}
-if namespace != "" {
-args = append(args, "--namespace", namespace)
-}
+serverURL := defaultServerURL
if endpoint.Type == portainer.AgentOnKubernetesEnvironment || endpoint.Type == portainer.EdgeAgentOnKubernetesEnvironment {
url, proxy, err := deployer.getAgentURL(endpoint)
if err != nil {
return "", errors.WithMessage(err, "failed generating endpoint URL")
}
defer proxy.Close()
-args = append(args, "--server", url)
-args = append(args, "--insecure-skip-tls-verify")
+serverURL = url
}
-if operation == "delete" {
-args = append(args, "--ignore-not-found=true")
-}
-args = append(args, operation)
-for _, path := range manifestFiles {
-args = append(args, "-f", strings.TrimSpace(path))
-}
-var stderr bytes.Buffer
-cmd := exec.Command(command, args...)
-cmd.Env = os.Environ()
-cmd.Env = append(cmd.Env, "POD_NAMESPACE=default")
-cmd.Stderr = &stderr
-output, err := cmd.Output()
+client, err := libkubectl.NewClient(&libkubectl.ClientAccess{
+Token: token,
+ServerUrl: serverURL,
+}, namespace, "", true)
if err != nil {
-return "", errors.Wrapf(err, "failed to execute kubectl command: %q", stderr.String())
+return "", errors.Wrap(err, "failed to create kubectl client")
}
-return string(output), nil
+operations := map[string]func(context.Context, []string) (string, error){
+"apply": client.Apply,
+"delete": client.Delete,
+}
+operationFunc, ok := operations[operation]
+if !ok {
+return "", errors.Errorf("unsupported operation: %s", operation)
+}
+output, err := operationFunc(context.Background(), resources)
+if err != nil {
+return "", errors.Wrapf(err, "failed to execute kubectl %s command", operation)
+}
+return output, nil
}
func (deployer *KubernetesDeployer) getAgentURL(endpoint *portainer.Endpoint) (string, *factory.ProxyServer, error) {

View file

@ -0,0 +1,173 @@
package exec
import (
"context"
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
type mockKubectlClient struct {
applyFunc func(ctx context.Context, files []string) error
deleteFunc func(ctx context.Context, files []string) error
rolloutRestartFunc func(ctx context.Context, resources []string) error
}
func (m *mockKubectlClient) Apply(ctx context.Context, files []string) error {
if m.applyFunc != nil {
return m.applyFunc(ctx, files)
}
return nil
}
func (m *mockKubectlClient) Delete(ctx context.Context, files []string) error {
if m.deleteFunc != nil {
return m.deleteFunc(ctx, files)
}
return nil
}
func (m *mockKubectlClient) RolloutRestart(ctx context.Context, resources []string) error {
if m.rolloutRestartFunc != nil {
return m.rolloutRestartFunc(ctx, resources)
}
return nil
}
func testExecuteKubectlOperation(client *mockKubectlClient, operation string, manifestFiles []string) error {
operations := map[string]func(context.Context, []string) error{
"apply": client.Apply,
"delete": client.Delete,
"rollout-restart": client.RolloutRestart,
}
operationFunc, ok := operations[operation]
if !ok {
return fmt.Errorf("unsupported operation: %s", operation)
}
if err := operationFunc(context.Background(), manifestFiles); err != nil {
return fmt.Errorf("failed to execute kubectl %s command: %w", operation, err)
}
return nil
}
func TestExecuteKubectlOperation_Apply_Success(t *testing.T) {
called := false
mockClient := &mockKubectlClient{
applyFunc: func(ctx context.Context, files []string) error {
called = true
assert.Equal(t, []string{"manifest1.yaml", "manifest2.yaml"}, files)
return nil
},
}
manifests := []string{"manifest1.yaml", "manifest2.yaml"}
err := testExecuteKubectlOperation(mockClient, "apply", manifests)
assert.NoError(t, err)
assert.True(t, called)
}
func TestExecuteKubectlOperation_Apply_Error(t *testing.T) {
expectedErr := errors.New("kubectl apply failed")
called := false
mockClient := &mockKubectlClient{
applyFunc: func(ctx context.Context, files []string) error {
called = true
assert.Equal(t, []string{"error.yaml"}, files)
return expectedErr
},
}
manifests := []string{"error.yaml"}
err := testExecuteKubectlOperation(mockClient, "apply", manifests)
assert.Error(t, err)
assert.Contains(t, err.Error(), expectedErr.Error())
assert.True(t, called)
}
func TestExecuteKubectlOperation_Delete_Success(t *testing.T) {
called := false
mockClient := &mockKubectlClient{
deleteFunc: func(ctx context.Context, files []string) error {
called = true
assert.Equal(t, []string{"manifest1.yaml"}, files)
return nil
},
}
manifests := []string{"manifest1.yaml"}
err := testExecuteKubectlOperation(mockClient, "delete", manifests)
assert.NoError(t, err)
assert.True(t, called)
}
func TestExecuteKubectlOperation_Delete_Error(t *testing.T) {
expectedErr := errors.New("kubectl delete failed")
called := false
mockClient := &mockKubectlClient{
deleteFunc: func(ctx context.Context, files []string) error {
called = true
assert.Equal(t, []string{"error.yaml"}, files)
return expectedErr
},
}
manifests := []string{"error.yaml"}
err := testExecuteKubectlOperation(mockClient, "delete", manifests)
assert.Error(t, err)
assert.Contains(t, err.Error(), expectedErr.Error())
assert.True(t, called)
}
func TestExecuteKubectlOperation_RolloutRestart_Success(t *testing.T) {
called := false
mockClient := &mockKubectlClient{
rolloutRestartFunc: func(ctx context.Context, resources []string) error {
called = true
assert.Equal(t, []string{"deployment/nginx"}, resources)
return nil
},
}
resources := []string{"deployment/nginx"}
err := testExecuteKubectlOperation(mockClient, "rollout-restart", resources)
assert.NoError(t, err)
assert.True(t, called)
}
func TestExecuteKubectlOperation_RolloutRestart_Error(t *testing.T) {
expectedErr := errors.New("kubectl rollout restart failed")
called := false
mockClient := &mockKubectlClient{
rolloutRestartFunc: func(ctx context.Context, resources []string) error {
called = true
assert.Equal(t, []string{"deployment/error"}, resources)
return expectedErr
},
}
resources := []string{"deployment/error"}
err := testExecuteKubectlOperation(mockClient, "rollout-restart", resources)
assert.Error(t, err)
assert.Contains(t, err.Error(), expectedErr.Error())
assert.True(t, called)
}
func TestExecuteKubectlOperation_UnsupportedOperation(t *testing.T) {
mockClient := &mockKubectlClient{}
err := testExecuteKubectlOperation(mockClient, "unsupported", []string{})
assert.Error(t, err)
assert.Contains(t, err.Error(), "unsupported operation")
}

View file

@@ -127,7 +127,7 @@ func (manager *SwarmStackManager) Remove(stack *portainer.Stack, endpoint *porta
return err
}
-args = append(args, "stack", "rm", stack.Name)
+args = append(args, "stack", "rm", "--detach=false", stack.Name)
return runCommandAndCaptureStdErr(command, args, nil, "")
}

View file

@@ -68,7 +68,7 @@ func copyFile(src, dst string) error {
defer from.Close()
// has to include 'execute' bit, otherwise fails. MkdirAll follows `mkdir -m` restrictions
-if err := os.MkdirAll(filepath.Dir(dst), 0744); err != nil {
+if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
return err
}
to, err := os.Create(dst)

View file

@@ -841,11 +841,11 @@ func (service *Service) GetDefaultSSLCertsPath() (string, string) {
}
func defaultMTLSCertPathUnderFileStore() (string, string, string) {
-certPath := JoinPaths(SSLCertPath, MTLSCertFilename)
caCertPath := JoinPaths(SSLCertPath, MTLSCACertFilename)
+certPath := JoinPaths(SSLCertPath, MTLSCertFilename)
keyPath := JoinPaths(SSLCertPath, MTLSKeyFilename)
-return certPath, caCertPath, keyPath
+return caCertPath, certPath, keyPath
}
// GetDefaultChiselPrivateKeyPath returns the chisle private key path
@@ -1014,26 +1014,45 @@ func CreateFile(path string, r io.Reader) error {
return err
}
-func (service *Service) StoreMTLSCertificates(cert, caCert, key []byte) (string, string, string, error) {
-certPath, caCertPath, keyPath := defaultMTLSCertPathUnderFileStore()
-r := bytes.NewReader(cert)
-err := service.createFileInStore(certPath, r)
-if err != nil {
+func (service *Service) StoreMTLSCertificates(caCert, cert, key []byte) (string, string, string, error) {
+caCertPath, certPath, keyPath := defaultMTLSCertPathUnderFileStore()
+r := bytes.NewReader(caCert)
+if err := service.createFileInStore(caCertPath, r); err != nil {
return "", "", "", err
}
-r = bytes.NewReader(caCert)
-err = service.createFileInStore(caCertPath, r)
-if err != nil {
+r = bytes.NewReader(cert)
+if err := service.createFileInStore(certPath, r); err != nil {
return "", "", "", err
}
r = bytes.NewReader(key)
-err = service.createFileInStore(keyPath, r)
-if err != nil {
+if err := service.createFileInStore(keyPath, r); err != nil {
return "", "", "", err
}
-return service.wrapFileStore(certPath), service.wrapFileStore(caCertPath), service.wrapFileStore(keyPath), nil
+return service.wrapFileStore(caCertPath), service.wrapFileStore(certPath), service.wrapFileStore(keyPath), nil
}
+func (service *Service) GetMTLSCertificates() (string, string, string, error) {
+caCertPath, certPath, keyPath := defaultMTLSCertPathUnderFileStore()
+caCertPath = service.wrapFileStore(caCertPath)
+certPath = service.wrapFileStore(certPath)
+keyPath = service.wrapFileStore(keyPath)
+paths := [...]string{caCertPath, certPath, keyPath}
+for _, path := range paths {
+exists, err := service.FileExists(path)
+if err != nil {
+return "", "", "", err
+}
+if !exists {
+return "", "", "", fmt.Errorf("file %s does not exist", path)
+}
+}
+return caCertPath, certPath, keyPath, nil
+}
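
Note on the hunk above: StoreMTLSCertificates now takes and returns the CA certificate first, and the new GetMTLSCertificates only succeeds once all three files exist. A minimal sketch of the resulting call pattern, assuming an initialized file service (the *Service from api/filesystem) and PEM bytes already in memory; names here are placeholders, not part of the change:

package example

import "github.com/portainer/portainer/api/filesystem"

// storeAndReadMTLS is a sketch only; svc and the PEM slices are assumed to
// be supplied by the caller.
func storeAndReadMTLS(svc *filesystem.Service, caCertPEM, certPEM, keyPEM []byte) error {
	// New argument order: CA certificate, then certificate, then key.
	if _, _, _, err := svc.StoreMTLSCertificates(caCertPEM, certPEM, keyPEM); err != nil {
		return err
	}

	// Returns the wrapped paths in the same order, or an error if any of the
	// three stored files is missing.
	_, _, _, err := svc.GetMTLSCertificates()
	return err
}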

View file

@@ -15,15 +15,19 @@ type MultiFilterArgs []struct {
}
// MultiFilterDirForPerDevConfigs filers the given dirEntries with multiple filter args, returns the merged entries for the given device
-func MultiFilterDirForPerDevConfigs(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs) []DirEntry {
+func MultiFilterDirForPerDevConfigs(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs) ([]DirEntry, []string) {
var filteredDirEntries []DirEntry
+var envFiles []string
for _, multiFilterArg := range multiFilterArgs {
-tmp := FilterDirForPerDevConfigs(dirEntries, multiFilterArg.FilterKey, configPath, multiFilterArg.FilterType)
+tmp, efs := FilterDirForPerDevConfigs(dirEntries, multiFilterArg.FilterKey, configPath, multiFilterArg.FilterType)
filteredDirEntries = append(filteredDirEntries, tmp...)
+envFiles = append(envFiles, efs...)
}
-return deduplicate(filteredDirEntries)
+return deduplicate(filteredDirEntries), envFiles
}
func deduplicate(dirEntries []DirEntry) []DirEntry {
@@ -32,8 +36,7 @@ func deduplicate(dirEntries []DirEntry) []DirEntry {
marks := make(map[string]struct{})
for _, dirEntry := range dirEntries {
-_, ok := marks[dirEntry.Name]
-if !ok {
+if _, ok := marks[dirEntry.Name]; !ok {
marks[dirEntry.Name] = struct{}{}
deduplicatedDirEntries = append(deduplicatedDirEntries, dirEntry)
}
@@ -44,34 +47,33 @@ func deduplicate(dirEntries []DirEntry) []DirEntry {
// FilterDirForPerDevConfigs filers the given dirEntries, returns entries for the given device
// For given configPath A/B/C, return entries:
-// 1. all entries outside of dir A
-// 2. dir entries A, A/B, A/B/C
-// 3. For filterType file:
+// 1. all entries outside of dir A/B/C
+// 2. For filterType file:
// file entries: A/B/C/<deviceName> and A/B/C/<deviceName>.*
-// 4. For filterType dir:
+// 3. For filterType dir:
// dir entry: A/B/C/<deviceName>
// all entries: A/B/C/<deviceName>/*
-func FilterDirForPerDevConfigs(dirEntries []DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) []DirEntry {
+func FilterDirForPerDevConfigs(dirEntries []DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) ([]DirEntry, []string) {
var filteredDirEntries []DirEntry
+var envFiles []string
for _, dirEntry := range dirEntries {
if shouldIncludeEntry(dirEntry, deviceName, configPath, filterType) {
filteredDirEntries = append(filteredDirEntries, dirEntry)
+if shouldParseEnvVars(dirEntry, deviceName, configPath, filterType) {
+envFiles = append(envFiles, dirEntry.Name)
+}
}
}
-return filteredDirEntries
+return filteredDirEntries, envFiles
}
func shouldIncludeEntry(dirEntry DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) bool {
// Include all entries outside of dir A
-if !isInConfigRootDir(dirEntry, configPath) {
-return true
-}
-// Include dir entries A, A/B, A/B/C
-if isParentDir(dirEntry, configPath) {
+if !isInConfigDir(dirEntry, configPath) {
return true
}
@@ -90,21 +92,9 @@ func shouldIncludeEntry(dirEntry DirEntry, deviceName, configPath string, filter
return false
}
-func isInConfigRootDir(dirEntry DirEntry, configPath string) bool {
-// get the first element of the configPath
-rootDir := strings.Split(configPath, string(os.PathSeparator))[0]
-// return true if entry name starts with "A/"
-return strings.HasPrefix(dirEntry.Name, appendTailSeparator(rootDir))
-}
-func isParentDir(dirEntry DirEntry, configPath string) bool {
-if dirEntry.IsFile {
-return false
-}
-// return true for dir entries A, A/B, A/B/C
-return strings.HasPrefix(appendTailSeparator(configPath), appendTailSeparator(dirEntry.Name))
+func isInConfigDir(dirEntry DirEntry, configPath string) bool {
+// return true if entry name starts with "A/B"
+return strings.HasPrefix(dirEntry.Name, appendTailSeparator(configPath))
}
func shouldIncludeFile(dirEntry DirEntry, deviceName, configPath string) bool {
@@ -138,6 +128,15 @@ func shouldIncludeDir(dirEntry DirEntry, deviceName, configPath string) bool {
return strings.HasPrefix(dirEntry.Name, filterPrefix)
}
+func shouldParseEnvVars(dirEntry DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) bool {
+if !dirEntry.IsFile {
+return false
+}
+return isInConfigDir(dirEntry, configPath) &&
+filepath.Base(dirEntry.Name) == deviceName+".env"
+}
func appendTailSeparator(path string) string {
return fmt.Sprintf("%s%c", path, os.PathSeparator)
}

View file

@ -4,14 +4,17 @@ import (
"testing" "testing"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestMultiFilterDirForPerDevConfigs(t *testing.T) { func TestMultiFilterDirForPerDevConfigs(t *testing.T) {
type args struct { f := func(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs, wantDirEntries []DirEntry) {
dirEntries []DirEntry t.Helper()
configPath string
multiFilterArgs MultiFilterArgs dirEntries, _ = MultiFilterDirForPerDevConfigs(dirEntries, configPath, multiFilterArgs)
require.Equal(t, wantDirEntries, dirEntries)
} }
baseDirEntries := []DirEntry{ baseDirEntries := []DirEntry{
@ -26,67 +29,94 @@ func TestMultiFilterDirForPerDevConfigs(t *testing.T) {
{"configs/folder2/config2", "", true, 420}, {"configs/folder2/config2", "", true, 420},
} }
tests := []struct { // Filter file1
name string f(
args args baseDirEntries,
want []DirEntry "configs",
}{ MultiFilterArgs{{"file1", portainer.PerDevConfigsTypeFile}},
{ []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3]},
name: "filter file1", )
args: args{
baseDirEntries, // Filter folder1
"configs", f(
MultiFilterArgs{{"file1", portainer.PerDevConfigsTypeFile}}, baseDirEntries,
}, "configs",
want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3]}, MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}},
[]DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]},
)
// Filter file1 and folder1
f(
baseDirEntries,
"configs",
MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}},
[]DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]},
)
// Filter file1 and file2
f(
baseDirEntries,
"configs",
MultiFilterArgs{
{"file1", portainer.PerDevConfigsTypeFile},
{"file2", portainer.PerDevConfigsTypeFile},
}, },
{ []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3], baseDirEntries[4]},
name: "filter folder1", )
args: args{
baseDirEntries, // Filter folder1 and folder2
"configs", f(
MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}}, baseDirEntries,
}, "configs",
want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]}, MultiFilterArgs{
}, {"folder1", portainer.PerDevConfigsTypeDir},
{ {"folder2", portainer.PerDevConfigsTypeDir},
name: "filter file1 and folder1",
args: args{
baseDirEntries,
"configs",
MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}},
},
want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]},
},
{
name: "filter file1 and file2",
args: args{
baseDirEntries,
"configs",
MultiFilterArgs{
{"file1", portainer.PerDevConfigsTypeFile},
{"file2", portainer.PerDevConfigsTypeFile},
},
},
want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3], baseDirEntries[4]},
},
{
name: "filter folder1 and folder2",
args: args{
baseDirEntries,
"configs",
MultiFilterArgs{
{"folder1", portainer.PerDevConfigsTypeDir},
{"folder2", portainer.PerDevConfigsTypeDir},
},
},
want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6], baseDirEntries[7], baseDirEntries[8]},
}, },
[]DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6], baseDirEntries[7], baseDirEntries[8]},
)
}
func TestMultiFilterDirForPerDevConfigsEnvFiles(t *testing.T) {
f := func(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs, wantEnvFiles []string) {
t.Helper()
_, envFiles := MultiFilterDirForPerDevConfigs(dirEntries, configPath, multiFilterArgs)
require.Equal(t, wantEnvFiles, envFiles)
} }
for _, tt := range tests { baseDirEntries := []DirEntry{
t.Run(tt.name, func(t *testing.T) { {".env", "", true, 420},
assert.Equalf(t, tt.want, MultiFilterDirForPerDevConfigs(tt.args.dirEntries, tt.args.configPath, tt.args.multiFilterArgs), "MultiFilterDirForPerDevConfigs(%v, %v, %v)", tt.args.dirEntries, tt.args.configPath, tt.args.multiFilterArgs) {"docker-compose.yaml", "", true, 420},
}) {"configs", "", false, 420},
{"configs/edge-id/edge-id.env", "", true, 420},
} }
f(
baseDirEntries,
"configs",
MultiFilterArgs{{"edge-id", portainer.PerDevConfigsTypeDir}},
[]string{"configs/edge-id/edge-id.env"},
)
}
func TestIsInConfigDir(t *testing.T) {
f := func(dirEntry DirEntry, configPath string, expect bool) {
t.Helper()
actual := isInConfigDir(dirEntry, configPath)
assert.Equal(t, expect, actual)
}
f(DirEntry{Name: "edge-configs"}, "edge-configs", false)
f(DirEntry{Name: "edge-configs_backup"}, "edge-configs", false)
f(DirEntry{Name: "edge-configs/standalone-edge-agent-standard"}, "edge-configs", true)
f(DirEntry{Name: "parent/edge-configs/"}, "edge-configs", false)
f(DirEntry{Name: "edgestacktest"}, "edgestacktest/edge-configs", false)
f(DirEntry{Name: "edgestacktest/edgeconfigs-test.yaml"}, "edgestacktest/edge-configs", false)
f(DirEntry{Name: "edgestacktest/file1.conf"}, "edgestacktest/edge-configs", false)
f(DirEntry{Name: "edgeconfigs-test.yaml"}, "edgestacktest/edge-configs", false)
f(DirEntry{Name: "edgestacktest/edge-configs"}, "edgestacktest/edge-configs", false)
f(DirEntry{Name: "edgestacktest/edge-configs/standalone-edge-agent-async"}, "edgestacktest/edge-configs", true)
f(DirEntry{Name: "edgestacktest/edge-configs/abc.txt"}, "edgestacktest/edge-configs", true)
} }

View file

@@ -60,15 +60,9 @@ func NewAzureClient() *azureClient {
}
func newHttpClientForAzure(insecureSkipVerify bool) *http.Client {
-tlsConfig := crypto.CreateTLSConfiguration()
-if insecureSkipVerify {
-tlsConfig.InsecureSkipVerify = true
-}
httpsCli := &http.Client{
Transport: &http.Transport{
-TLSClientConfig: tlsConfig,
+TLSClientConfig: crypto.CreateTLSConfiguration(insecureSkipVerify),
Proxy: http.ProxyFromEnvironment,
},
Timeout: 300 * time.Second,

View file

@ -58,7 +58,15 @@ func TestService_ClonePublicRepository_Azure(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
dst := t.TempDir() dst := t.TempDir()
repositoryUrl := fmt.Sprintf(tt.args.repositoryURLFormat, tt.args.password) repositoryUrl := fmt.Sprintf(tt.args.repositoryURLFormat, tt.args.password)
err := service.CloneRepository(dst, repositoryUrl, tt.args.referenceName, "", "", false) err := service.CloneRepository(
dst,
repositoryUrl,
tt.args.referenceName,
"",
"",
gittypes.GitCredentialAuthType_Basic,
false,
)
assert.NoError(t, err) assert.NoError(t, err)
assert.FileExists(t, filepath.Join(dst, "README.md")) assert.FileExists(t, filepath.Join(dst, "README.md"))
}) })
@ -73,7 +81,15 @@ func TestService_ClonePrivateRepository_Azure(t *testing.T) {
dst := t.TempDir() dst := t.TempDir()
err := service.CloneRepository(dst, privateAzureRepoURL, "refs/heads/main", "", pat, false) err := service.CloneRepository(
dst,
privateAzureRepoURL,
"refs/heads/main",
"",
pat,
gittypes.GitCredentialAuthType_Basic,
false,
)
assert.NoError(t, err) assert.NoError(t, err)
assert.FileExists(t, filepath.Join(dst, "README.md")) assert.FileExists(t, filepath.Join(dst, "README.md"))
} }
@ -84,7 +100,14 @@ func TestService_LatestCommitID_Azure(t *testing.T) {
pat := getRequiredValue(t, "AZURE_DEVOPS_PAT") pat := getRequiredValue(t, "AZURE_DEVOPS_PAT")
service := NewService(context.TODO()) service := NewService(context.TODO())
id, err := service.LatestCommitID(privateAzureRepoURL, "refs/heads/main", "", pat, false) id, err := service.LatestCommitID(
privateAzureRepoURL,
"refs/heads/main",
"",
pat,
gittypes.GitCredentialAuthType_Basic,
false,
)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotEmpty(t, id, "cannot guarantee commit id, but it should be not empty") assert.NotEmpty(t, id, "cannot guarantee commit id, but it should be not empty")
} }
@ -96,7 +119,14 @@ func TestService_ListRefs_Azure(t *testing.T) {
username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME") username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME")
service := NewService(context.TODO()) service := NewService(context.TODO())
refs, err := service.ListRefs(privateAzureRepoURL, username, accessToken, false, false) refs, err := service.ListRefs(
privateAzureRepoURL,
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
)
assert.NoError(t, err) assert.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1) assert.GreaterOrEqual(t, len(refs), 1)
} }
@ -108,8 +138,8 @@ func TestService_ListRefs_Azure_Concurrently(t *testing.T) {
username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME") username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME")
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond) service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
go service.ListRefs(privateAzureRepoURL, username, accessToken, false, false) go service.ListRefs(privateAzureRepoURL, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListRefs(privateAzureRepoURL, username, accessToken, false, false) service.ListRefs(privateAzureRepoURL, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
} }
@ -247,7 +277,17 @@ func TestService_ListFiles_Azure(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
paths, err := service.ListFiles(tt.args.repositoryUrl, tt.args.referenceName, tt.args.username, tt.args.password, false, false, tt.extensions, false) paths, err := service.ListFiles(
tt.args.repositoryUrl,
tt.args.referenceName,
tt.args.username,
tt.args.password,
gittypes.GitCredentialAuthType_Basic,
false,
false,
tt.extensions,
false,
)
if tt.expect.shouldFail { if tt.expect.shouldFail {
assert.Error(t, err) assert.Error(t, err)
if tt.expect.err != nil { if tt.expect.err != nil {
@ -270,8 +310,28 @@ func TestService_ListFiles_Azure_Concurrently(t *testing.T) {
username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME") username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME")
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond) service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
go service.ListFiles(privateAzureRepoURL, "refs/heads/main", username, accessToken, false, false, []string{}, false) go service.ListFiles(
service.ListFiles(privateAzureRepoURL, "refs/heads/main", username, accessToken, false, false, []string{}, false) privateAzureRepoURL,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
service.ListFiles(
privateAzureRepoURL,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
} }

View file

@@ -19,6 +19,7 @@ type CloneOptions struct {
ReferenceName string
Username string
Password string
+AuthType gittypes.GitCredentialAuthType
// TLSSkipVerify skips SSL verification when cloning the Git repository
TLSSkipVerify bool `example:"false"`
}
@@ -42,7 +43,15 @@ func CloneWithBackup(gitService portainer.GitService, fileService portainer.File
cleanUp = true
-if err := gitService.CloneRepository(options.ProjectPath, options.URL, options.ReferenceName, options.Username, options.Password, options.TLSSkipVerify); err != nil {
+if err := gitService.CloneRepository(
+options.ProjectPath,
+options.URL,
+options.ReferenceName,
+options.Username,
+options.Password,
+options.AuthType,
+options.TLSSkipVerify,
+); err != nil {
cleanUp = false
if err := filesystem.MoveDirectory(backupProjectPath, options.ProjectPath, false); err != nil {
log.Warn().Err(err).Msg("failed restoring backup folder")

View file

@@ -7,12 +7,14 @@ import (
"strings"
gittypes "github.com/portainer/portainer/api/git/types"
+"github.com/rs/zerolog/log"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/object"
+"github.com/go-git/go-git/v5/plumbing/transport"
githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/go-git/go-git/v5/storage/memory"
"github.com/pkg/errors"
@@ -33,7 +35,7 @@ func (c *gitClient) download(ctx context.Context, dst string, opt cloneOption) e
URL: opt.repositoryUrl,
Depth: opt.depth,
InsecureSkipTLS: opt.tlsSkipVerify,
-Auth: getAuth(opt.username, opt.password),
+Auth: getAuth(opt.authType, opt.username, opt.password),
Tags: git.NoTags,
}
@@ -51,7 +53,10 @@ func (c *gitClient) download(ctx context.Context, dst string, opt cloneOption) e
}
if !c.preserveGitDirectory {
-os.RemoveAll(filepath.Join(dst, ".git"))
+err := os.RemoveAll(filepath.Join(dst, ".git"))
+if err != nil {
+log.Error().Err(err).Msg("failed to remove .git directory")
+}
}
return nil
@@ -64,7 +69,7 @@ func (c *gitClient) latestCommitID(ctx context.Context, opt fetchOption) (string
})
listOptions := &git.ListOptions{
-Auth: getAuth(opt.username, opt.password),
+Auth: getAuth(opt.authType, opt.username, opt.password),
InsecureSkipTLS: opt.tlsSkipVerify,
}
@@ -94,7 +99,23 @@ func (c *gitClient) latestCommitID(ctx context.Context, opt fetchOption) (string
return "", errors.Errorf("could not find ref %q in the repository", opt.referenceName)
}
-func getAuth(username, password string) *githttp.BasicAuth {
+func getAuth(authType gittypes.GitCredentialAuthType, username, password string) transport.AuthMethod {
+if password == "" {
+return nil
+}
+switch authType {
+case gittypes.GitCredentialAuthType_Basic:
+return getBasicAuth(username, password)
+case gittypes.GitCredentialAuthType_Token:
+return getTokenAuth(password)
+default:
+log.Warn().Msg("unknown git credentials authorization type, defaulting to None")
+return nil
+}
+}
+func getBasicAuth(username, password string) *githttp.BasicAuth {
if password != "" {
if username == "" {
username = "token"
@@ -108,6 +129,15 @@ func getAuth(username, password string) *githttp.BasicAuth {
return nil
}
+func getTokenAuth(token string) *githttp.TokenAuth {
+if token != "" {
+return &githttp.TokenAuth{
+Token: token,
+}
+}
+return nil
+}
func (c *gitClient) listRefs(ctx context.Context, opt baseOption) ([]string, error) {
rem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{
Name: "origin",
@@ -115,7 +145,7 @@ func (c *gitClient) listRefs(ctx context.Context, opt baseOption) ([]string, err
})
listOptions := &git.ListOptions{
-Auth: getAuth(opt.username, opt.password),
+Auth: getAuth(opt.authType, opt.username, opt.password),
InsecureSkipTLS: opt.tlsSkipVerify,
}
@@ -143,7 +173,7 @@ func (c *gitClient) listFiles(ctx context.Context, opt fetchOption) ([]string, e
Depth: 1,
SingleBranch: true,
ReferenceName: plumbing.ReferenceName(opt.referenceName),
-Auth: getAuth(opt.username, opt.password),
+Auth: getAuth(opt.authType, opt.username, opt.password),
InsecureSkipTLS: opt.tlsSkipVerify,
Tags: git.NoTags,
}
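
For reference, the two auth methods selected above differ only in the Authorization header they produce: go-git's BasicAuth sends a standard Basic header, while TokenAuth sends a Bearer header (the behaviour the token-auth test further below asserts). A small, self-contained sketch with placeholder credentials only:

package main

import (
	"fmt"

	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

func main() {
	// GitCredentialAuthType_Basic path: "Authorization: Basic base64(user:password)".
	basic := &githttp.BasicAuth{Username: "jane", Password: "example-password"}

	// GitCredentialAuthType_Token path: "Authorization: Bearer <token>".
	bearer := &githttp.TokenAuth{Token: "example-token"}

	fmt.Println(basic.Name(), bearer.Name())
}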

View file

@ -2,6 +2,8 @@ package git
import ( import (
"context" "context"
"net/http"
"net/http/httptest"
"path/filepath" "path/filepath"
"testing" "testing"
"time" "time"
@ -24,7 +26,15 @@ func TestService_ClonePrivateRepository_GitHub(t *testing.T) {
dst := t.TempDir() dst := t.TempDir()
repositoryUrl := privateGitRepoURL repositoryUrl := privateGitRepoURL
err := service.CloneRepository(dst, repositoryUrl, "refs/heads/main", username, accessToken, false) err := service.CloneRepository(
dst,
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
)
assert.NoError(t, err) assert.NoError(t, err)
assert.FileExists(t, filepath.Join(dst, "README.md")) assert.FileExists(t, filepath.Join(dst, "README.md"))
} }
@ -37,7 +47,14 @@ func TestService_LatestCommitID_GitHub(t *testing.T) {
service := newService(context.TODO(), 0, 0) service := newService(context.TODO(), 0, 0)
repositoryUrl := privateGitRepoURL repositoryUrl := privateGitRepoURL
id, err := service.LatestCommitID(repositoryUrl, "refs/heads/main", username, accessToken, false) id, err := service.LatestCommitID(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotEmpty(t, id, "cannot guarantee commit id, but it should be not empty") assert.NotEmpty(t, id, "cannot guarantee commit id, but it should be not empty")
} }
@ -50,7 +67,7 @@ func TestService_ListRefs_GitHub(t *testing.T) {
service := newService(context.TODO(), 0, 0) service := newService(context.TODO(), 0, 0)
repositoryUrl := privateGitRepoURL repositoryUrl := privateGitRepoURL
refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false) refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1) assert.GreaterOrEqual(t, len(refs), 1)
} }
@ -63,8 +80,8 @@ func TestService_ListRefs_Github_Concurrently(t *testing.T) {
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond) service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
repositoryUrl := privateGitRepoURL repositoryUrl := privateGitRepoURL
go service.ListRefs(repositoryUrl, username, accessToken, false, false) go service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListRefs(repositoryUrl, username, accessToken, false, false) service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
} }
@ -202,7 +219,17 @@ func TestService_ListFiles_GitHub(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
paths, err := service.ListFiles(tt.args.repositoryUrl, tt.args.referenceName, tt.args.username, tt.args.password, false, false, tt.extensions, false) paths, err := service.ListFiles(
tt.args.repositoryUrl,
tt.args.referenceName,
tt.args.username,
tt.args.password,
gittypes.GitCredentialAuthType_Basic,
false,
false,
tt.extensions,
false,
)
if tt.expect.shouldFail { if tt.expect.shouldFail {
assert.Error(t, err) assert.Error(t, err)
if tt.expect.err != nil { if tt.expect.err != nil {
@ -226,8 +253,28 @@ func TestService_ListFiles_Github_Concurrently(t *testing.T) {
username := getRequiredValue(t, "GITHUB_USERNAME") username := getRequiredValue(t, "GITHUB_USERNAME")
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond) service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
go service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) go service.ListFiles(
service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
} }
@ -240,8 +287,18 @@ func TestService_purgeCache_Github(t *testing.T) {
username := getRequiredValue(t, "GITHUB_USERNAME") username := getRequiredValue(t, "GITHUB_USERNAME")
service := NewService(context.TODO()) service := NewService(context.TODO())
service.ListRefs(repositoryUrl, username, accessToken, false, false) service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
assert.Equal(t, 1, service.repoRefCache.Len()) assert.Equal(t, 1, service.repoRefCache.Len())
assert.Equal(t, 1, service.repoFileCache.Len()) assert.Equal(t, 1, service.repoFileCache.Len())
@ -261,8 +318,18 @@ func TestService_purgeCacheByTTL_Github(t *testing.T) {
// 40*timeout is designed for giving enough time for ListRefs and ListFiles to cache the result // 40*timeout is designed for giving enough time for ListRefs and ListFiles to cache the result
service := newService(context.TODO(), 2, 40*timeout) service := newService(context.TODO(), 2, 40*timeout)
service.ListRefs(repositoryUrl, username, accessToken, false, false) service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
assert.Equal(t, 1, service.repoRefCache.Len()) assert.Equal(t, 1, service.repoRefCache.Len())
assert.Equal(t, 1, service.repoFileCache.Len()) assert.Equal(t, 1, service.repoFileCache.Len())
@ -293,12 +360,12 @@ func TestService_HardRefresh_ListRefs_GitHub(t *testing.T) {
service := newService(context.TODO(), 2, 0) service := newService(context.TODO(), 2, 0)
repositoryUrl := privateGitRepoURL repositoryUrl := privateGitRepoURL
refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false) refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1) assert.GreaterOrEqual(t, len(refs), 1)
assert.Equal(t, 1, service.repoRefCache.Len()) assert.Equal(t, 1, service.repoRefCache.Len())
_, err = service.ListRefs(repositoryUrl, username, "fake-token", false, false) _, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, false, false)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, 1, service.repoRefCache.Len()) assert.Equal(t, 1, service.repoRefCache.Len())
} }
@ -311,26 +378,46 @@ func TestService_HardRefresh_ListRefs_And_RemoveAllCaches_GitHub(t *testing.T) {
service := newService(context.TODO(), 2, 0) service := newService(context.TODO(), 2, 0)
repositoryUrl := privateGitRepoURL repositoryUrl := privateGitRepoURL
refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false) refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1) assert.GreaterOrEqual(t, len(refs), 1)
assert.Equal(t, 1, service.repoRefCache.Len()) assert.Equal(t, 1, service.repoRefCache.Len())
files, err := service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) files, err := service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
assert.NoError(t, err) assert.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1) assert.GreaterOrEqual(t, len(files), 1)
assert.Equal(t, 1, service.repoFileCache.Len()) assert.Equal(t, 1, service.repoFileCache.Len())
files, err = service.ListFiles(repositoryUrl, "refs/heads/test", username, accessToken, false, false, []string{}, false) files, err = service.ListFiles(
repositoryUrl,
"refs/heads/test",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
assert.NoError(t, err) assert.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1) assert.GreaterOrEqual(t, len(files), 1)
assert.Equal(t, 2, service.repoFileCache.Len()) assert.Equal(t, 2, service.repoFileCache.Len())
_, err = service.ListRefs(repositoryUrl, username, "fake-token", false, false) _, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, false, false)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, 1, service.repoRefCache.Len()) assert.Equal(t, 1, service.repoRefCache.Len())
_, err = service.ListRefs(repositoryUrl, username, "fake-token", true, false) _, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, true, false)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, 1, service.repoRefCache.Len()) assert.Equal(t, 1, service.repoRefCache.Len())
// The relevant file caches should be removed too // The relevant file caches should be removed too
@ -344,12 +431,72 @@ func TestService_HardRefresh_ListFiles_GitHub(t *testing.T) {
accessToken := getRequiredValue(t, "GITHUB_PAT") accessToken := getRequiredValue(t, "GITHUB_PAT")
username := getRequiredValue(t, "GITHUB_USERNAME") username := getRequiredValue(t, "GITHUB_USERNAME")
repositoryUrl := privateGitRepoURL repositoryUrl := privateGitRepoURL
files, err := service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) files, err := service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
assert.NoError(t, err) assert.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1) assert.GreaterOrEqual(t, len(files), 1)
assert.Equal(t, 1, service.repoFileCache.Len()) assert.Equal(t, 1, service.repoFileCache.Len())
_, err = service.ListFiles(repositoryUrl, "refs/heads/main", username, "fake-token", false, true, []string{}, false) _, err = service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
"fake-token",
gittypes.GitCredentialAuthType_Basic,
false,
true,
[]string{},
false,
)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, 0, service.repoFileCache.Len()) assert.Equal(t, 0, service.repoFileCache.Len())
} }
func TestService_CloneRepository_TokenAuth(t *testing.T) {
ensureIntegrationTest(t)
service := newService(context.TODO(), 2, 0)
var requests []*http.Request
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requests = append(requests, r)
}))
accessToken := "test_access_token"
username := "test_username"
repositoryUrl := testServer.URL
// Since we aren't hitting a real git server we ignore the error
_ = service.CloneRepository(
"test_dir",
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Token,
false,
)
testServer.Close()
if len(requests) != 1 {
t.Fatalf("expected 1 request sent but got %d", len(requests))
}
gotAuthHeader := requests[0].Header.Get("Authorization")
if gotAuthHeader == "" {
t.Fatal("no Authorization header in git request")
}
expectedAuthHeader := "Bearer test_access_token"
if gotAuthHeader != expectedAuthHeader {
t.Fatalf("expected Authorization header %q but got %q", expectedAuthHeader, gotAuthHeader)
}
}

View file

@ -38,7 +38,7 @@ func Test_ClonePublicRepository_Shallow(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
t.Logf("Cloning into %s", dir) t.Logf("Cloning into %s", dir)
err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", false) err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 1, getCommitHistoryLength(t, err, dir), "cloned repo has incorrect depth") assert.Equal(t, 1, getCommitHistoryLength(t, err, dir), "cloned repo has incorrect depth")
} }
@ -50,7 +50,7 @@ func Test_ClonePublicRepository_NoGitDirectory(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
t.Logf("Cloning into %s", dir) t.Logf("Cloning into %s", dir)
err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", false) err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.NoDirExists(t, filepath.Join(dir, ".git")) assert.NoDirExists(t, filepath.Join(dir, ".git"))
} }
@ -84,7 +84,7 @@ func Test_latestCommitID(t *testing.T) {
repositoryURL := setup(t) repositoryURL := setup(t)
referenceName := "refs/heads/main" referenceName := "refs/heads/main"
id, err := service.LatestCommitID(repositoryURL, referenceName, "", "", false) id, err := service.LatestCommitID(repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "68dcaa7bd452494043c64252ab90db0f98ecf8d2", id) assert.Equal(t, "68dcaa7bd452494043c64252ab90db0f98ecf8d2", id)
@ -95,7 +95,7 @@ func Test_ListRefs(t *testing.T) {
repositoryURL := setup(t) repositoryURL := setup(t)
fs, err := service.ListRefs(repositoryURL, "", "", false, false) fs, err := service.ListRefs(repositoryURL, "", "", gittypes.GitCredentialAuthType_Basic, false, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, []string{"refs/heads/main"}, fs) assert.Equal(t, []string{"refs/heads/main"}, fs)
@ -107,7 +107,17 @@ func Test_ListFiles(t *testing.T) {
repositoryURL := setup(t) repositoryURL := setup(t)
referenceName := "refs/heads/main" referenceName := "refs/heads/main"
fs, err := service.ListFiles(repositoryURL, referenceName, "", "", false, false, []string{".yml"}, false) fs, err := service.ListFiles(
repositoryURL,
referenceName,
"",
"",
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{".yml"},
false,
)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, []string{"docker-compose.yml"}, fs) assert.Equal(t, []string{"docker-compose.yml"}, fs)
@@ -255,7 +265,7 @@ func Test_listFilesPrivateRepository(t *testing.T) {
name: "list tree with real repository and head ref but no credential",
args: fetchOption{
baseOption: baseOption{
-repositoryUrl: privateGitRepoURL + "fake",
+repositoryUrl: privateGitRepoURL,
username: "",
password: "",
},

View file

@@ -8,6 +8,7 @@ import (
"time"
lru "github.com/hashicorp/golang-lru"
+gittypes "github.com/portainer/portainer/api/git/types"
"github.com/rs/zerolog/log"
"golang.org/x/sync/singleflight"
)
@@ -22,6 +23,7 @@ type baseOption struct {
repositoryUrl string
username string
password string
+authType gittypes.GitCredentialAuthType
tlsSkipVerify bool
}
@@ -123,13 +125,22 @@ func (service *Service) timerHasStopped() bool {
// CloneRepository clones a git repository using the specified URL in the specified
// destination folder.
-func (service *Service) CloneRepository(destination, repositoryURL, referenceName, username, password string, tlsSkipVerify bool) error {
+func (service *Service) CloneRepository(
+destination,
+repositoryURL,
+referenceName,
+username,
+password string,
+authType gittypes.GitCredentialAuthType,
+tlsSkipVerify bool,
+) error {
options := cloneOption{
fetchOption: fetchOption{
baseOption: baseOption{
repositoryUrl: repositoryURL,
username: username,
password: password,
+authType: authType,
tlsSkipVerify: tlsSkipVerify,
},
referenceName: referenceName,
@@ -155,12 +166,20 @@ func (service *Service) cloneRepository(destination string, options cloneOption)
}
// LatestCommitID returns SHA1 of the latest commit of the specified reference
-func (service *Service) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) {
+func (service *Service) LatestCommitID(
+repositoryURL,
+referenceName,
+username,
+password string,
+authType gittypes.GitCredentialAuthType,
+tlsSkipVerify bool,
+) (string, error) {
options := fetchOption{
baseOption: baseOption{
repositoryUrl: repositoryURL,
username: username,
password: password,
+authType: authType,
tlsSkipVerify: tlsSkipVerify,
},
referenceName: referenceName,
@@ -170,7 +189,14 @@ func (service *Service) LatestCommitID(repositoryURL, referenceName, username, p
}
// ListRefs will list target repository's references without cloning the repository
-func (service *Service) ListRefs(repositoryURL, username, password string, hardRefresh bool, tlsSkipVerify bool) ([]string, error) {
+func (service *Service) ListRefs(
+repositoryURL,
+username,
+password string,
+authType gittypes.GitCredentialAuthType,
+hardRefresh bool,
+tlsSkipVerify bool,
+) ([]string, error) {
refCacheKey := generateCacheKey(repositoryURL, username, password, strconv.FormatBool(tlsSkipVerify))
if service.cacheEnabled && hardRefresh {
// Should remove the cache explicitly, so that the following normal list can show the correct result
@@ -196,6 +222,7 @@ func (service *Service) ListRefs(repositoryURL, username, password string, hardR
repositoryUrl: repositoryURL,
username: username,
password: password,
+authType: authType,
tlsSkipVerify: tlsSkipVerify,
}
@@ -215,18 +242,62 @@ var singleflightGroup = &singleflight.Group{}
// ListFiles will list all the files of the target repository with specific extensions.
// If extension is not provided, it will list all the files under the target repository
-func (service *Service) ListFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, includedExts []string, tlsSkipVerify bool) ([]string, error) {
-repoKey := generateCacheKey(repositoryURL, referenceName, username, password, strconv.FormatBool(tlsSkipVerify), strconv.FormatBool(dirOnly))
+func (service *Service) ListFiles(
+repositoryURL,
+referenceName,
+username,
+password string,
+authType gittypes.GitCredentialAuthType,
+dirOnly,
+hardRefresh bool,
+includedExts []string,
+tlsSkipVerify bool,
+) ([]string, error) {
+repoKey := generateCacheKey(
+repositoryURL,
+referenceName,
+username,
+password,
+strconv.FormatBool(tlsSkipVerify),
+strconv.Itoa(int(authType)),
+strconv.FormatBool(dirOnly),
+)
fs, err, _ := singleflightGroup.Do(repoKey, func() (any, error) {
-return service.listFiles(repositoryURL, referenceName, username, password, dirOnly, hardRefresh, tlsSkipVerify)
+return service.listFiles(
+repositoryURL,
+referenceName,
+username,
+password,
+authType,
+dirOnly,
+hardRefresh,
+tlsSkipVerify,
+)
})
return filterFiles(fs.([]string), includedExts), err
}
-func (service *Service) listFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, tlsSkipVerify bool) ([]string, error) {
-repoKey := generateCacheKey(repositoryURL, referenceName, username, password, strconv.FormatBool(tlsSkipVerify), strconv.FormatBool(dirOnly))
+func (service *Service) listFiles(
+repositoryURL,
+referenceName,
+username,
+password string,
+authType gittypes.GitCredentialAuthType,
+dirOnly,
+hardRefresh bool,
+tlsSkipVerify bool,
+) ([]string, error) {
+repoKey := generateCacheKey(
+repositoryURL,
+referenceName,
+username,
+password,
+strconv.FormatBool(tlsSkipVerify),
+strconv.Itoa(int(authType)),
+strconv.FormatBool(dirOnly),
+)
if service.cacheEnabled && hardRefresh {
// Should remove the cache explicitly, so that the following normal list can show the correct result
@@ -247,6 +318,7 @@ func (service *Service) listFiles(repositoryURL, referenceName, username, passwo
repositoryUrl: repositoryURL,
username: username,
password: password,
+authType: authType,
tlsSkipVerify: tlsSkipVerify,
},
referenceName: referenceName,

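For context on the caching pattern above: ListFiles funnels listFiles through a golang.org/x/sync/singleflight group keyed on the repository parameters, so concurrent identical requests share a single Git operation. A minimal standalone sketch of that pattern follows; fetchRefs, listRefsShared and the key are hypothetical stand-ins, not Portainer code.

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

// fetchRefs stands in for an expensive Git operation.
func fetchRefs(repoURL string) ([]string, error) {
	fmt.Println("fetching", repoURL) // runs once even with concurrent callers
	return []string{"refs/heads/main"}, nil
}

// listRefsShared collapses concurrent calls with the same key into one fetch.
func listRefsShared(repoURL string) ([]string, error) {
	v, err, _ := group.Do(repoURL, func() (any, error) {
		return fetchRefs(repoURL)
	})
	if err != nil {
		return nil, err
	}
	return v.([]string), nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			refs, _ := listRefsShared("https://example.com/repo.git")
			fmt.Println(refs)
		}()
	}
	wg.Wait()
}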

@ -1,12 +1,21 @@
 package gittypes

-import "errors"
+import (
+	"errors"
+)

 var (
 	ErrIncorrectRepositoryURL = errors.New("git repository could not be found, please ensure that the URL is correct")
 	ErrAuthenticationFailure  = errors.New("authentication failed, please ensure that the git credentials are correct")
 )

+type GitCredentialAuthType int
+
+const (
+	GitCredentialAuthType_Basic GitCredentialAuthType = iota
+	GitCredentialAuthType_Token
+)
+
 // RepoConfig represents a configuration for a repo
 type RepoConfig struct {
 	// The repo url
@ -24,10 +33,11 @@ type RepoConfig struct {
 }

 type GitAuthentication struct {
 	Username          string
 	Password          string
+	AuthorizationType GitCredentialAuthType
 	// Git credentials identifier when the value is not 0
-	// When the value is 0, Username and Password are set without using saved credential
+	// When the value is 0, Username, Password, and Authtype are set without using saved credential
 	// This is introduced since 2.15.0
 	GitCredentialID int `example:"0"`
 }

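Illustration only: how a caller might populate the GitAuthentication fields above for token-based credentials versus the default basic auth. Only the types and constants introduced in this diff are used; the credential values are placeholders, and the convention of carrying the token in Password follows the payload comments later in this changeset.

package main

import (
	"fmt"

	gittypes "github.com/portainer/portainer/api/git/types"
)

func main() {
	// Basic auth: AuthorizationType can be left at its zero value (GitCredentialAuthType_Basic).
	basic := gittypes.GitAuthentication{
		Username: "myGitUsername",
		Password: "myGitPassword",
	}

	// Token auth: the token travels in Password and the auth type is set explicitly.
	token := gittypes.GitAuthentication{
		Password:          "glpat-xxxxxxxx", // placeholder token
		AuthorizationType: gittypes.GitCredentialAuthType_Token,
	}

	fmt.Println(basic.AuthorizationType == gittypes.GitCredentialAuthType_Basic) // true
	fmt.Println(token.AuthorizationType == gittypes.GitCredentialAuthType_Token) // true
}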

@ -29,7 +29,14 @@ func UpdateGitObject(gitService portainer.GitService, objId string, gitConfig *g
 		return false, "", errors.WithMessagef(err, "failed to get credentials for %v", objId)
 	}

-	newHash, err := gitService.LatestCommitID(gitConfig.URL, gitConfig.ReferenceName, username, password, gitConfig.TLSSkipVerify)
+	newHash, err := gitService.LatestCommitID(
+		gitConfig.URL,
+		gitConfig.ReferenceName,
+		username,
+		password,
+		gittypes.GitCredentialAuthType_Basic,
+		gitConfig.TLSSkipVerify,
+	)
 	if err != nil {
 		return false, "", errors.WithMessagef(err, "failed to fetch latest commit id of %v", objId)
 	}
@ -62,6 +69,7 @@ func UpdateGitObject(gitService portainer.GitService, objId string, gitConfig *g
 		cloneParams.auth = &gitAuth{
 			username: username,
 			password: password,
+			authType: gitConfig.Authentication.AuthorizationType,
 		}
 	}
@ -89,14 +97,31 @@ type cloneRepositoryParameters struct {
 }

 type gitAuth struct {
+	authType gittypes.GitCredentialAuthType
 	username string
 	password string
 }

 func cloneGitRepository(gitService portainer.GitService, cloneParams *cloneRepositoryParameters) error {
 	if cloneParams.auth != nil {
-		return gitService.CloneRepository(cloneParams.toDir, cloneParams.url, cloneParams.ref, cloneParams.auth.username, cloneParams.auth.password, cloneParams.tlsSkipVerify)
+		return gitService.CloneRepository(
+			cloneParams.toDir,
+			cloneParams.url,
+			cloneParams.ref,
+			cloneParams.auth.username,
+			cloneParams.auth.password,
+			cloneParams.auth.authType,
+			cloneParams.tlsSkipVerify,
+		)
 	}

-	return gitService.CloneRepository(cloneParams.toDir, cloneParams.url, cloneParams.ref, "", "", cloneParams.tlsSkipVerify)
+	return gitService.CloneRepository(
+		cloneParams.toDir,
+		cloneParams.url,
+		cloneParams.ref,
+		"",
+		"",
+		gittypes.GitCredentialAuthType_Basic,
+		cloneParams.tlsSkipVerify,
+	)
 }


@ -3,9 +3,9 @@ package update
 import (
 	"time"

-	"github.com/asaskevich/govalidator"
 	portainer "github.com/portainer/portainer/api"
 	httperrors "github.com/portainer/portainer/api/http/errors"
+	"github.com/portainer/portainer/pkg/validate"
 )

 func ValidateAutoUpdateSettings(autoUpdate *portainer.AutoUpdateSettings) error {
@ -17,7 +17,7 @@ func ValidateAutoUpdateSettings(autoUpdate *portainer.AutoUpdateSettings) error
 		return httperrors.NewInvalidPayloadError("Webhook or Interval must be provided")
 	}

-	if autoUpdate.Webhook != "" && !govalidator.IsUUID(autoUpdate.Webhook) {
+	if autoUpdate.Webhook != "" && !validate.IsUUID(autoUpdate.Webhook) {
 		return httperrors.NewInvalidPayloadError("invalid Webhook format")
 	}

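These hunks swap the external govalidator package for the internal pkg/validate helpers. A minimal sketch of the same check pattern in a payload validator; the webhookPayload type is hypothetical, and only validate.IsUUID and validate.IsURL, both of which appear in this diff, are assumed.

package main

import (
	"errors"
	"fmt"

	"github.com/portainer/portainer/pkg/validate"
)

type webhookPayload struct {
	Webhook string
	RepoURL string
}

// Validate mirrors the style used by the handlers in this changeset.
func (p *webhookPayload) Validate() error {
	if p.Webhook != "" && !validate.IsUUID(p.Webhook) {
		return errors.New("invalid Webhook format")
	}
	if p.RepoURL != "" && !validate.IsURL(p.RepoURL) {
		return errors.New("invalid repository URL")
	}
	return nil
}

func main() {
	p := webhookPayload{Webhook: "not-a-uuid"}
	fmt.Println(p.Validate()) // invalid Webhook format
}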

@ -1,19 +1,17 @@
 package git

 import (
-	"github.com/asaskevich/govalidator"
 	gittypes "github.com/portainer/portainer/api/git/types"
 	httperrors "github.com/portainer/portainer/api/http/errors"
+	"github.com/portainer/portainer/pkg/validate"
 )

 func ValidateRepoConfig(repoConfig *gittypes.RepoConfig) error {
-	if len(repoConfig.URL) == 0 || !govalidator.IsURL(repoConfig.URL) {
+	if len(repoConfig.URL) == 0 || !validate.IsURL(repoConfig.URL) {
 		return httperrors.NewInvalidPayloadError("Invalid repository URL. Must correspond to a valid URL format")
 	}

 	return ValidateRepoAuthentication(repoConfig.Authentication)
 }

 func ValidateRepoAuthentication(auth *gittypes.GitAuthentication) error {


@ -32,15 +32,12 @@ type Service struct {
 }

 // NewService initializes a new service.
-func NewService() *Service {
-	tlsConfig := crypto.CreateTLSConfiguration()
-	tlsConfig.InsecureSkipVerify = true
-
+func NewService(insecureSkipVerify bool) *Service {
 	return &Service{
 		httpsClient: &http.Client{
 			Timeout: httpClientTimeout,
 			Transport: &http.Transport{
-				TLSClientConfig: tlsConfig,
+				TLSClientConfig: crypto.CreateTLSConfiguration(insecureSkipVerify),
 			},
 		},
 	}

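The centralized helper now takes the skip-verify flag directly instead of the caller mutating the returned config. A sketch of building an HTTPS client the same way; only crypto.CreateTLSConfiguration(bool) as shown above is assumed, and the timeout value is illustrative.

package main

import (
	"net/http"
	"time"

	"github.com/portainer/portainer/api/crypto"
)

// newHTTPSClient mirrors the pattern above: the TLS configuration is produced
// by the shared crypto helper rather than assembled by hand at each call site.
func newHTTPSClient(insecureSkipVerify bool) *http.Client {
	return &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: crypto.CreateTLSConfiguration(insecureSkipVerify),
		},
	}
}

func main() {
	_ = newHTTPSClient(false)
}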

@ -0,0 +1,14 @@
package openamt
import (
"net/http"
"testing"
"github.com/stretchr/testify/require"
)
func TestNewService(t *testing.T) {
service := NewService(true)
require.NotNil(t, service)
require.True(t, service.httpsClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify) //nolint:forbidigo
}

View file

@ -1,7 +1,6 @@
 package client

 import (
-	"crypto/tls"
 	"errors"
 	"fmt"
 	"io"
@ -11,6 +10,7 @@ import (
 	"time"

 	portainer "github.com/portainer/portainer/api"
+	"github.com/portainer/portainer/api/crypto"

 	"github.com/rs/zerolog/log"
 	"github.com/segmentio/encoding/json"
@ -105,21 +105,28 @@ func Get(url string, timeout int) ([]byte, error) {
 // ExecutePingOperation will send a SystemPing operation HTTP request to a Docker environment(endpoint)
 // using the specified host and optional TLS configuration.
 // It uses a new Http.Client for each operation.
-func ExecutePingOperation(host string, tlsConfig *tls.Config) (bool, error) {
+func ExecutePingOperation(host string, tlsConfiguration portainer.TLSConfiguration) (bool, error) {
 	transport := &http.Transport{}

 	scheme := "http"
-	if tlsConfig != nil {
+	if tlsConfiguration.TLS {
+		tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(tlsConfiguration)
+		if err != nil {
+			return false, err
+		}
+
 		transport.TLSClientConfig = tlsConfig
 		scheme = "https"
 	}

 	client := &http.Client{
-		Timeout:   time.Second * 3,
+		Timeout:   3 * time.Second,
 		Transport: transport,
 	}

 	target := strings.Replace(host, "tcp://", scheme+"://", 1)

 	return pingOperation(client, target)
 }

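Callers now pass the stored portainer.TLSConfiguration rather than a prebuilt *tls.Config; the certificate material is loaded from disk inside the function. A hedged usage sketch follows: the api/http/client import path, the host, and the certificate paths are assumptions for illustration.

package main

import (
	"fmt"

	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/http/client"
)

func main() {
	cfg := portainer.TLSConfiguration{
		TLS:           true,
		TLSSkipVerify: false,
		TLSCACertPath: "/data/tls/ca.pem", // illustrative paths
		TLSCertPath:   "/data/tls/cert.pem",
		TLSKeyPath:    "/data/tls/key.pem",
	}

	// The "tcp://" prefix is rewritten to http(s):// inside ExecutePingOperation.
	ok, err := client.ExecutePingOperation("tcp://10.0.0.10:2376", cfg)
	fmt.Println(ok, err)
}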

@ -0,0 +1,31 @@
package client
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/require"
)
func TestExecutePingOperationFailure(t *testing.T) {
host := "http://localhost:1"
config := portainer.TLSConfiguration{
TLS: true,
TLSSkipVerify: true,
}
// Invalid host
ok, err := ExecutePingOperation(host, config)
require.False(t, ok)
require.Error(t, err)
// Invalid TLS configuration
config.TLSCertPath = "/invalid/path/to/cert"
config.TLSKeyPath = "/invalid/path/to/key"
ok, err = ExecutePingOperation(host, config)
require.False(t, ok)
require.Error(t, err)
}

View file

@ -2,6 +2,7 @@ package csrf
 import (
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"net/http"
 	"os"
@ -9,7 +10,8 @@ import (
 	"github.com/portainer/portainer/api/http/security"
 	httperror "github.com/portainer/portainer/pkg/libhttp/error"

-	gorillacsrf "github.com/gorilla/csrf"
+	gcsrf "github.com/gorilla/csrf"
+	"github.com/rs/zerolog/log"
 	"github.com/urfave/negroni"
 )
@ -19,7 +21,7 @@ func SkipCSRFToken(w http.ResponseWriter) {
 	w.Header().Set(csrfSkipHeader, "1")
 }

-func WithProtect(handler http.Handler) (http.Handler, error) {
+func WithProtect(handler http.Handler, trustedOrigins []string) (http.Handler, error) {
 	// IsDockerDesktopExtension is used to check if we should skip csrf checks in the request bouncer (ShouldSkipCSRFCheck)
 	// DOCKER_EXTENSION is set to '1' in build/docker-extension/docker-compose.yml
 	isDockerDesktopExtension := false
@ -34,10 +36,12 @@ func WithProtect(handler http.Handler) (http.Handler, error) {
 		return nil, fmt.Errorf("failed to generate CSRF token: %w", err)
 	}

-	handler = gorillacsrf.Protect(
+	handler = gcsrf.Protect(
 		token,
-		gorillacsrf.Path("/"),
-		gorillacsrf.Secure(false),
+		gcsrf.Path("/"),
+		gcsrf.Secure(false),
+		gcsrf.TrustedOrigins(trustedOrigins),
+		gcsrf.ErrorHandler(withErrorHandler(trustedOrigins)),
 	)(handler)

 	return withSkipCSRF(handler, isDockerDesktopExtension), nil
@ -55,7 +59,7 @@ func withSendCSRFToken(handler http.Handler) http.Handler {
 		}

 		if statusCode := sw.Status(); statusCode >= 200 && statusCode < 300 {
-			sw.Header().Set("X-CSRF-Token", gorillacsrf.Token(r))
+			sw.Header().Set("X-CSRF-Token", gcsrf.Token(r))
 		}
 	})
@ -73,9 +77,33 @@ func withSkipCSRF(handler http.Handler, isDockerDesktopExtension bool) http.Hand
 		}

 		if skip {
-			r = gorillacsrf.UnsafeSkipCheck(r)
+			r = gcsrf.UnsafeSkipCheck(r)
 		}

 		handler.ServeHTTP(w, r)
 	})
 }
+
+func withErrorHandler(trustedOrigins []string) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		err := gcsrf.FailureReason(r)
+
+		if errors.Is(err, gcsrf.ErrBadOrigin) || errors.Is(err, gcsrf.ErrBadReferer) || errors.Is(err, gcsrf.ErrNoReferer) {
+			log.Error().Err(err).
+				Str("request_url", r.URL.String()).
+				Str("host", r.Host).
+				Str("x_forwarded_proto", r.Header.Get("X-Forwarded-Proto")).
+				Str("forwarded", r.Header.Get("Forwarded")).
+				Str("origin", r.Header.Get("Origin")).
+				Str("referer", r.Header.Get("Referer")).
+				Strs("trusted_origins", trustedOrigins).
+				Msg("Failed to validate Origin or Referer")
+		}
+
+		http.Error(
+			w,
+			http.StatusText(http.StatusForbidden)+" - "+err.Error(),
+			http.StatusForbidden,
+		)
+	})
+}

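For reference, the gorilla/csrf options used above in isolation: TrustedOrigins whitelists cross-origin hosts and ErrorHandler receives requests whose Origin/Referer checks fail, with FailureReason exposing the cause. A minimal standalone sketch, not the Portainer wiring; the key, origin, and port are placeholders.

package main

import (
	"net/http"

	gcsrf "github.com/gorilla/csrf"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	key := []byte("32-byte-long-auth-key-goes-here!") // demo key, 32 bytes

	failureHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// FailureReason reports why the CSRF middleware rejected the request.
		http.Error(w, "CSRF failure: "+gcsrf.FailureReason(r).Error(), http.StatusForbidden)
	})

	protected := gcsrf.Protect(
		key,
		gcsrf.Path("/"),
		gcsrf.TrustedOrigins([]string{"portainer.example.com"}),
		gcsrf.ErrorHandler(failureHandler),
	)(mux)

	http.ListenAndServe(":8080", protected)
}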

@ -2,6 +2,7 @@ package auth
 import (
 	"net/http"
+	"strconv"
 	"strings"

 	portainer "github.com/portainer/portainer/api"
@ -82,6 +83,11 @@ func (handler *Handler) authenticate(rw http.ResponseWriter, r *http.Request) *h
 		}
 	}

+	// Clear any existing user caches
+	if user != nil {
+		handler.KubernetesClientFactory.ClearUserClientCache(strconv.Itoa(int(user.ID)))
+	}
+
 	if user != nil && isUserInitialAdmin(user) || settings.AuthenticationMethod == portainer.AuthenticationInternal {
 		return handler.authenticateInternal(rw, user, payload.Password)
 	}


@ -8,6 +8,7 @@ import (
 	"github.com/portainer/portainer/api/http/proxy"
 	"github.com/portainer/portainer/api/http/proxy/factory/kubernetes"
 	"github.com/portainer/portainer/api/http/security"
+	"github.com/portainer/portainer/api/kubernetes/cli"
 	httperror "github.com/portainer/portainer/pkg/libhttp/error"

 	"github.com/gorilla/mux"
@ -23,16 +24,18 @@ type Handler struct {
 	OAuthService                portainer.OAuthService
 	ProxyManager                *proxy.Manager
 	KubernetesTokenCacheManager *kubernetes.TokenCacheManager
+	KubernetesClientFactory     *cli.ClientFactory
 	passwordStrengthChecker     security.PasswordStrengthChecker
 	bouncer                     security.BouncerService
 }

 // NewHandler creates a handler to manage authentication operations.
-func NewHandler(bouncer security.BouncerService, rateLimiter *security.RateLimiter, passwordStrengthChecker security.PasswordStrengthChecker) *Handler {
+func NewHandler(bouncer security.BouncerService, rateLimiter *security.RateLimiter, passwordStrengthChecker security.PasswordStrengthChecker, kubernetesClientFactory *cli.ClientFactory) *Handler {
 	h := &Handler{
 		Router:                  mux.NewRouter(),
 		passwordStrengthChecker: passwordStrengthChecker,
 		bouncer:                 bouncer,
+		KubernetesClientFactory: kubernetesClientFactory,
 	}

 	h.Handle("/auth/oauth/validate",


@ -2,6 +2,7 @@ package auth
 import (
 	"net/http"
+	"strconv"

 	"github.com/portainer/portainer/api/http/security"
 	"github.com/portainer/portainer/api/logoutcontext"
@ -23,6 +24,7 @@ func (handler *Handler) logout(w http.ResponseWriter, r *http.Request) *httperro
 	if tokenData != nil {
 		handler.KubernetesTokenCacheManager.RemoveUserFromCache(tokenData.ID)
+		handler.KubernetesClientFactory.ClearUserClientCache(strconv.Itoa(int(tokenData.ID)))
 		logoutcontext.Cancel(tokenData.Token)
 	}


@ -18,10 +18,15 @@ import (
 	"github.com/portainer/portainer/api/crypto"
 	"github.com/portainer/portainer/api/http/offlinegate"
 	"github.com/portainer/portainer/api/internal/testhelpers"
+	"github.com/portainer/portainer/pkg/fips"

 	"github.com/stretchr/testify/assert"
 )

+func init() {
+	fips.InitFIPS(false)
+}
+
 func listFiles(dir string) []string {
 	items := make([]string, 0)
 	filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {


@ -16,8 +16,8 @@ import (
 	httperror "github.com/portainer/portainer/pkg/libhttp/error"
 	"github.com/portainer/portainer/pkg/libhttp/request"
 	"github.com/portainer/portainer/pkg/libhttp/response"
+	"github.com/portainer/portainer/pkg/validate"

-	"github.com/asaskevich/govalidator"
 	"github.com/rs/zerolog/log"
 	"github.com/segmentio/encoding/json"
 )
@ -228,7 +228,7 @@ func (payload *customTemplateFromGitRepositoryPayload) Validate(r *http.Request)
 	if len(payload.Description) == 0 {
 		return errors.New("Invalid custom template description")
 	}
-	if len(payload.RepositoryURL) == 0 || !govalidator.IsURL(payload.RepositoryURL) {
+	if len(payload.RepositoryURL) == 0 || !validate.IsURL(payload.RepositoryURL) {
 		return errors.New("Invalid repository URL. Must correspond to a valid URL format")
 	}
 	if payload.RepositoryAuthentication && (len(payload.RepositoryUsername) == 0 || len(payload.RepositoryPassword) == 0) {
@ -482,28 +482,3 @@ func (handler *Handler) createCustomTemplateFromFileUpload(r *http.Request) (*po
 	return customTemplate, nil
 }
-
-// @id CustomTemplateCreate
-// @summary Create a custom template
-// @description Create a custom template.
-// @description **Access policy**: authenticated
-// @tags custom_templates
-// @security ApiKeyAuth
-// @security jwt
-// @accept json,multipart/form-data
-// @produce json
-// @param method query string true "method for creating template" Enums(string, file, repository)
-// @param body body object true "for body documentation see the relevant /custom_templates/{method} endpoint"
-// @success 200 {object} portainer.CustomTemplate
-// @failure 400 "Invalid request"
-// @failure 500 "Server error"
-// @deprecated
-// @router /custom_templates [post]
-func deprecatedCustomTemplateCreateUrlParser(w http.ResponseWriter, r *http.Request) (string, *httperror.HandlerError) {
-	method, err := request.RetrieveQueryParameter(r, "method", false)
-	if err != nil {
-		return "", httperror.BadRequest("Invalid query parameter: method", err)
-	}
-
-	return "/custom_templates/create/" + method, nil
-}


@ -20,12 +20,17 @@ import (
 	"github.com/portainer/portainer/api/internal/authorization"
 	"github.com/portainer/portainer/api/internal/testhelpers"
 	"github.com/portainer/portainer/api/jwt"
+	"github.com/portainer/portainer/pkg/fips"
 	httperror "github.com/portainer/portainer/pkg/libhttp/error"

 	"github.com/segmentio/encoding/json"
 	"github.com/stretchr/testify/assert"
 )

+func init() {
+	fips.InitFIPS(false)
+}
+
 var testFileContent = "abcdefg"

 type TestGitService struct {
@ -33,13 +38,28 @@ type TestGitService struct {
 	targetFilePath string
 }

-func (g *TestGitService) CloneRepository(destination string, repositoryURL, referenceName string, username, password string, tlsSkipVerify bool) error {
+func (g *TestGitService) CloneRepository(
+	destination string,
+	repositoryURL,
+	referenceName string,
+	username,
+	password string,
+	authType gittypes.GitCredentialAuthType,
+	tlsSkipVerify bool,
+) error {
 	time.Sleep(100 * time.Millisecond)
 	return createTestFile(g.targetFilePath)
 }

-func (g *TestGitService) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) {
+func (g *TestGitService) LatestCommitID(
+	repositoryURL,
+	referenceName,
+	username,
+	password string,
+	authType gittypes.GitCredentialAuthType,
+	tlsSkipVerify bool,
+) (string, error) {
 	return "", nil
 }
@ -56,11 +76,26 @@ type InvalidTestGitService struct {
 	targetFilePath string
 }

-func (g *InvalidTestGitService) CloneRepository(dest, repoUrl, refName, username, password string, tlsSkipVerify bool) error {
+func (g *InvalidTestGitService) CloneRepository(
+	dest,
+	repoUrl,
+	refName,
+	username,
+	password string,
+	authType gittypes.GitCredentialAuthType,
+	tlsSkipVerify bool,
+) error {
 	return errors.New("simulate network error")
 }

-func (g *InvalidTestGitService) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) {
+func (g *InvalidTestGitService) LatestCommitID(
+	repositoryURL,
+	referenceName,
+	username,
+	password string,
+	authType gittypes.GitCredentialAuthType,
+	tlsSkipVerify bool,
+) (string, error) {
 	return "", nil
 }


@ -71,7 +71,7 @@ func (handler *Handler) customTemplateList(w http.ResponseWriter, r *http.Reques
 	customTemplates = filterByType(customTemplates, templateTypes)

 	if edge != nil {
-		customTemplates = slicesx.Filter(customTemplates, func(customTemplate portainer.CustomTemplate) bool {
+		customTemplates = slicesx.FilterInPlace(customTemplates, func(customTemplate portainer.CustomTemplate) bool {
 			return customTemplate.EdgeTemplate == *edge
 		})
 	}


@ -15,8 +15,7 @@ import (
 	httperror "github.com/portainer/portainer/pkg/libhttp/error"
 	"github.com/portainer/portainer/pkg/libhttp/request"
 	"github.com/portainer/portainer/pkg/libhttp/response"
+	"github.com/portainer/portainer/pkg/validate"

-	"github.com/asaskevich/govalidator"
 )

 type customTemplateUpdatePayload struct {
@ -38,14 +37,16 @@ type customTemplateUpdatePayload struct {
 	RepositoryURL string `example:"https://github.com/openfaas/faas" validate:"required"`
 	// Reference name of a Git repository hosting the Stack file
 	RepositoryReferenceName string `example:"refs/heads/master"`
-	// Use basic authentication to clone the Git repository
+	// Use authentication to clone the Git repository
 	RepositoryAuthentication bool `example:"true"`
 	// Username used in basic authentication. Required when RepositoryAuthentication is true
-	// and RepositoryGitCredentialID is 0
+	// and RepositoryGitCredentialID is 0. Ignored if RepositoryAuthType is token
 	RepositoryUsername string `example:"myGitUsername"`
-	// Password used in basic authentication. Required when RepositoryAuthentication is true
-	// and RepositoryGitCredentialID is 0
+	// Password used in basic authentication or token used in token authentication.
+	// Required when RepositoryAuthentication is true and RepositoryGitCredentialID is 0
 	RepositoryPassword string `example:"myGitPassword"`
+	// RepositoryAuthorizationType is the authorization type to use
+	RepositoryAuthorizationType gittypes.GitCredentialAuthType `example:"0"`
 	// GitCredentialID used to identify the bound git credential. Required when RepositoryAuthentication
 	// is true and RepositoryUsername/RepositoryPassword are not provided
 	RepositoryGitCredentialID int `example:"0"`
@ -170,7 +171,7 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ
 	customTemplate.EdgeTemplate = payload.EdgeTemplate

 	if payload.RepositoryURL != "" {
-		if !govalidator.IsURL(payload.RepositoryURL) {
+		if !validate.IsURL(payload.RepositoryURL) {
 			return httperror.BadRequest("Invalid repository URL. Must correspond to a valid URL format", err)
 		}
@ -183,12 +184,15 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ
 		repositoryUsername := ""
 		repositoryPassword := ""
+		repositoryAuthType := gittypes.GitCredentialAuthType_Basic
 		if payload.RepositoryAuthentication {
 			repositoryUsername = payload.RepositoryUsername
 			repositoryPassword = payload.RepositoryPassword
+			repositoryAuthType = payload.RepositoryAuthorizationType
 			gitConfig.Authentication = &gittypes.GitAuthentication{
 				Username:          payload.RepositoryUsername,
 				Password:          payload.RepositoryPassword,
+				AuthorizationType: payload.RepositoryAuthorizationType,
 			}
 		}
@ -198,6 +202,7 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ
 			ReferenceName: gitConfig.ReferenceName,
 			Username:      repositoryUsername,
 			Password:      repositoryPassword,
+			AuthType:      repositoryAuthType,
 			TLSSkipVerify: gitConfig.TLSSkipVerify,
 		})
 		if err != nil {
@ -206,7 +211,14 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ
 		defer cleanBackup()

-		commitHash, err := handler.GitService.LatestCommitID(gitConfig.URL, gitConfig.ReferenceName, repositoryUsername, repositoryPassword, gitConfig.TLSSkipVerify)
+		commitHash, err := handler.GitService.LatestCommitID(
+			gitConfig.URL,
+			gitConfig.ReferenceName,
+			repositoryUsername,
+			repositoryPassword,
+			repositoryAuthType,
+			gitConfig.TLSSkipVerify,
+		)
 		if err != nil {
 			return httperror.InternalServerError("Unable get latest commit id", fmt.Errorf("failed to fetch latest commit id of the template %v: %w", customTemplate.ID, err))
 		}


@ -7,7 +7,6 @@ import (
 	"github.com/gorilla/mux"
 	portainer "github.com/portainer/portainer/api"
 	"github.com/portainer/portainer/api/dataservices"
-	"github.com/portainer/portainer/api/http/middlewares"
 	"github.com/portainer/portainer/api/http/security"
 	httperror "github.com/portainer/portainer/pkg/libhttp/error"
 )
@ -33,7 +32,6 @@ func NewHandler(bouncer security.BouncerService, dataStore dataservices.DataStor
 	h.Handle("/custom_templates/create/{method}",
 		bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.customTemplateCreate))).Methods(http.MethodPost)
-	h.Handle("/custom_templates", middlewares.Deprecated(h, deprecatedCustomTemplateCreateUrlParser)).Methods(http.MethodPost) // Deprecated
 	h.Handle("/custom_templates",
 		bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.customTemplateList))).Methods(http.MethodGet)
 	h.Handle("/custom_templates/{id}",


@ -6,6 +6,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/image"
+	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/api/types/volume"
 	portainer "github.com/portainer/portainer/api"
@ -116,12 +117,12 @@ func (h *Handler) dashboard(w http.ResponseWriter, r *http.Request) *httperror.H
 			return err
 		}

-		networks, err := cli.NetworkList(r.Context(), types.NetworkListOptions{})
+		networks, err := cli.NetworkList(r.Context(), network.ListOptions{})
 		if err != nil {
 			return httperror.InternalServerError("Unable to retrieve Docker networks", err)
 		}

-		networks, err = utils.FilterByResourceControl(tx, networks, portainer.NetworkResourceControl, context, func(c types.NetworkResource) string {
+		networks, err = utils.FilterByResourceControl(tx, networks, portainer.NetworkResourceControl, context, func(c network.Summary) string {
 			return c.Name
 		})
 		if err != nil {

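The dashboard now uses the Docker SDK network subpackage types (network.ListOptions, network.Summary) that superseded types.NetworkListOptions and types.NetworkResource. A minimal list call with the new types, assuming a reachable Docker daemon and the SDK version vendored by this changeset:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// network.ListOptions replaces the deprecated types.NetworkListOptions.
	networks, err := cli.NetworkList(context.Background(), network.ListOptions{})
	if err != nil {
		panic(err)
	}

	for _, n := range networks { // each element is a network.Summary
		fmt.Println(n.Name)
	}
}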

@ -1,10 +1,11 @@
 package images

 import (
+	"context"
+	"fmt"
 	"net/http"
 	"strings"

-	"github.com/portainer/portainer/api/docker/client"
 	"github.com/portainer/portainer/api/http/handler/docker/utils"
 	"github.com/portainer/portainer/api/set"
 	httperror "github.com/portainer/portainer/pkg/libhttp/error"
@ -46,17 +47,16 @@ func (handler *Handler) imagesList(w http.ResponseWriter, r *http.Request) *http
 		return httpErr
 	}

-	images, err := cli.ImageList(r.Context(), image.ListOptions{})
+	nodeNames := make(map[string]string)
+	// Pass the node names map to the context so the custom NodeNameTransport can use it
+	ctx := context.WithValue(r.Context(), "nodeNames", nodeNames)
+	images, err := cli.ImageList(ctx, image.ListOptions{})
 	if err != nil {
 		return httperror.InternalServerError("Unable to retrieve Docker images", err)
 	}

-	// Extract the node name from the custom transport
-	nodeNames := make(map[string]string)
-	if t, ok := cli.HTTPClient().Transport.(*client.NodeNameTransport); ok {
-		nodeNames = t.NodeNames()
-	}
-
 	withUsage, err := request.RetrieveBooleanQueryParameter(r, "withUsage", true)
 	if err != nil {
 		return httperror.BadRequest("Invalid query parameter: withUsage", err)
@ -85,8 +85,12 @@ func (handler *Handler) imagesList(w http.ResponseWriter, r *http.Request) *http
 		}

 		imagesList[i] = ImageResponse{
 			Created: image.Created,
-			NodeName: nodeNames[image.ID],
+			// Only works if the order of `images` is not changed between unmarshaling the agent's response
+			// in NodeNameTransport.RoundTrip() (api/docker/client/client.go)
+			// and docker's cli.ImageList()
+			// As both functions unmarshal the same response body, the resulting array will be ordered the same way.
+			NodeName: nodeNames[fmt.Sprintf("%s-%d", image.ID, i)],
 			ID:   image.ID,
 			Size: image.Size,
 			Tags: image.RepoTags,

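The hunk above passes a map through the request context so a custom http.RoundTripper can populate it during the round trip, and the handler reads it afterwards. A generic, self-contained illustration of that pattern follows; the annotatingTransport type and the typed context key are hypothetical (the hunk itself keeps the string key "nodeNames"), shown only to demonstrate the mechanism.

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type ctxKey string

const resultsKey ctxKey = "results"

// annotatingTransport records something about each response into a map
// that the caller supplied via the request context.
type annotatingTransport struct {
	next http.RoundTripper
}

func (t *annotatingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := t.next.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	if m, ok := req.Context().Value(resultsKey).(map[string]string); ok {
		m[req.URL.Path] = resp.Status // visible to the caller after the request
	}
	return resp, nil
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
	defer srv.Close()

	client := &http.Client{Transport: &annotatingTransport{next: http.DefaultTransport}}

	results := make(map[string]string)
	ctx := context.WithValue(context.Background(), resultsKey, results)

	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, srv.URL+"/ping", nil)
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	fmt.Println(results["/ping"]) // e.g. "200 OK"
}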

@ -4,6 +4,7 @@ import (
 	portainer "github.com/portainer/portainer/api"
 	"github.com/portainer/portainer/api/dataservices"
 	"github.com/portainer/portainer/api/internal/endpointutils"
+	"github.com/portainer/portainer/api/roar"
 )

 type endpointSetType map[portainer.EndpointID]bool
@ -49,22 +50,29 @@ func GetEndpointsByTags(tx dataservices.DataStoreTx, tagIDs []portainer.TagID, p
 	return results, nil
 }

-func getTrustedEndpoints(tx dataservices.DataStoreTx, endpointIDs []portainer.EndpointID) ([]portainer.EndpointID, error) {
+func getTrustedEndpoints(tx dataservices.DataStoreTx, endpointIDs roar.Roar[portainer.EndpointID]) ([]portainer.EndpointID, error) {
+	var innerErr error
 	results := []portainer.EndpointID{}
-	for _, endpointID := range endpointIDs {
+
+	endpointIDs.Iterate(func(endpointID portainer.EndpointID) bool {
 		endpoint, err := tx.Endpoint().Endpoint(endpointID)
 		if err != nil {
-			return nil, err
+			innerErr = err
+			return false
 		}

 		if !endpoint.UserTrusted {
-			continue
+			return true
 		}

 		results = append(results, endpoint.ID)
-	}

-	return results, nil
+		return true
+	})
+
+	return results, innerErr
 }

 func mapEndpointGroupToEndpoints(endpoints []portainer.Endpoint) map[portainer.EndpointGroupID]endpointSetType {

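Since the roar bitmap iteration above has no built-in early return with an error, the hunk stashes the first failure in innerErr and stops by returning false from the callback. A reduced sketch of that pattern, using only roar.FromSlice and Iterate as they appear in this diff; the lookup function is a stand-in for tx.Endpoint().Endpoint(id).

package main

import (
	"errors"
	"fmt"

	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/roar"
)

// lookup stands in for a datastore read that can fail.
func lookup(id portainer.EndpointID) (portainer.Endpoint, error) {
	if id == 2 {
		return portainer.Endpoint{}, errors.New("endpoint 2 not found")
	}
	return portainer.Endpoint{ID: id, UserTrusted: true}, nil
}

func trustedIDs(ids roar.Roar[portainer.EndpointID]) ([]portainer.EndpointID, error) {
	var innerErr error
	results := []portainer.EndpointID{}

	ids.Iterate(func(id portainer.EndpointID) bool {
		endpoint, err := lookup(id)
		if err != nil {
			innerErr = err
			return false // stop iterating on the first error
		}
		if endpoint.UserTrusted {
			results = append(results, endpoint.ID)
		}
		return true
	})

	return results, innerErr
}

func main() {
	fmt.Println(trustedIDs(roar.FromSlice([]portainer.EndpointID{1, 2, 3})))
}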

@ -7,6 +7,7 @@ import (
 	portainer "github.com/portainer/portainer/api"
 	"github.com/portainer/portainer/api/dataservices"
 	"github.com/portainer/portainer/api/internal/endpointutils"
+	"github.com/portainer/portainer/api/roar"
 	httperror "github.com/portainer/portainer/pkg/libhttp/error"
 	"github.com/portainer/portainer/pkg/libhttp/request"
 )
@ -52,6 +53,7 @@ func calculateEndpointsOrTags(tx dataservices.DataStoreTx, edgeGroup *portainer.
 	}

 	edgeGroup.Endpoints = endpointIDs
+	edgeGroup.EndpointIDs = roar.FromSlice(endpointIDs)

 	return nil
 }
@ -94,6 +96,7 @@ func (handler *Handler) edgeGroupCreate(w http.ResponseWriter, r *http.Request)
 		Dynamic:      payload.Dynamic,
 		TagIDs:       []portainer.TagID{},
 		Endpoints:    []portainer.EndpointID{},
+		EndpointIDs:  roar.Roar[portainer.EndpointID]{},
 		PartialMatch: payload.PartialMatch,
 	}
@ -108,5 +111,5 @@ func (handler *Handler) edgeGroupCreate(w http.ResponseWriter, r *http.Request)
 		return nil
 	})

-	return txResponse(w, edgeGroup, err)
+	return txResponse(w, shadowedEdgeGroup{EdgeGroup: *edgeGroup}, err)
 }


@ -0,0 +1,62 @@
package edgegroups
import (
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/portainer/portainer/api/internal/testhelpers"
"github.com/segmentio/encoding/json"
"github.com/stretchr/testify/require"
)
func TestEdgeGroupCreateHandler(t *testing.T) {
_, store := datastore.MustNewTestStore(t, true, true)
handler := NewHandler(testhelpers.NewTestRequestBouncer())
handler.DataStore = store
err := store.EndpointGroup().Create(&portainer.EndpointGroup{
ID: 1,
Name: "Test Group",
})
require.NoError(t, err)
for i := range 3 {
err = store.Endpoint().Create(&portainer.Endpoint{
ID: portainer.EndpointID(i + 1),
Name: "Test Endpoint " + strconv.Itoa(i+1),
Type: portainer.EdgeAgentOnDockerEnvironment,
GroupID: 1,
})
require.NoError(t, err)
err = store.EndpointRelation().Create(&portainer.EndpointRelation{
EndpointID: portainer.EndpointID(i + 1),
EdgeStacks: map[portainer.EdgeStackID]bool{},
})
require.NoError(t, err)
}
rr := httptest.NewRecorder()
req := httptest.NewRequest(
http.MethodPost,
"/edge_groups",
strings.NewReader(`{"Name": "New Edge Group", "Endpoints": [1, 2, 3]}`),
)
handler.ServeHTTP(rr, req)
require.Equal(t, http.StatusOK, rr.Result().StatusCode)
var responseGroup portainer.EdgeGroup
err = json.NewDecoder(rr.Body).Decode(&responseGroup)
require.NoError(t, err)
require.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroup.Endpoints)
}


@ -5,6 +5,7 @@ import (
 	portainer "github.com/portainer/portainer/api"
 	"github.com/portainer/portainer/api/dataservices"
+	"github.com/portainer/portainer/api/roar"
 	httperror "github.com/portainer/portainer/pkg/libhttp/error"
 	"github.com/portainer/portainer/pkg/libhttp/request"
 )
@ -33,7 +34,9 @@ func (handler *Handler) edgeGroupInspect(w http.ResponseWriter, r *http.Request)
 		return err
 	})

-	return txResponse(w, edgeGroup, err)
+	edgeGroup.Endpoints = edgeGroup.EndpointIDs.ToSlice()
+
+	return txResponse(w, shadowedEdgeGroup{EdgeGroup: *edgeGroup}, err)
 }

 func getEdgeGroup(tx dataservices.DataStoreTx, ID portainer.EdgeGroupID) (*portainer.EdgeGroup, error) {
@ -50,7 +53,7 @@ func getEdgeGroup(tx dataservices.DataStoreTx, ID portainer.EdgeGroupID) (*porta
 			return nil, httperror.InternalServerError("Unable to retrieve environments and environment groups for Edge group", err)
 		}

-		edgeGroup.Endpoints = endpoints
+		edgeGroup.EndpointIDs = roar.FromSlice(endpoints)
 	}

 	return edgeGroup, err


@ -0,0 +1,176 @@
package edgegroups
import (
"net/http"
"net/http/httptest"
"strconv"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/portainer/portainer/api/internal/testhelpers"
"github.com/portainer/portainer/api/roar"
"github.com/segmentio/encoding/json"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestEdgeGroupInspectHandler(t *testing.T) {
_, store := datastore.MustNewTestStore(t, true, true)
handler := NewHandler(testhelpers.NewTestRequestBouncer())
handler.DataStore = store
err := store.EndpointGroup().Create(&portainer.EndpointGroup{
ID: 1,
Name: "Test Group",
})
require.NoError(t, err)
for i := range 3 {
err = store.Endpoint().Create(&portainer.Endpoint{
ID: portainer.EndpointID(i + 1),
Name: "Test Endpoint " + strconv.Itoa(i+1),
Type: portainer.EdgeAgentOnDockerEnvironment,
GroupID: 1,
})
require.NoError(t, err)
err = store.EndpointRelation().Create(&portainer.EndpointRelation{
EndpointID: portainer.EndpointID(i + 1),
EdgeStacks: map[portainer.EdgeStackID]bool{},
})
require.NoError(t, err)
}
err = store.EdgeGroup().Create(&portainer.EdgeGroup{
ID: 1,
Name: "Test Edge Group",
EndpointIDs: roar.FromSlice([]portainer.EndpointID{1, 2, 3}),
})
require.NoError(t, err)
rr := httptest.NewRecorder()
req := httptest.NewRequest(
http.MethodGet,
"/edge_groups/1",
nil,
)
handler.ServeHTTP(rr, req)
require.Equal(t, http.StatusOK, rr.Result().StatusCode)
var responseGroup portainer.EdgeGroup
err = json.NewDecoder(rr.Body).Decode(&responseGroup)
require.NoError(t, err)
assert.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroup.Endpoints)
}
func TestEmptyEdgeGroupInspectHandler(t *testing.T) {
_, store := datastore.MustNewTestStore(t, true, true)
handler := NewHandler(testhelpers.NewTestRequestBouncer())
handler.DataStore = store
err := store.EndpointGroup().Create(&portainer.EndpointGroup{
ID: 1,
Name: "Test Group",
})
require.NoError(t, err)
err = store.EdgeGroup().Create(&portainer.EdgeGroup{
ID: 1,
Name: "Test Edge Group",
EndpointIDs: roar.Roar[portainer.EndpointID]{},
})
require.NoError(t, err)
rr := httptest.NewRecorder()
req := httptest.NewRequest(
http.MethodGet,
"/edge_groups/1",
nil,
)
handler.ServeHTTP(rr, req)
require.Equal(t, http.StatusOK, rr.Result().StatusCode)
var responseGroup portainer.EdgeGroup
err = json.NewDecoder(rr.Body).Decode(&responseGroup)
require.NoError(t, err)
// Make sure the frontend does not get a null value but a [] instead
require.NotNil(t, responseGroup.Endpoints)
require.Len(t, responseGroup.Endpoints, 0)
}
func TestDynamicEdgeGroupInspectHandler(t *testing.T) {
_, store := datastore.MustNewTestStore(t, true, true)
handler := NewHandler(testhelpers.NewTestRequestBouncer())
handler.DataStore = store
err := store.EndpointGroup().Create(&portainer.EndpointGroup{
ID: 1,
Name: "Test Group",
})
require.NoError(t, err)
err = store.Tag().Create(&portainer.Tag{
ID: 1,
Name: "Test Tag",
Endpoints: map[portainer.EndpointID]bool{
1: true,
2: true,
3: true,
},
})
require.NoError(t, err)
for i := range 3 {
err = store.Endpoint().Create(&portainer.Endpoint{
ID: portainer.EndpointID(i + 1),
Name: "Test Endpoint " + strconv.Itoa(i+1),
Type: portainer.EdgeAgentOnDockerEnvironment,
GroupID: 1,
TagIDs: []portainer.TagID{1},
UserTrusted: true,
})
require.NoError(t, err)
err = store.EndpointRelation().Create(&portainer.EndpointRelation{
EndpointID: portainer.EndpointID(i + 1),
EdgeStacks: map[portainer.EdgeStackID]bool{},
})
require.NoError(t, err)
}
err = store.EdgeGroup().Create(&portainer.EdgeGroup{
ID: 1,
Name: "Test Edge Group",
Dynamic: true,
TagIDs: []portainer.TagID{1},
})
require.NoError(t, err)
rr := httptest.NewRecorder()
req := httptest.NewRequest(
http.MethodGet,
"/edge_groups/1",
nil,
)
handler.ServeHTTP(rr, req)
require.Equal(t, http.StatusOK, rr.Result().StatusCode)
var responseGroup portainer.EdgeGroup
err = json.NewDecoder(rr.Body).Decode(&responseGroup)
require.NoError(t, err)
require.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroup.Endpoints)
}


@ -7,11 +7,17 @@ import (
 	portainer "github.com/portainer/portainer/api"
 	"github.com/portainer/portainer/api/dataservices"
+	"github.com/portainer/portainer/api/roar"
 	httperror "github.com/portainer/portainer/pkg/libhttp/error"
 )

-type decoratedEdgeGroup struct {
+type shadowedEdgeGroup struct {
 	portainer.EdgeGroup
+	EndpointIds int `json:"EndpointIds,omitempty"` // Shadow to avoid exposing in the API
+}
+
+type decoratedEdgeGroup struct {
+	shadowedEdgeGroup
 	HasEdgeStack  bool `json:"HasEdgeStack"`
 	HasEdgeJob    bool `json:"HasEdgeJob"`
 	EndpointTypes []portainer.EndpointType
@ -76,8 +82,8 @@ func getEdgeGroupList(tx dataservices.DataStoreTx) ([]decoratedEdgeGroup, error)
 		}

 		edgeGroup := decoratedEdgeGroup{
-			EdgeGroup:     orgEdgeGroup,
+			shadowedEdgeGroup: shadowedEdgeGroup{EdgeGroup: orgEdgeGroup},
 			EndpointTypes: []portainer.EndpointType{},
 		}

 		if edgeGroup.Dynamic {
 			endpointIDs, err := GetEndpointsByTags(tx, edgeGroup.TagIDs, edgeGroup.PartialMatch)
@ -88,15 +94,16 @@ func getEdgeGroupList(tx dataservices.DataStoreTx) ([]decoratedEdgeGroup, error)
 			edgeGroup.Endpoints = endpointIDs
 			edgeGroup.TrustedEndpoints = endpointIDs
 		} else {
-			trustedEndpoints, err := getTrustedEndpoints(tx, edgeGroup.Endpoints)
+			trustedEndpoints, err := getTrustedEndpoints(tx, edgeGroup.EndpointIDs)
 			if err != nil {
 				return nil, httperror.InternalServerError("Unable to retrieve environments for Edge group", err)
 			}

+			edgeGroup.Endpoints = edgeGroup.EndpointIDs.ToSlice()
 			edgeGroup.TrustedEndpoints = trustedEndpoints
 		}

-		endpointTypes, err := getEndpointTypes(tx, edgeGroup.Endpoints)
+		endpointTypes, err := getEndpointTypes(tx, edgeGroup.EndpointIDs)
 		if err != nil {
 			return nil, httperror.InternalServerError("Unable to retrieve environment types for Edge group", err)
 		}
@ -111,15 +118,26 @@ func getEdgeGroupList(tx dataservices.DataStoreTx) ([]decoratedEdgeGroup, error)
 	return decoratedEdgeGroups, nil
 }

-func getEndpointTypes(tx dataservices.DataStoreTx, endpointIds []portainer.EndpointID) ([]portainer.EndpointType, error) {
+func getEndpointTypes(tx dataservices.DataStoreTx, endpointIds roar.Roar[portainer.EndpointID]) ([]portainer.EndpointType, error) {
+	var innerErr error
 	typeSet := map[portainer.EndpointType]bool{}
-	for _, endpointID := range endpointIds {
+
+	endpointIds.Iterate(func(endpointID portainer.EndpointID) bool {
 		endpoint, err := tx.Endpoint().Endpoint(endpointID)
 		if err != nil {
-			return nil, fmt.Errorf("failed fetching environment: %w", err)
+			innerErr = fmt.Errorf("failed fetching environment: %w", err)
+			return false
 		}

 		typeSet[endpoint.Type] = true
-	}
+		return true
+	})
+
+	if innerErr != nil {
+		return nil, innerErr
+	}

 	endpointTypes := make([]portainer.EndpointType, 0, len(typeSet))

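The shadowedEdgeGroup type above hides the persisted EndpointIds field from API responses by re-declaring it at the outer level with omitempty and leaving it at its zero value. A self-contained illustration of that embedding trick with hypothetical types:

package main

import (
	"encoding/json"
	"fmt"
)

type stored struct {
	Name        string
	EndpointIds []int // internal representation we do not want on the wire
}

// apiView embeds stored but shadows EndpointIds; the shallower field wins the
// JSON name, and because it stays at its zero value and carries omitempty,
// the field disappears from the marshaled output entirely.
type apiView struct {
	stored
	EndpointIds int `json:"EndpointIds,omitempty"`
}

func main() {
	s := stored{Name: "group-1", EndpointIds: []int{1, 2, 3}}

	out, _ := json.Marshal(apiView{stored: s})
	fmt.Println(string(out)) // {"Name":"group-1"}
}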

@ -1,11 +1,19 @@
 package edgegroups

 import (
+	"net/http"
+	"net/http/httptest"
+	"strconv"
 	"testing"

 	portainer "github.com/portainer/portainer/api"
+	"github.com/portainer/portainer/api/datastore"
 	"github.com/portainer/portainer/api/internal/testhelpers"
+	"github.com/portainer/portainer/api/roar"
+	"github.com/segmentio/encoding/json"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 func Test_getEndpointTypes(t *testing.T) {
@ -38,7 +46,7 @@ func Test_getEndpointTypes(t *testing.T) {
 	}

 	for _, test := range tests {
-		ans, err := getEndpointTypes(datastore, test.endpointIds)
+		ans, err := getEndpointTypes(datastore, roar.FromSlice(test.endpointIds))
 		assert.NoError(t, err, "getEndpointTypes shouldn't fail")
 		assert.ElementsMatch(t, test.expected, ans, "getEndpointTypes expected to return %b for %v, but returned %b", test.expected, test.endpointIds, ans)
@ -48,6 +56,61 @@ func Test_getEndpointTypes(t *testing.T) {
 func Test_getEndpointTypes_failWhenEndpointDontExist(t *testing.T) {
 	datastore := testhelpers.NewDatastore(testhelpers.WithEndpoints([]portainer.Endpoint{}))

-	_, err := getEndpointTypes(datastore, []portainer.EndpointID{1})
+	_, err := getEndpointTypes(datastore, roar.FromSlice([]portainer.EndpointID{1}))
 	assert.Error(t, err, "getEndpointTypes should fail")
 }
func TestEdgeGroupListHandler(t *testing.T) {
_, store := datastore.MustNewTestStore(t, true, true)
handler := NewHandler(testhelpers.NewTestRequestBouncer())
handler.DataStore = store
err := store.EndpointGroup().Create(&portainer.EndpointGroup{
ID: 1,
Name: "Test Group",
})
require.NoError(t, err)
for i := range 3 {
err = store.Endpoint().Create(&portainer.Endpoint{
ID: portainer.EndpointID(i + 1),
Name: "Test Endpoint " + strconv.Itoa(i+1),
Type: portainer.EdgeAgentOnDockerEnvironment,
GroupID: 1,
})
require.NoError(t, err)
err = store.EndpointRelation().Create(&portainer.EndpointRelation{
EndpointID: portainer.EndpointID(i + 1),
EdgeStacks: map[portainer.EdgeStackID]bool{},
})
require.NoError(t, err)
}
err = store.EdgeGroup().Create(&portainer.EdgeGroup{
ID: 1,
Name: "Test Edge Group",
EndpointIDs: roar.FromSlice([]portainer.EndpointID{1, 2, 3}),
})
require.NoError(t, err)
rr := httptest.NewRecorder()
req := httptest.NewRequest(
http.MethodGet,
"/edge_groups",
nil,
)
handler.ServeHTTP(rr, req)
require.Equal(t, http.StatusOK, rr.Result().StatusCode)
var responseGroups []decoratedEdgeGroup
err = json.NewDecoder(rr.Body).Decode(&responseGroups)
require.NoError(t, err)
require.Len(t, responseGroups, 1)
require.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroups[0].Endpoints)
require.Len(t, responseGroups[0].TrustedEndpoints, 0)
}
