mirror of https://github.com/documize/community.git synced 2025-08-08 06:55:28 +02:00

Compare commits


15 commits

Author SHA1 Message Date
Harvey Kandola
efb092ef8f Prep v5.13.0 release 2024-12-31 13:09:30 -05:00
Harvey Kandola
3fc0a15f87 es-AR 2024-12-31 12:05:21 -05:00
Harvey Kandola
c841c85478 Add en-AR i18n 2024-12-30 13:49:39 -05:00
Harvey Kandola
2dae03332b Prep release 2024-06-18 11:53:23 -04:00
Harvey Kandola
44b1f263cd Restore activity and audit logs 2024-06-18 11:53:15 -04:00
Harvey Kandola
982e16737e Bump version 2024-02-19 11:55:21 -05:00
Harvey Kandola
f641e42434 Remove default conversion service URL 2024-02-19 11:55:15 -05:00
Harvey Kandola
8895db56af Skip restore of user logs 2024-02-19 11:54:50 -05:00
Harvey Kandola
acb59e1b43 Bump Go deps 2024-02-19 11:54:27 -05:00
Harvey Kandola
f2ba294be8 Prep 5.11.3 2024-02-16 10:53:02 -05:00
Harvey Kandola
69940cb7f1
Merge pull request #398 from mb3m/fix-win-build
Fix Windows build script
2024-02-02 06:44:23 -05:00
Harvey Kandola
6bfdda7178
Merge pull request #399 from mb3m/fix-french
French translation updates
2024-02-02 06:42:35 -05:00
Thomas Bolon
027fdf108c french translation updates
- uppercase with accent (E => É)
- Label => Étiquette
- misc terms
- space before punctuation marks ("?" => " ?")
2024-02-02 11:44:50 +01:00
Thomas Bolon
1f12df76aa typo 2024-02-02 11:17:49 +01:00
Thomas Bolon
d811b88896 fix windows build
- add a cd .. after the ember build: the script moves into the gui\ directory but
  does not move back, even though the rest of the script expects to run
  from the root directory.

- add multiple echo commands to help debug errors

- fix incorrect robocopy usage where glob patterns were specified in the
  source path instead of as the separate third argument
2024-02-02 11:15:47 +01:00
64 changed files with 24149 additions and 12734 deletions

View file

@ -12,9 +12,9 @@ All you need to provide is your database -- PostgreSQL, Microsoft SQL Server or
## Latest Release
[Community edition: v5.11.2](https://github.com/documize/community/releases)
[Community edition: v5.13.0](https://github.com/documize/community/releases)
[Community+ edition: v5.11.2](https://www.documize.com/community/get-started)
[Community+ edition: v5.13.0](https://www.documize.com/community/get-started)
The Community+ edition is the "enterprise" offering with advanced capabilities and customer support:
@ -64,7 +64,7 @@ For all database types, Full-Text Search (FTS) support is mandatory.
## Technology Stack
- Go (v1.21.5)
- Go (v1.23.4)
- Ember JS (v3.12.0)
## Authentication Options
@ -89,6 +89,7 @@ Languages supported out-of-the-box:
- Portuguese (Brazil) (Português - Brasil)
- Japanese (日本語)
- Italian
- Spanish Argentinian
PR's welcome for additional languages.

View file

@ -7,6 +7,7 @@ echo "Building Ember assets..."
cd gui
call ember b -o dist-prod/ --environment=production
::Call allows the rest of the file to run
cd ..
rd /s /q edition\static\public
mkdir edition\static\public
@ -25,18 +26,26 @@ robocopy /e /NFL /NDL /NJH gui\dist-prod\sections edition\static\public\sections
echo "Copying i18n folder"
robocopy /e /NFL /NDL /NJH gui\dist-prod\i18n edition\static\public\i18n
echo "Copying static files"
copy gui\dist-prod\*.* edition\static
echo "Copying favicon.ico"
copy gui\dist-prod\favicon.ico edition\static\public
echo "Copying manifest.json"
copy gui\dist-prod\manifest.json edition\static\public
echo "Copying mail templates"
rd /s /q edition\static\mail
mkdir edition\static\mail
copy domain\mail\*.html edition\static\mail
echo "Copying database templates"
copy core\database\templates\*.html edition\static
rd /s /q edition\static\i18n
mkdir edition\static\i18n
robocopy /e /NFL /NDL /NJH gui\dist-prod\i18n\*.json edition\static\i18n
robocopy /e /NFL /NDL /NJH gui\dist-prod\i18n edition\static\i18n *.json
rd /s /q edition\static\scripts
mkdir edition\static\scripts
@ -51,7 +60,7 @@ robocopy /e /NFL /NDL /NJH core\database\scripts\sqlserver edition\static\script
rd /s /q edition\static\onboard
mkdir edition\static\onboard
robocopy /e /NFL /NDL /NJH domain\onboard\*.json edition\static\onboard
robocopy /e /NFL /NDL /NJH domain\onboard edition\static\onboard *.json
echo "Compiling Windows"
set GOOS=windows

View file

@ -8,7 +8,7 @@ echo "Build process started $NOW"
echo "Building Ember assets..."
cd gui
export NODE_OPTIONS=--openssl-legacy-provider
# export NODE_OPTIONS=--openssl-legacy-provider
ember build ---environment=production --output-path dist-prod --suppress-sizes true
cd ..

View file

@ -1,3 +1,3 @@
/* community edition */
ALTER TABLE organization ADD COLUMN `service` VARCHAR(100) NOT NULL DEFAULT 'https://api.documize.com' AFTER `domain`;
ALTER TABLE organization ADD COLUMN `service` VARCHAR(100) NOT NULL DEFAULT '' AFTER `domain`;

View file

@ -36,7 +36,7 @@ ALTER TABLE dmz_org
CHANGE `title` `c_title` VARCHAR(500) NOT NULL,
CHANGE `message` `c_message` VARCHAR(500) NOT NULL,
CHANGE `domain` `c_domain` VARCHAR(200) NOT NULL DEFAULT '',
CHANGE `service` `c_service` VARCHAR(200) NOT NULL DEFAULT 'https://api.documize.com',
CHANGE `service` `c_service` VARCHAR(200) NOT NULL DEFAULT '',
CHANGE `email` `c_email` VARCHAR(500) NOT NULL DEFAULT '',
CHANGE `allowanonymousaccess` `c_anonaccess` BOOL NOT NULL DEFAULT 0,
CHANGE `authprovider` `c_authprovider` CHAR(20) NOT NULL DEFAULT 'documize',

View file

@ -228,7 +228,7 @@ CREATE TABLE dmz_org (
c_title varchar(500) COLLATE ucs_basic NOT NULL,
c_message varchar(500) COLLATE ucs_basic NOT NULL,
c_domain varchar(200) COLLATE ucs_basic NOT NULL DEFAULT '',
c_service varchar(200) COLLATE ucs_basic NOT NULL DEFAULT 'https://api.documize.com',
c_service varchar(200) COLLATE ucs_basic NOT NULL DEFAULT '',
c_email varchar(500) COLLATE ucs_basic NOT NULL DEFAULT '',
c_anonaccess bool NOT NULL DEFAULT '0',
c_authprovider varchar(20) COLLATE ucs_basic NOT NULL DEFAULT 'documize',

View file

@ -212,7 +212,7 @@ CREATE TABLE dmz_org (
c_title NVARCHAR(500) COLLATE Latin1_General_CS_AS NOT NULL,
c_message NVARCHAR(500) COLLATE Latin1_General_CS_AS NOT NULL,
c_domain NVARCHAR(200) COLLATE Latin1_General_CS_AS NOT NULL DEFAULT '',
c_service NVARCHAR(200) COLLATE Latin1_General_CS_AS NOT NULL DEFAULT 'https://api.documize.com',
c_service NVARCHAR(200) COLLATE Latin1_General_CS_AS NOT NULL DEFAULT '',
c_email NVARCHAR(500) COLLATE Latin1_General_CS_AS NOT NULL DEFAULT '',
c_anonaccess BIT NOT NULL DEFAULT '0',
c_authprovider NVARCHAR(20) COLLATE Latin1_General_CS_AS NOT NULL DEFAULT 'documize',
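
The MySQL migration and the MySQL, PostgreSQL and SQL Server schema definitions above all change the organization's conversion service column to default to an empty string instead of https://api.documize.com. Below is a minimal Go sketch of the implied behaviour, assuming an empty endpoint means no external conversion service is configured; conversionEnabled is a hypothetical helper for illustration, not code from this changeset.

package main

import (
	"fmt"
	"strings"
)

// conversionEnabled mirrors the org-level service / c_service column.
// Assumption for illustration only: an empty value means document
// import/export via an external conversion service is not configured.
func conversionEnabled(endpoint string) bool {
	return strings.TrimSpace(endpoint) != ""
}

func main() {
	fmt.Println(conversionEnabled(""))                         // false: the new default
	fmt.Println(conversionEnabled("https://api.documize.com")) // true: the old default
}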

View file

@ -26,6 +26,7 @@ func SupportedLocales() (locales []string) {
locales = append(locales, "fr-FR")
locales = append(locales, "ja-JP")
locales = append(locales, "it-IT")
locales = append(locales, "es-AR")
return
}
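
Appending "es-AR" to SupportedLocales() is all the backend needs to advertise the new locale. As an illustration of how such a list is typically consumed, here is a minimal Go sketch that checks a requested locale against the supported set and falls back to a default; matchLocale and the en-US fallback are assumptions for illustration, not part of this changeset.

package main

import "fmt"

// matchLocale returns the requested locale when it appears in the
// supported list, otherwise the fallback. Hypothetical helper.
func matchLocale(requested string, supported []string, fallback string) string {
	for _, locale := range supported {
		if locale == requested {
			return requested
		}
	}
	return fallback
}

func main() {
	// The locale codes below mirror those visible elsewhere in this changeset.
	supported := []string{"en-US", "de-DE", "zh-CN", "pt-BR", "fr-FR", "ja-JP", "it-IT", "es-AR"}
	fmt.Println(matchLocale("es-AR", supported, "en-US")) // es-AR
	fmt.Println(matchLocale("es-ES", supported, "en-US")) // en-US
}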

View file

@ -40,9 +40,9 @@ func main() {
// Specify the product edition.
rt.Product = domain.Product{}
rt.Product.Major = "5"
rt.Product.Minor = "11"
rt.Product.Patch = "2"
rt.Product.Revision = "1705427184"
rt.Product.Minor = "13"
rt.Product.Patch = "0"
rt.Product.Revision = "1735665467719"
rt.Product.Version = fmt.Sprintf("%s.%s.%s", rt.Product.Major, rt.Product.Minor, rt.Product.Patch)
rt.Product.Edition = domain.CommunityEdition
rt.Product.Title = "Community"
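
The Revision stamp grows from ten to thirteen digits, which looks like a switch from Unix seconds to Unix milliseconds; that is an inference from the magnitudes, not something the commit states. A minimal Go sketch of how such values could be produced (hypothetical, not the project's actual build tooling):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Thirteen-digit stamp, same magnitude as the new Revision "1735665467719".
	revision := fmt.Sprintf("%d", time.Now().UnixMilli())
	// Ten-digit stamp, same magnitude as the old Revision "1705427184".
	oldStyle := fmt.Sprintf("%d", time.Now().Unix())

	// Version string composed the same way as in the diff above.
	version := fmt.Sprintf("%s.%s.%s", "5", "13", "0")

	fmt.Println(version, revision, oldStyle) // e.g. 5.13.0 1735665467719 1735665467
}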

9
go.mod
View file

@ -22,13 +22,13 @@ require (
github.com/lib/pq v1.10.9
github.com/mb0/diff v0.0.0-20131118162322-d8d9a906c24d // indirect
github.com/microcosm-cc/bluemonday v1.0.26
github.com/microsoft/go-mssqldb v1.5.0
github.com/microsoft/go-mssqldb v1.6.0
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d
github.com/pkg/errors v0.9.1
github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be // indirect
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
golang.org/x/crypto v0.18.0
golang.org/x/net v0.20.0
golang.org/x/crypto v0.19.0
golang.org/x/net v0.21.0
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc
gopkg.in/cas.v2 v2.2.2
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
@ -39,7 +39,7 @@ require (
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
@ -47,4 +47,5 @@ require (
github.com/google/uuid v1.3.1 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/trivago/tgo v1.0.7 // indirect
golang.org/x/text v0.14.0 // indirect
)

70
go.sum
View file

@ -1,9 +1,17 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 h1:/iHxaJhsFr0+xVFfbMr5vxz848jyiWuIEDhYq3y5odY=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0 h1:yfJe15aSwEQ6Oo6J+gdfdulPNoZ3TEhmbhLIoxZcA+U=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0/go.mod h1:Q28U+75mpCaSCDowNEmhIo/rmgdkqmkmzI7N6TGR4UY=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.0 h1:HCc0+LpPfpCKs6LGGLAhwBARt9632unrVcI6i8s/8os=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
@ -20,8 +28,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/documize/blackfriday v2.0.0+incompatible h1:qjRGAIVwZlHBtA/b9u0LtseYM3v3WpIXofPCwNjcUsE=
github.com/documize/blackfriday v2.0.0+incompatible/go.mod h1:89vHw0Rs0jcipRrgirvJJn6hvRgbk4KX6rask9ZQ84Y=
github.com/documize/glick v0.0.0-20160503134043-a8ccbef88237 h1:i9vV99+Zl6G82LGwwk7TBf5WwbniTNZZfYCIrEvqRpY=
@ -32,8 +38,8 @@ github.com/documize/slug v1.1.1 h1:OCJRbWxbOgrgiBYSbVzuFwxb9wVu4oy1LxvLJOC2s8Y=
github.com/documize/slug v1.1.1/go.mod h1:Vi7fQ5PzeOpXAiIrk1WCEDRihjTfU/bf4eWUPSD7tkU=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA=
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A=
@ -42,9 +48,10 @@ github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
@ -58,7 +65,6 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
@ -67,16 +73,6 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@ -86,6 +82,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
@ -96,12 +93,11 @@ github.com/mb0/diff v0.0.0-20131118162322-d8d9a906c24d h1:eAS2t2Vy+6psf9LZ4T5WXW
github.com/mb0/diff v0.0.0-20131118162322-d8d9a906c24d/go.mod h1:3YMHqrw2Qu3Liy82v4QdAG17e9k91HZ7w3hqlpWqhDo=
github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58=
github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs=
github.com/microsoft/go-mssqldb v1.5.0 h1:CgENxkwtOBNj3Jg6T1X209y2blCfTTcwuOlznd2k9fk=
github.com/microsoft/go-mssqldb v1.5.0/go.mod h1:lmWsjHD8XX/Txr0f8ZqgbEZSC+BZjmEQy/Ms+rLrvho=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc=
github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@ -116,64 +112,56 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/trivago/tgo v1.0.7 h1:uaWH/XIy9aWYWpjm2CU3RpcqZXmX2ysQ9/Go+d9gyrM=
github.com/trivago/tgo v1.0.7/go.mod h1:w4dpD+3tzNIIiIfkWWa85w5/B77tlvdZckQ+6PkFnhc=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
@ -187,9 +175,7 @@ gopkg.in/cas.v2 v2.2.2/go.mod h1:mlmjh4qM/Jm3eSDD0QVr5GaaSW3nOonSUSWkLLvNYnI=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View file

@ -9,7 +9,7 @@ import Service, { inject as service } from '@ember/service';
import $ from 'jquery';
export default Service.extend({
langs: { enUS: [], deDE: [] , zhCN: [], ptBR: [], frFR: [], jaJP: [], itIT: [] },
langs: { enUS: [], deDE: [] , zhCN: [], ptBR: [], frFR: [], jaJP: [], itIT: [], esAR: [] },
locales: [],
session: service(),
@ -36,6 +36,9 @@ export default Service.extend({
$.getJSON("/i18n/it-IT.json", (data) => {
this.langs.itIT = data;
});
$.getJSON("/i18n/es-AR.json", (data) => {
this.langs.esAR = data;
});
},
localize(key, ...args) {
@ -63,7 +66,10 @@ export default Service.extend({
case "it-IT":
str = this.langs.itIT[key];
break;
}
case "es-AR":
str = this.langs.esAR[key];
break;
}
if (_.isUndefined(str)) {
// eslint-disable-next-line no-console

View file

@ -41,7 +41,7 @@
<label for="conversionEndpoint">{{localize 'customize_conversion_url'}}</label>
{{input id="conversionEndpoint" type="text" value=model.general.conversionEndpoint class=(if hasConversionEndpointInputError "form-control is-invalid" "form-control")}}
<small class="form-text text-muted">
{{localize 'customize_conversion_explain'}} (e.g. https://api.documize.com, <a href="https://docs.documize.com/s/WNEpptWJ9AABRnha/administration-guides/d/WO0pt_MXigAB6sJ7/general-options">read the documentation</a>)
{{localize 'customize_conversion_explain'}} (See <a href="https://docs.documize.com/s/WNEpptWJ9AABRnha/administration-guides/d/WO0pt_MXigAB6sJ7/general-options">documentation</a>)
</small>
</div>
<div class="form-group">

View file

@ -38,7 +38,7 @@
<div class="form-group">
<label for="activation-key">Activation Key</label>
{{textarea id="activation-key" value=model.activationKey rows="5" class=(if hasKeyError "form-control is-invalid" "form-control")}}
<small class="form-text text-muted">You can get from <a href="https://www.documize.com/community" target="_blank">https://www.documize.com/community</a></small>
<small class="form-text text-muted">Get from here: <a href="https://www.documize.com/community" target="_blank">https://www.documize.com/community</a></small>
</div>
{{/if}}
{{ui/ui-button submit=true color=constants.Color.Green light=true label=buttonLabel onClick=(action "save")}}

5281
gui/package-lock.json (generated)

File diff suppressed because it is too large.

View file

@ -1,6 +1,6 @@
{
"name": "documize",
"version": "5.11.2",
"version": "5.13.0",
"private": true,
"description": "Documize Community",
"repository": "",

758
gui/public/i18n/es-AR.json (new file)
View file

@ -0,0 +1,758 @@
{
"all": "Todos",
"none": "Ninguno",
"never": "Nunca",
"add": "Agregar",
"added": "Agregado",
"activate": "Activar",
"summary": "Resumen",
"approve": "Aprobar",
"authenticate": "Autenticar",
"cancel": "Cancelar",
"close": "Cerrar",
"copy": "Copiar",
"copied": "Copiado",
"delete": "Borrar",
"deleted": "Eliminado",
"remove": "Eliminar",
"duplicate": "Duplicado",
"duplicated": "Duplicado",
"edit": "Editar",
"edited": "Editado",
"download": "Descargar",
"upload": "subir",
"uploaded": "Subido",
"import": "Importar",
"export": "Exportar",
"exported": "Exportado",
"pdf": "PDF",
"test": "Prueba",
"configure": "Configurar",
"error": "Error",
"file": "Archivo",
"insert": "Insertar",
"invite": "Invitar",
"message": "Mensaje",
"join": "Unirse",
"leave": "Salir",
"login": "Iniciar sesión",
"logout": "Cerrar sesión",
"authentication": "Autenticación",
"move": "Mover",
"moved": "Movido",
"next": "Siguiente",
"ok": "Aceptar",
"preview": "Vista previa",
"preview_wait": "Generando vista previa...",
"pdf_prepare": "Preparando PDF...",
"publish": "Publicar",
"print": "Imprimir",
"reject": "Rechazar",
"rejected": "Rechazado",
"reply": "Responder",
"replied": "Respondido",
"reset": "Restablecer",
"request": "Solicitar",
"requested": "Solicitado",
"save": "Guardar",
"saved": "Guardado",
"search": "Buscar",
"send": "Enviar",
"share": "Compartir",
"discard": "Descartar",
"continue": "Continuar",
"code": "código",
"signin": "Iniciar sesión",
"sort": "Ordenar",
"sort_ascending": "Ascendente",
"sort_descending": "Descendente",
"sort_by_name": "Nombre",
"sort_by_created": "Fecha de creación",
"sort_by_revised": "Última actualización",
"unassigned": "Sin asignar",
"update": "Actualizar",
"updating": "Actualizando",
"viewed": "Visto",
"name": "Nombre",
"description": "Descripción",
"excerpt": "Extracto",
"icon": "Icono",
"color": "Color",
"visibility": "Visibilidad",
"running": "En ejecución...",
"firstname": "Nombre",
"lastname": "Apellido",
"email": "Correo electrónico",
"email_recipient": "Correo electrónico del destinatario",
"password": "Contraseña",
"password_new": "Nueva contraseña",
"username": "Nombre de usuario",
"user": "Usuario",
"password_confirm": "Confirmar contraseña",
"encryption": "Cifrado",
"notice": "Aviso",
"backup": "Copia de seguridad",
"restore": "Restaurar",
"completed": "Completado",
"please_wait": "Por favor, espere...",
"filter": "Filtro",
"not_found": "No encontrado",
"nothing_found": "No se encontró nada",
"expand_collapse": "Expandir/contraer",
"options": "Opciones",
"settings": "Configuración",
"about": "Acerca de",
"meta": "Meta",
"permissions": "Permisos",
"profile": "Perfil",
"go_top": "Ir al inicio",
"help": "Ayuda",
"reference": "referencia",
"references": "referencias",
"move_up": "Subir",
"move_down": "Bajar",
"indent": "Sangría",
"outdent": "Anular sangría",
"default": "Predeterminado",
"no_undo": "Proceda con precaución, ya que no se puede deshacer",
"show_hide": "Mostrar/ocultar",
"sent": "Enviado",
"everyone": "Todos",
"ip": "IP",
"event": "Evento",
"when": "Cuándo",
"last_seen": "Última vez visto",
"change": "Cambiar",
"no_access": "Acceso denegado",
"drag_drop_reorder": "Arrastrar y soltar para reordenar",
"select": "Seleccionar",
"locale": "Configuración regional",
"public": "Público",
"public_explain": "Público: puede ser visto por todos",
"protected": "Protegido",
"protected_explain": "Protegido: el acceso está restringido a usuarios seleccionados",
"personal": "Personal",
"personal_explain": "Personal: solo yo puedo verlo",
"label": "Etiqueta",
"labels": "Etiquetas",
"labels_none": "Sin etiquetas",
"label_unclassified": "Sin clasificar",
"draft": "Borrador",
"drafted": "En borrador",
"draft_explain": "Borrador: debe ser aprobado antes de la publicación, sin historial de revisión",
"drafts": "Borradores",
"live": "En vivo",
"live_explain": "En vivo: publicado al crearse",
"archived": "Archivado",
"archived_explain": "Archivado: no visible para nadie",
"approved": "Aprobado",
"reverted": "Revertido",
"published": "Publicado",
"space": "Espacio",
"spaces": "Espacios",
"category": "Categoría",
"category_explain": "Asignar categorías a los documentos y decidir quién puede verlos",
"categories": "Categorías",
"tag": "Etiqueta",
"tags": "Etiquetas",
"tag_rules": "Especificar hasta {1} etiquetas: minúsculas, caracteres, números, guiones solamente",
"revisions": "Revisiones",
"versions": "Versiones",
"version": "Versión",
"versioned": "Con versiones",
"versions_explain": "Crear varias versiones del mismo contenido: los usuarios pueden seleccionar qué versión ver",
"change_control": "Control de cambios",
"change_control_explain": "Seleccionar método de control de cambios",
"pin": "Fijar",
"pinned": "Fijado",
"unpin": "Desfijar",
"unpinned": "Desfijar",
"blocks": "Bloques de contenido",
"blocks_explain": "Los bloques de contenido proporcionan contenido reutilizable que se puede insertar en cualquier documento",
"block_delete_confirm": "¿Está seguro de que desea eliminar este bloque de contenido reutilizable?",
"actions": "Acciones",
"activity": "Actividad",
"activity_explain": "Informes de actividad del usuario que incluyen adiciones, vistas, actualizaciones y aprobaciones",
"reports": "Informes",
"content": "Contenido",
"template": "Plantilla",
"templates": "Plantillas",
"document": "Documento",
"documents": "Documentos",
"attachments": "Archivos adjuntos",
"history": "Historial",
"bookmark": "Marcador",
"bookmarks": "Marcadores",
"bookmark_remove": "Eliminar marcador",
"new": "Nuevo",
"copy_link": "Copiar enlace",
"created": "Creado",
"markdown": "Markdown aceptado",
"rename": "Cambiar nombre",
"status": "Estado",
"expiry": "Caducidad",
"feedback": "Comentarios",
"un_categorized": "Sin categorizar",
"toc": "Tabla de contenidos",
"new_canvas": "Lienzo en blanco",
"new_template": "Desde la plantilla",
"new_import": "Importar archivos",
"new_import_explain1": "Haga clic para seleccionar archivos o arrastre y suelte archivos",
"new_import_explain2": ".doc, .docx, .md, .markdown",
"new_content": "Nuevo contenido",
"export_html": "Exportar como HTML",
"export_html_explain1": "Exportar todo el contenido del espacio como HTML o seleccionar categorías",
"export_html_explain2": "Todo el contenido del espacio se exportará como un único archivo HTML autocontenido",
"import_convert": "Convirtiendo {1}",
"import_success": "Convertido correctamente {1}",
"add_recent": "Agregado recientemente",
"update_recent": "Actualizado recientemente",
"space_change": "Mover a otro espacio",
"space_change_prompt": "Seleccionar espacio",
"space_new": "Nuevo espacio",
"space_name": "Nombre del espacio",
"space_name_rules": "Solo caracteres y números",
"space_description": "Descripción del espacio",
"space_clone": "Clonar espacio",
"space_select": "Seleccionar espacio",
"space_delete": "Eliminar espacio",
"space_delete_prompt": "Escriba el nombre del espacio para confirmar",
"space_delete_warn": "¡Esto eliminará todos los documentos y plantillas dentro de este espacio!",
"space_copy": "Copiar plantillas, permisos, documentos de un espacio existente",
"space_copy_template": "Copiar plantillas",
"space_copy_permission": "Copiar permisos",
"space_copy_document": "Copiar documentos",
"space_empty_state": "Agregar documentos mediante + CONTENT",
"space_lockout": "Los permisos del espacio le impiden ver y crear documentos",
"space_invite_message": "Hola, estoy compartiendo contigo el espacio {1} (en {2}) para que ambos podamos colaborar en la documentación.",
"protection_none": "Se permiten cambios sin aprobación",
"protection_lock": "Bloqueado, no se permiten cambios",
"protection_review": "Los cambios requieren aprobación antes de su publicación",
"template_published": "Plantilla publicada",
"block_explain": "Los bloques de contenido proporcionan contenido reutilizable que se puede insertar en cualquier documento",
"block_published": "Bloque publicado",
"upload_attachment": "Subir archivos adjuntos",
"content_revisions": "Revisiones de contenido",
"content_revisions_explain": "Revisar cambios de contenido anteriores y revertir ediciones",
"move_documents": "Mover documentos",
"delete_documents": "Eliminar documentos",
"feedback_enable": "Habilitar comentarios",
"feedback_prompt": "Mensaje para comentarios",
"feedback_prompt_hint": "Ingrese un mensaje solicitando comentarios del usuario",
"feedback_prompt_explain": "Especifique el mensaje, p. ej. ¿Le resultó útil? ¿Encontraste lo que necesitabas?",
"feedback_thanks": "¡Gracias por tus comentarios!",
"feedback_help_yes": "¡Sí, gracias!",
"feedback_help_no": "No realmente",
"likes_prompt": "¿Esto te ayudó?",
"delete_confirm": "¿Estás seguro de que deseas eliminar?",
"category_assignment_summary": "Asignado a {1} documentos y visible para {2} usuarios/grupos",
"category_no_access": "No tienes permiso de visualización para esta categoría",
"category_default": "Categoría predeterminada para contenido nuevo",
"category_default_explain": "Se aplica automáticamente a los documentos recién creados",
"category_delete_confirm": "¿Estás seguro de que deseas eliminar esta categoría?",
"category_permissions": "Permisos de categoría",
"category_permissions_explain": "Seleccione quién puede ver los documentos dentro de la categoría",
"category_none": "Este espacio no tiene categorías todavía",
"approval_anybody": "Se requiere la aprobación de cualquier aprobador",
"approval_majority": "Se requiere la aprobación de la mayoría de los aprobadores",
"approval_unanimous": "Se requiere la aprobación unánime de todos los aprobadores",
"approval_pending": "Cambios pendientes",
"approval_awaiting": "En espera de aprobación",
"protection_type_open": "Abierto",
"protection_type_protected": "Protegido",
"protection_type_locked": "Bloqueado",
"doc_request_contribution": "Solicitar contribución",
"doc_request_feedback": "Solicitar comentarios",
"doc_request_read": "Solicitar lectura",
"doc_request_publication": "Solicitar publicación",
"doc_request_approval": "Solicitar aprobación",
"doc_withdraw_approval": "Solicitud de retiro",
"doc_withdraw_approval_reason": "Explicación",
"doc_withdraw_approval_reason_explain": "Nota útil que explica el motivo, p. ej. correcciones, adiciones",
"doc_review_publish": "Puede revisar y aprobar los cambios",
"doc_review_discard": "Me gustaría retirar esta solicitud de aprobación",
"doc_contribution_pending": "Tiene un cambio pendiente",
"doc_contribution_under_review": "Su solicitud de aprobación está bajo revisión",
"doc_contribution_rejected": "Su cambio ha sido rechazado",
"doc_contribution_review": "Revisar cambios ({1})",
"doc_change_status": "{1} cambio(s) en progreso, {2} en espera de revisión, {3} rechazados",
"doc_approve_reject": "Aprobar/Rechazar cambios",
"doc_approve_method": "Método de aprobación",
"doc_approve_status": "Estado de aprobación",
"doc_approver_status": "{1} aprobaciones y {2} rechazos con {3} aprobadores involucrados en esta revisión",
"doc_publish": "¿Puedes mover este documento de Borrador a En Vivo? ¡Estamos listos para publicarlo!",
"doc_category_explain": "Asigna una o más categorías para ayudar a organizar el contenido dentro de este espacio",
"doc_action_request_ask": "Preguntar",
"doc_action_request_to": "Para",
"doc_action_request_by": "Por",
"revision_none": "No se han realizado revisiones",
"revision_restore": "Restaurar sección",
"revision_restore_confirm": "¿Estás seguro de que quieres restaurar esta revisión?",
"template_save": "Guardar como plantilla",
"template_name_explain": "Un buen nombre de plantilla transmite el tipo de documento",
"template_desc_explain": "Explica el caso de uso para esta plantilla",
"print_explain": "Imprime todo el contenido o selecciona secciones",
"doc_delete_confirm": "¿Estás seguro de que quieres eliminar este documento?",
"attachment_delete": "Eliminar archivo adjunto",
"attachment_delete_confirm": "¿Está seguro de que desea eliminar este adjunto?",
"link_insert": "Insertar vínculo",
"link_type_section": "Sección",
"link_type_section_explain": "Enlace al contenido de este documento",
"link_type_attachment": "Archivo adjunto",
"link_type_attachment_explain": "Enlace a un adjunto de este documento",
"link_type_search": "Buscar",
"link_type_search_explain": "Nombre del documento, contenido, nombre del adjunto",
"link_type_network": "Red",
"link_type_network_explain": "Especifique la ubicación de la unidad de red/recurso compartido/carpeta",
"link_type_network_example": "p. ej. //recurso compartido/carpeta",
"discard_changes": "Descartar cambios",
"discard_confirm": "Ha realizado cambios: ¿desea continuar editando o descartar los cambios?",
"share_expire_2": "en 2 días",
"share_expire_7": "en 7 días",
"share_expire_14": "en 14 días",
"share_expire_30": "en 30 días",
"share_expire_60": "en 60 días",
"share_expire_90": "en 90 días",
"actions_mine": "Acciones asignadas a usted",
"actions_none": "No tiene acciones",
"actions_due": "vencidas",
"actions_see_note": "leer nota",
"actions_mark_complete": "marcar como completada",
"actions_completed": "completada",
"actions_see_more": "{1} acciones completadas →",
"actions_requested": "Acciones que ha asignado a otros",
"actions_requested_none": "No ha realizado ninguna solicitud",
"actions_requested_more": "{1} solicitudes completadas recientemente →",
"actions_requested_complete": "{1} solicitudes completadas →",
"activity_period": "Período de tiempo",
"activity_filter_past": "Pasado",
"activity_filter_days": "Días",
"activity_filter_for": "Durante",
"audit_24": "24 horas",
"audit_7": "7 días",
"audit_14": "14 días",
"audit_30": "30 días",
"audit_90": "90 días",
"chat_leave_comment": "Dejar comentario",
"chat_applies_to": "Se aplica a",
"chat_delete_confirm": "¿Está seguro de que desea eliminar este comentario?",
"chat_reply_delete_confirm": "¿Está seguro de que desea eliminar esta respuesta?",
"doc_secure_share": "Compartir mediante un enlace externo seguro",
"doc_secure_shared_by": "Compartido por",
"doc_version_viewed": "Cambiar el nombre de la versión",
"doc_version_remove": "Desversionar",
"doc_version_create": "Crear versión del documento",
"doc_version_explain": "Las versiones se asignan a los documentos donde cada versión es un documento diferente.",
"doc_version_label": "Etiqueta de versión",
"doc_version_label_explain": "p. ej. 1.0, v1.0, Versión 1, Versión 2018.1",
"doc_version_this": "Crear nueva versión de este documento",
"doc_version_select": "o seleccionar otro documento",
"doc_version_assign": "Asignar versión a este documento",
"doc_version_dropdown": "<asignar versión a otro documento>",
"doc_version_remove_confirm": "¿Está seguro de que desea eliminar esta versión y todo su contenido?",
"doc_version_remove_name": "Confirme escribiendo el nombre del documento a continuación",
"doc_version_unversion": "La operación de anulación de la versión hará que el documento sea independiente y mantendrá intacto todo el contenido.",
"doc_version_unversion_confirm": "¿Está seguro de que desea anular la versión del documento?",
"archived_content": "Contenido archivado",
"archived_content_explain": "Marcar contenido archivado como activo",
"unarchive_content": "Desarchivar contenido",
"document_permissions": "Permisos de documentos",
"space_permissions": "Permisos de espacio",
"space_permissions_explain": "Asignar permisos a usuarios o grupos e invitar a nuevos usuarios a este espacio",
"space_permission_view": "Ver",
"space_permission_view_explain": "Ver contenido dentro de este espacio",
"space_permission_manage": "Administrar",
"space_permission_manage_explain": "Administrar todos los aspectos del espacio excepto la eliminación",
"space_permission_owner": "Propietario",
"space_permission_owner_explain": "Administrar y eliminar espacio",
"space_permission_doc_create": "Crear",
"space_permission_doc_create_explain": "Puede crear nuevos documentos en el espacio",
"space_permission_doc_edit": "Editar",
"space_permission_doc_edit_explain": "Puede cambiar el contenido del documento",
"space_permission_doc_delete": "Eliminar",
"space_permission_doc_delete_explain": "Puede eliminar documentos del espacio",
"space_permission_doc_move": "Mover",
"space_permission_doc_move_explain": "Puede mover documentos de este espacio a otro espacio",
"space_permission_doc_copy": "Copiar",
"space_permission_doc_copy_explain": "Puede duplicar documentos",
"space_permission_doc_template": "Plantillas",
"space_permission_doc_template_explain": "Puede crear y publicar plantillas de documentos",
"space_permission_doc_approval": "Aprobación",
"space_permission_doc_approval_explain": "Puede (1) aprobar o rechazar cambios en los documentos; (2) mover documentos de borrador a activo",
"space_permission_doc_draft": "Borrador",
"space_permission_doc_draft_explain": "Puede ver y trabajar en documentos marcados como borrador",
"space_permission_doc_version": "Versiones",
"space_permission_doc_version_explain": "Puede crear versiones de documentos y vincularlas",
"space_permission_add_user": "Agregar usuarios a este espacio",
"space_permission_invite_user": "Invitar usuarios a este espacio",
"space_permission_invite_explain": "Separar con comas varias direcciones de correo electrónico",
"section_insert": "Insertar sección",
"section_insert_here": "Insertar sección aquí",
"section_copy_explain": "Esta sección y todas las secciones anidadas se copiarán al documento seleccionado",
"section_move_explain": "Esta sección y todas las secciones anidadas se moverán al documento seleccionado",
"section_delete": "¿Está seguro de que desea eliminar esta sección?",
"section_delete_children": "Eliminar también las secciones secundarias",
"section_publish": "Publicar bloque de contenido reutilizable",
"section_publish_name_explain": "Proporcionar un título breve para el bloque de contenido reutilizable",
"section_publish_desc_explain": "Descripción breve para ayudar a otros a comprender el bloque de contenido reutilizable",
"section_airtable": "Airtable",
"section_airtable_explain": "Parte hoja de cálculo, parte base de datos y totalmente flexible (https://airtable.com)",
"section_airtable_code": "Insertar Airtable código",
"section_code_tip": "Nombre conciso que describe el fragmento de código",
"section_drawio": "Diagrams.net",
"section_drawio_explain": "Para crear diagramas de flujo, diagramas de procesos, organigramas, UML, diagramas ER, diagramas de red y mucho más (https://www.diagrams.net)",
"section_iframe": "iFrame",
"section_iframe_explain": "Incrustar un iFrame",
"section_iframe_code": "Código de incrustación de iFrame",
"section_gemini": "Gemini",
"section_gemini_explain": "Software de soporte técnico y seguimiento de problemas empresariales Gemini (https://www.countersoft.com)",
"section_gemini_url": "URL de Gemini",
"section_gemini_url_explain": "p. ej. Español: https://helpdesk.countersoft.com",
"section_gemini_key": "Clave API (del perfil de usuario)",
"section_gemini_workspace": "Espacio de trabajo",
"section_jira": "Software Jira",
"section_jira_explain": "Jira proporciona seguimiento de problemas y software ágil",
"section_jira_admin": "El administrador de su comunidad Documize debe proporcionar detalles de conexión de Jira antes del uso.",
"section_jira_no_auth": "Conector Jira no autenticado",
"section_papertrail": "Papertrail",
"section_papertrail_explain": "Muestra tus registros basados en la nube (https://papertrailapp.com)",
"section_papertrail_key": "Clave API de Papertrail",
"section_papertrail_search": "Consulta de búsqueda",
"section_papertrail_search_explain": "Determina qué entradas de registro quieres mostrar, p. ej. bob OR (algunafrase AND sally)",
"section_papertrail_max": "Resultados máximos",
"section_papertrail_max_explain": "¿Cuántas entradas de registro desea?",
"section_papertrail_group": "Grupo",
"section_papertrail_group_explain": "Grupo de Papertrail opcional",
"section_pdf": "Visualizador de PDF",
"section_pdf_upload_explain": "Se utilizará el primer PDF cargado",
"section_pdf_height": "Altura de la vista previa",
"section_pdf_height_explain": "¿Qué altura tiene la vista previa del PDF?",
"section_pdf_start": "Página de inicio",
"section_pdf_start_explain": "La primera página que se mostrará",
"section_pdf_sidebar": "Barra lateral",
"section_pdf_sidebar_explain": "Opcionalmente, establezca el contenido de la barra lateral",
"section_pdf_sidebar_none": "Ninguno",
"section_pdf_sidebar_bookmark": "Marcadores",
"section_pdf_sidebar_thumbnail": "Miniaturas",
"section_plantuml": "PlantUML",
"section_plantuml_explain": "Crear diagramas UML a partir de un lenguaje de texto simple (http://plantuml.com)",
"section_plantuml_link": "Diagrama PlantUML",
"section_plantuml_type_sequence": "Secuencia",
"section_plantuml_type_usecase": "Caso de uso",
"section_plantuml_type_class": "Clase",
"section_plantuml_type_activity": "Actividad",
"section_plantuml_type_activity2": "Actividad (nueva sintaxis)",
"section_plantuml_type_component": "Componente",
"section_plantuml_type_state": "Estado",
"section_tabular_import_csv": "Importar CSV",
"section_tabular_csv": "CSV a tabla",
"section_tabular_warn": "Nota: los datos de la tabla existente se reemplazarán",
"section_tabular_paste": "Pegar datos CSV",
"section_tabular_format": "Se admiten los delimitadores comunes",
"section_tabular_first_row": "Encabezado de la primera fila",
"section_tabular_first_row_explain": "Habilitar si la primera fila contiene encabezados",
"section_trello": "Trello",
"section_trello_explain": "Trello es la forma visual de administrar sus proyectos y organizar cualquier cosa (https://trello.com)",
"section_trello_none": "No tiene tableros de equipo para compartir; los tableros personales nunca se muestran",
"section_trello_admin": "Su administrador de la Comunidad Documize debe configurar Trello antes de usarlo",
"section_trello_board": "Tablero",
"section_trello_list": "Lista",
"login_cas": "Authenticating with CAS...",
"login_cass_error": "CAS authentication failure",
"login_keycloak": "Authenticating with Keycloak...",
"login_keycloak_error": "Keycloak authentication failure",
"login_invalid": "Invalid credentials",
"forgot_password": "Forgot your password?",
"reset_password": "Reset Password",
"reset_password_next": "Thanks. Check your email for instructions.",
"password_strong": "Choose a strong password",
"password_match": "Passwords must match",
"welcome": "Welcome to Documize",
"welcome_explain": "Let's set up your account and get you started",
"profile_explain": "Manage your profile and password",
"search_hint": "keywords, tags",
"search_too_short": "Your search query is too short",
"search_doc_name": "Document name",
"search_doc_content": "Document content",
"search_tag_name": "Tag name",
"search_attachment_name": "Attachment name",
"search_example_title": "Query examples",
"search_explain": "Find content using keywords and operators",
"search_example1": "Show results that contain at least one of the two words",
"search_example2": "Show results that contain both words",
"search_example3": "Show results that contain the word \"apple\", but rank rows higher if they also contain \"macintosh\"",
"search_example4": "Show results that contain the word \"apple\" but not \"macintosh\"",
"search_example5": "Show results that contain the words \"apple\" and \"turnover\", or \"apple\" and \"strudel\" (in any order), but rank \"apple turnover\" higher than \"apple strudel\"",
"search_example6": "Show results that contain words such as \"apple\", \"apples\", \"applesauce\", or \"applet\"",
"search_example7": "Show results that contain the exact phrase \"some words\" (for example, rows that contain \"some words of wisdom\" but not \"some noise words\")",
"search_example8": "Show results that contain either word",
"search_example9": "Show results that have \"google\", either \"apple\" or \"microsoft\" but not \"ibm\"",
"search_example10": "Show results that contain words that start with \"apple\", such as \"applesauce\" or \"applet\"",
"space_density_complete": "Complete",
"space_density_comfort": "Comfort",
"space_density_compact": "Compact",
"backup_explain1": "Documize Community es una aplicación multiusuario que permite que tanto 'tech.mycompany.com' como 'sales.mycompany.com' se ejecuten utilizando el mismo ejecutable/base de datos. Como administrador global de Documize Community, realizará una copia de seguridad completa de todo el sistema en todos los inquilinos. El administrador de inquilinos de Documize Community puede iniciar sesión para realizar una copia de seguridad a nivel de inquilino (por ejemplo, marketing.mycompany.com).",
"backup_explain2": "Documize Community es una aplicación multiusuario que permite que tanto 'tech.mycompany.com' como 'sales.mycompany.com' se ejecuten utilizando el mismo ejecutable/base de datos. Como administrador global de Documize Community, realizará una copia de seguridad completa de todo el sistema en todos los inquilinos. Como administrador de inquilinos de Documize Community, puede realizar una copia de seguridad a nivel de inquilino (por ejemplo, marketing.mycompany.com).",
"backup_explain3": "Puede llevar varios minutos completar el proceso de copia de seguridad; tenga paciencia mientras se realiza la operación de copia de seguridad en progreso.",
"backup_retain": "Mantener archivo de respaldo en el servidor",
"backup_running": "Respaldo en ejecución, espere...",
"backup_tenant": "Inquilino de respaldo",
"backup_system": "Sistema de respaldo",
"backup_failed": "Respaldo fallido -- verifique los registros del servidor",
"backup_success": "Respaldo exitoso",
"backup_start": "Iniciar respaldo",
"backup_run": "Ejecutar respaldo",
"restore_explain1": "La restauración desde un respaldo del sistema solo debe realizarse en una base de datos de Documize Community vacía.",
"restore_explain2": "La operación de restauración recreará usuarios, grupos, permisos, espacios, categorías y contenido.",
"restore_explain3": "Puede tomar varios minutos completar el proceso de restauración -- tenga paciencia mientras la operación de restauración está en progreso.",
"restore_select_file": "Elija archivo de respaldo",
"restore_running": "Espere, Restaurar en ejecución...",
"restore_failed": "Falló la restauración -- verifique los registros del servidor",
"restore_success": "Restauración completada -- reinicie su navegador e inicie sesión",
"restore_confirm": "Confirmar restauración",
"restore_confirm_input": "Escriba RESTORE para comenzar el proceso",
"restore_warn": "Solo debe restaurar en una instancia de Documize Community vacía",
"changelog_available": "Actualización del producto disponible",
"changelog_guidance": "Para actualizar, reemplace el binario existente y reinicie Documize Community.",
"customize_name": "Nombre del sitio",
"customize_name_explain": "Proporcione un título corto para esta instancia de Documize Community",
"customize_message": "Mensaje del sitio",
"customize_message_explain": "Proporcione un mensaje corto que explique esta instancia de Documize Community",
"customize_theme": "Tema del sitio",
"customize_logo": "Logotipo del sitio",
"customize_logo_default": "Usar predeterminado",
"customize_logo_upload": "Cargar personalizado",
"customize_logo_explain": "Puede elegir cargar un logotipo pequeño (p. ej. 64px x 64px)",
"customize_subdomain": "Subdominio de la URL del sitio",
"customize_subdomain_explain": "Si está alojando en 'docs.example.org', entonces el valor del subdominio debe establecerse en 'docs'",
"customize_anon": "Espacios públicos visibles para usuarios anónimos",
"customize_anon_explain": "Compartir contenido con visitantes del sitio no autenticados",
"customize_conversion_url": "URL del servicio de conversión",
"customize_conversion_explain": "Punto final para gestionar la importación/exportación",
"customize_tags": "Etiquetas máximas por documento",
"customize_tags_explain": "Cuántas etiquetas se pueden asignar a un documento (entre 3 y 10 etiquetas)",
"integration_jira_url": "URL",
"integration_jira_url_explain": "Nombre de dominio completo para su Jira ejemplo, http://jira.example.org",
"integration_jira_username": "Nombre de usuario",
"integration_jira_username_explain": "Su nombre de usuario/correo electrónico de inicio de sesión de Jira",
"integration_jira_password": "Contraseña",
"integration_jira_password_explain": "Proporcione un token de API si usa Atlassian Cloud o una contraseña al alojar Jira por su cuenta",
"integration_trello_appkey": "Clave de la aplicación",
"search_reindex": "Puede tomar hasta 30 minutos reconstruir el índice de búsqueda",
"search_reindex_rebuild": "Reconstruir",
"search_reindex_start": "Iniciando el proceso de reindexación de búsqueda",
"search_reindex_finish": "Reindexación de búsqueda completada",
"smtp_host": "Host",
"smtp_host_explain": "p. ej. my.host.com",
"smtp_port": "Puerto",
"smtp_port_explain": "p. ej. 587",
"smtp_username": "Nombre de usuario",
"smtp_username_explain": "p. ej. Nombre de usuario de inicio de sesión para el servidor SMTP",
"smtp_password": "Contraseña",
"smtp_password_explain": "p. ej. Contraseña de inicio de sesión para el servidor SMTP",
"smtp_sender_email": "Correo electrónico del remitente",
"smtp_sender_email_explain": "p. ej. usuario@example.org",
"smtp_sender_name": "Nombre del remitente",
"smtp_sender_name_explain": "p. ej. Documize",
"smtp_fqdn": "Nombre de dominio completo del servidor remitente",
"smtp_fqdn_explain": "(opcional) SMTP puede requerir un nombre de dominio válido, p. ej. docs.example.org",
"smtp_anon_auth": "Autenticación anónima (ignorar credenciales)",
"smtp_base64": "Credenciales de codificación Base64",
"smtp_ssl": "Usar SSL",
"smtp_save_test": "Guardar y probar",
"smtp_sent_test_email": "Enviándole un correo electrónico de prueba",
"smtp_missing": "Faltan configuraciones del servidor de correo",
"space_admin_export": "Exportar todo el contenido",
"space_admin_export_running": "Exportación en ejecución...",
"space_admin_make_owner": "Agregarme como propietario",
"space_admin_empty": "No hay espacios compartidos para administrar",
"space_admin_delete_title": "Eliminación de espacio",
"space_admin_delete_check": "¿Está seguro de que desea eliminar este espacio y todos los documentos?",
"space_admin_confirm": "Escriba el nombre del espacio para confirmar",
"space_admin_confirm_explain": "¡Esto eliminará todos los documentos y plantillas dentro de este espacio!",
"label_add": "Agregar etiqueta",
"label_update": "Actualizar etiqueta",
"label_delete": "Eliminar etiqueta",
"label_delete_confirm": "¿Está seguro de que desea eliminar la etiqueta {1}?",
"user_admin_add": "Agregar usuarios",
"user_admin_bulk": "Carga masiva",
"user_admin_bulk_format": "Lista delimitada por comas: nombre, apellido, correo electrónico",
"group_add": "Agregar Grupo",
"group_name_explain": "Ingrese el nombre del grupo, p. ej. Administradores, desarrolladores, equipo Acme",
"group_member_add": "Agregar miembro",
"group_member_remove": "Eliminar miembro",
"group_delete": "Eliminar grupo",
"group_delete_confirm": "¿Está seguro de que desea eliminar este grupo?",
"group_delete_name": "Escriba el nombre del grupo para confirmar",
"group_delete_name_explain": "Esto eliminará la información de membresía del grupo y los permisos asociados",
"group_none": "No hay grupos",
"group_edit": "Editar grupo",
"find_user": "Buscar usuarios",
"find_user_syntax": "Buscar nombre, apellido, correo electrónico",
"user_max_display": "Máximo de usuarios para mostrar",
"user_assign_group": "Asignar grupos de usuarios",
"user_delete_confirm": "¿Está seguro de que desea eliminar al usuario {1}?",
"user_selected_delete_confirm": "¿Está seguro de que desea eliminar los usuarios seleccionados?",
"user_delete": "Eliminar usuario",
"permission_spaces": "Espacios",
"permission_spaces_explain": "Puede agregar espacios, tanto personales como compartidos con otros",
"permission_visible": "Visible",
"permission_visible_explain": "Puede ver los nombres de los usuarios y grupos, puede deshabilitarlos para usuarios externos como clientes/socios",
"permission_admin": "Administrador",
"permission_admin_explain": "Puede administrar todos los aspectos de Documize Community, como esta pantalla",
"permission_analytics": "Análisis",
"permission_analytics_explain": "Puede ver informes analíticos",
"permission_active": "Activo",
"permission_active_explain": "Puede iniciar sesión y usar Documize Community",
"auth_role_space": "Puede crear espacios",
"auth_disable_logout": "Deshabilitar cierre de sesión",
"auth_dual_login": "Inicio de sesión dual",
"auth_dual_login_explain": "Habilitar inicio de sesión a través de LDAP y correo electrónico/contraseña normal (útil para probar LDAP)",
"auth_email_password": "Correo electrónico/contraseña integrados",
"auth_connect_keycloak": "Conectarse al servidor de autenticación",
"auth_connect_ldap": "Conectarse a LDAP/Active Directory",
"auth_connect_cas": "Conectarse al servidor de autenticación central",
"auth_keycloak_running": "Sincronización de usuarios de Keycloak en ejecución...",
"auth_keycloak_sync": "Sincronizar con Keycloak",
"auth_ldap_running": "Sincronización de usuarios de LDAP en ejecución...",
"auth_ldap_sync": "Sincronizar con LDAP",
"auth_keycloak_url": "URL del servidor de Keycloak",
"auth_keycloak_realm": "Dominio de Keycloak",
"auth_keycloak_pk": "Clave pública de dominio Keycloak",
"auth_keycloak_pk_explain": "Copiar la clave pública RSA desde Configuración de dominio > Claves",
"auth_keycloak_oidc": "ID de cliente de Keycloak OIDC",
"auth_keycloak_group": "ID de grupo de Keycloak (opcional)",
"auth_keycloak_group_explain": "Si desea sincronizar usuarios en un grupo en particular (por ejemplo, 'Usuarios de la comunidad de Documize'), proporcione la ID de grupo (por ejemplo, 511d8b61-1ec8-45f6-bc8d-5de64d54c9d2)",
"auth_keycloak_username": "Nombre de usuario de Keycloak",
"auth_keycloak_username_explain": "Se utiliza para conectar con Keycloak y sincronizar usuarios con Documize Community (crear usuario en Master Realm y asignar el rol 'view-users' contra el Realm especificado anteriormente)",
"auth_keycloak_password": "Contraseña de Keycloak",
"auth_keycloak_password_explain": "Se utiliza para conectar con Keycloak y sincronizar usuarios con Documize Community",
"auth_ldap_server": "Servidor LDAP",
"auth_ldap_server_explain": "Dirección IP o de host, p. ej. ldap.example.org, 127.0.0.1",
"auth_ldap_port": "Puerto del servidor LDAP",
"auth_ldap_port_explain": "Número de puerto, p. ej. 389",
"auth_ldap_base": "DN base",
"auth_ldap_base_explain": "Punto de inicio para filtros de búsqueda, p. ej. ou=users,dc=example,dc=com",
"auth_ldap_bind": "DN de enlace",
"auth_ldap_bind_explain": "Credenciales de inicio de sesión para el servidor LDAP",
"auth_ldap_password": "Contraseña de enlace",
"auth_ldap_filter_user": "Filtro de usuario",
"auth_ldap_filter_user_explain1": "Filtro de búsqueda para encontrar usuarios, p. ej. (|(objectClass=person)(objectClass=user)(objectClass=inetOrgPerson))",
"auth_ldap_filter_user_explain2": "Especificar filtro de usuario y/o filtro de grupo",
"auth_ldap_filter_group": "Filtro de grupo",
"auth_ldap_filter_group_explain": "Filtro de búsqueda para encontrar usuarios a través de grupos, p. ej. (&(objectClass=group)(|(cn=ship_crew)(cn=admin_staff)))",
"auth_ldap_rdn": "Atributo de usuario RDN",
"auth_ldap_rdn_explain1": "Atributo de nombre de usuario/inicio de sesión, p. ej. uid en LDAP, sAMAccountName en Active Directory",
"auth_ldap_rdn_explain2": "Atributos de usuario utilizados para recuperar datos al usar el filtro de usuario",
"auth_ldap_firstname": "Nombre del atributo de usuario",
"auth_ldap_firstname_explain": "Atributo de nombre, p. ej. givenName",
"auth_ldap_lastname": "Atributo de usuario Apellido",
"auth_ldap_lastname_explain": "Atributo de apellido, p. ej. sn",
"auth_ldap_email": "Correo electrónico del atributo de usuario",
"auth_ldap_email_explain": "Atributo de correo electrónico, p. ej. mail",
"auth_ldap_group": "Miembro del atributo de grupo",
"auth_ldap_group_explain1": "Atributo que identifica a un miembro individual del grupo, p. ej. miembro o miembro único",
"auth_ldap_group_explain2": "Atributos de grupo utilizados para recuperar datos al usar el filtro de grupo",
"auth_ldap_preview": "Vista previa de LDAP",
"auth_ldap_preview_result": "Conexión exitosa, se encontraron {1} usuarios",
"auth_ldap_preview_error": "No se puede conectar",
"auth_cas_url": "URL del servidor CAS",
"auth_cas_url_explain": "p. ej. http://localhost:8888/auth",
"auth_cas_back_url": "URL CAS de Documize",
"auth_cas_back_url_explain": "p. ej. http://Documize-URL/auth/cas",
"administration": "Administración",
"admin_general": "General",
"admin_general_explain": "Opciones para personalizar Documize Community",
"admin_user_management": "Administración de usuarios",
"admin_user_management_explain": "Administrar información básica, contraseñas y permisos",
"admin_user_groups": "Grupos de usuarios",
"admin_user_groups_explain": "Definir grupos para una administración más sencilla de los usuarios y los permisos",
"admin_integrations": "Integraciones",
"admin_integrations_explain": "Habilitar y configurar integraciones de terceros",
"admin_mail_server": "Servidor de correo",
"admin_mail_server_explain": "Especificar los detalles del servidor de correo necesarios para enviar correos electrónicos de invitación y notificación a los usuarios",
"admin_audit_log": "Registro de auditoría",
"admin_backup": "Copia de seguridad y restauración",
"admin_backup_explain": "Exportar todo el contenido a un único archivo zip autónomo",
"admin_billing": "Facturación",
"admin_changelog": "Registro de cambios",
"admin_auth_explain": "Elegir proveedor de autenticación de usuarios: Documize, Redhat Keycloak, LDAP/AD, Central Authentication Server",
"admin_spaces_explain": "Eliminar espacios, tomar posesión de espacios compartidos y huérfanos",
"admin_labels_explain": "Agrupar y navegar por espacios con etiquetas visuales",
"admin_search_explain": "Reconstruir el índice de búsqueda",
"setup_default_message": "La instancia de la Comunidad Documize contiene toda nuestra documentación",
"get_in_touch": "Póngase en contacto",
"product_feedback": "¿Tiene una idea, sugerencia o algún comentario sobre un producto?",
"product_news": "Novedades sobre el producto",
"product_news_explain": "Últimas novedades y actualizaciones sobre el producto",
"product_update": "Actualización disponible",
"product_whats_new": "Novedades",
"product_activation_key_missing": "Falta la clave de activación",
"account_update": "Actualizar cuenta",
"404": "¡Ups! No se pudo encontrar esa página.",
"404_explain": "¿Quizás el contenido que está buscando ya no está disponible?",
"close_account": "Cierre mi cuenta de Documize.",
"third_party": "La comunidad de Documize utiliza bibliotecas y componentes de código abierto de terceros",
"server_ldap_error1": "Error: omitiendo la sincronización de usuarios con LDAP ya que no es la opción configurada",
"server_ldap_error2": "Error: no se pueden leer los datos de configuración de LDAP",
"server_ldap_error3": "Error: no se pueden obtener los usuarios de LDAP: {1}",
"server_ldap_complete": "Sincronización completa con el servidor LDAP",
"server_ldap_summary": "La sincronización LDAP encontró {1} usuarios, se agregaron {2} nuevos usuarios, se ignoraron {3} usuarios con datos faltantes",
"server_keycloak_error1": "Error: omitiendo la sincronización de usuarios con LDAP Keycloak ya que no es la opción configurada",
"server_keycloak_error2": "Error: no se pueden leer los datos de configuración de Keycloak",
"server_keycloak_error3": "Error: no se pueden obtener los usuarios de Keycloak: {1}",
"server_keycloak_summary": "La sincronización de Keycloak encontró {1} usuarios, se agregaron {2} nuevos usuarios, se ignoraron {3} usuarios con datos faltantes",
"server_smtp_success": "Correo electrónico enviado correctamente",
"server_smtp_test_subject": "Prueba SMTP de Documize Community",
"server_smtp_test_body": "Este es un correo electrónico de prueba de Documize Community que utiliza la configuración SMTP actual",
"server_error_user": "Error: no se pueden obtener los usuarios",
"server_error_org": "Error: no se puede obtener el registro de la organización",
"mail_template_click_here": "Haga clic aquí",
"mail_template_sender": "Su colega",
"mail_template_approval": "{1} le ha otorgado el rol de aprobación de documentos",
"mail_template_approval_explain": "Se le solicita que apruebe todos los cambios en el siguiente documento:",
"mail_template_password": "Su contraseña temporal:",
"mail_template_user_invite": "{1} le ha invitado a la Comunidad Documize",
"mail_template_user_existing": "{1} le ha invitado a su cuenta de la Comunidad Documize",
"mail_template_reset_password": "Su solicitud de restablecimiento de contraseña de la Comunidad Documize",
"mail_template_shared": "{1} ha compartido el espacio {2} con usted",
"mail_template_invited": "{1} ha compartido el espacio {2} con usted en la Comunidad Documize",
"mail_template_approval_request": "{1} ha solicitado la aprobación de cambio de documento",
"mail_template_approval_request_explain": "Apruebe el cambio al documento titulado {1}",
"mail_template_approval_withdraw": "{1} ha retirado la solicitud de aprobación de cambio de documento",
"mail_template_approval_withdraw_explain": "{1} La solicitud de aprobación ha sido retirada para el documento",
"mail_template_publish_explain": "Puede publicar el documento {1} cambiando el estado de Borrador a En vivo.",
"mail_template_reviewer": "Revisión de cambio de documento completada por {1}",
"mail_template_approval_1": "Su cambio no ha sido publicado porque ningún revisor aprobó el cambio.",
"mail_template_approval_2": "Su cambio no ha sido publicado porque la mayoría de los revisores no aprobaron el cambio.",
"mail_template_approval_3": "Su cambio no ha sido publicado porque no se obtuvo la aprobación unánime logrado.",
"mail_template_approval_4": "Su cambio ha sido publicado porque un revisor aprobó el cambio.",
"mail_template_approval_5": "Su cambio ha sido publicado porque la mayoría de los revisores aprobaron el cambio.",
"mail_template_approval_6": "Su cambio ha sido publicado con aprobación unánime.",
"mail_template_approval_7": "Su cambio aún está bajo revisión y requiere que cualquier otro revisor apruebe el cambio.",
"mail_template_approval_8": "Su cambio aún está bajo revisión y requiere que la mayoría de los revisores aprueben el cambio.",
"mail_template_approval_9": "Su cambio aún está bajo revisión y necesita aprobación unánime.",
"mail_template_review_outcome": "{1} revisó su cambio para el documento {2} y fue marcado como {3}. {4}",
"mail_template_action_read": "Lea el documento {1} antes del {2}.",
"mail_template_action_feedback": "Envíe comentarios sobre el documento {1} antes del {2}.",
"mail_template_action_contribute": "Contribuya con contenido al documento {1} antes del {2}.",
"mail_template_publish_request": "{1} ha solicitado la publicación del documento",
"mail_template_action_assigned": "{1} le ha asignado una acción de documento",
"mail_template_action_done": "{1} ha completado su acción de documento",
"mail_template_action_read_done": "{1} ha leído el documento {2} (fecha de publicación: {3})",
"mail_template_action_feedback_done": "{1} ha proporcionado comentarios sobre el documento {2} (fecha de publicación: {3})",
"mail_template_action_contribute_done": "{1} ha contribuido con contenido a documento {2} (fecha de entrega: {3})",
"mail_template_share_doc": "{1} ha compartido un documento contigo",
"mail_template_share_viewed": "{1} ha sido visto",
"mail_template_feedback_received": "{1} te ha enviado comentarios sobre {2}"
}
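
The strings above rely on positional placeholders such as {1}, {2} and {3} that are filled in at runtime. As a rough illustration only (this is not Documize's actual i18n code; the `translate` helper and the sample catalogue below are invented for the example), substitution for such a catalogue could look like this:

```go
package main

import (
	"fmt"
	"strings"
)

// translate is a hypothetical helper: it looks up a key in a loaded locale map
// and replaces the positional placeholders {1}, {2}, ... with the given arguments.
func translate(catalog map[string]string, key string, args ...string) string {
	s, ok := catalog[key]
	if !ok {
		return key // fall back to the key itself when no translation exists
	}
	for i, a := range args {
		s = strings.ReplaceAll(s, fmt.Sprintf("{%d}", i+1), a)
	}
	return s
}

func main() {
	esAR := map[string]string{
		"mail_template_shared": "{1} ha compartido el espacio {2} con usted",
	}
	fmt.Println(translate(esAR, "mail_template_shared", "Ana", "Documentación"))
	// Output: Ana ha compartido el espacio Documentación con usted
}
```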

View file

@ -17,10 +17,10 @@
"remove": "Supprimer",
"duplicate": "Dupliquer",
"duplicated": "Dupliqué",
"edit": "Editer",
"edited": "Edité",
"edit": "Éditer",
"edited": "Édité",
"download": "Télécharger",
"upload": "uploader",
"upload": "Uploader",
"uploaded": "Uploadé",
"import": "Importer",
"export": "Exporter",
@ -44,9 +44,9 @@
"ok": "OK",
"preview": "Prévisualiser",
"preview_wait": "Génération de la visualisation...",
"pdf_prepare": "Preparation du PDF...",
"pdf_prepare": "Préparation du PDF...",
"publish": "Publier",
"print": "imprimer",
"print": "Imprimer",
"reject": "Rejeter",
"rejected": "Rejeté",
"reply": "Répondre",
@ -65,7 +65,7 @@
"signin": "Se connecter",
"sort": "Trier",
"sort_ascending": "Ascendant",
"sort_descending": "Descendand",
"sort_descending": "Descendant",
"sort_by_name": "Nom",
"sort_by_created": "Date de création",
"sort_by_revised": "Dernière modification",
@ -89,22 +89,22 @@
"username": "Nom d'utilisateur",
"user": "Utilisateur",
"password_confirm": "Confimer Mot de Passe",
"encryption": "Encryption",
"encryption": "Chiffrement",
"notice": "Remarque",
"backup": "Backup",
"backup": "Sauvegarder",
"restore": "Restaurer",
"completed": "Terminé",
"please_wait": "Attendez s'il vous plait..",
"filter": "Filtrer",
"not_found": "Introuvable",
"nothing_found": "Rien n'a été trouvé",
"expand_collapse": "Agrandir/Rassembler",
"expand_collapse": "Agrandir/Réduire",
"options": "Options",
"settings": "Paramètres",
"about": "A Propos",
"about": "À Propos",
"meta": "Meta",
"permissions": "Permissions",
"profile": "Profile",
"profile": "Profil",
"go_top": "Aller en haut",
"help": "Aide",
"reference": "référence",
@ -113,30 +113,30 @@
"move_down": "Déplacer vers le bas",
"indent": "Retrait",
"outdent": "Retrait extérieur",
"default": "Defaut",
"default": "Défaut",
"no_undo": "Procédez avec prudence car il n'y a pas d'annulation",
"show_hide": "Montrer/cacher",
"sent": "Envoyer",
"everyone": "Tout le monde",
"ip": "IP",
"event": "Evénement",
"event": "Événement",
"when": "Quand",
"last_seen": "Vue pour la dernière fois",
"change": "Changer",
"no_access": "Accès interdit",
"drag_drop_reorder": "Glisser/déposer pour réordonner",
"select": "Selectionner",
"locale": "Paramètres régionnaux",
"locale": "Paramètres régionaux",
"public": "Publique",
"public_explain": "Publique - Peut être vue par tout le monde",
"public_explain": "Publique - peut être vue par tout le monde",
"protected": "Protégé",
"protected_explain": "Protégé - l'accès est limité aux utilisateurs sélectionnés",
"personal": "Personnel",
"personal_explain": "Personnel - visible uniquement par moi",
"label": "Label",
"labels": "Labels",
"labels_none": "Aucun labels",
"label": "Étiquette",
"labels": "Étiquettes",
"labels_none": "Aucune étiquette",
"label_unclassified": "Non-classifié",
"draft": "Brouillon",
"drafted": "Brouillon",
@ -145,7 +145,7 @@
"live": "Direct",
"live_explain": "Direct - publié à la création",
"archived": "Archivé",
"archived_explain": "Archivé - non visible par personne",
"archived_explain": "Archivé - non visible par quiconque",
"approved": "Approuvé",
"reverted": "Inversé",
"published": "Publié",
@ -160,17 +160,17 @@
"revisions": "Révisions",
"versions": "Versions",
"version": "Version",
"versioned": "Versioné",
"versioned": "Versionné",
"versions_explain": "Créez plusieurs versions du même contenu — les utilisateurs peuvent sélectionner la version à afficher",
"change_control": "Le contrôle des modifications",
"change_control_explain": "Sélectionnez la méthode de contrôle des modifications",
"pin": "Epingle",
"pinned": "Epinglé",
"pin": "Épingle",
"pinned": "Épinglé",
"unpin": "Désépingler",
"unpinned": "Désépinglé",
"blocks": "Content Blocks",
"blocks_explain": "Les blocs de contenu fournissent un contenu réutilisable qui peut être inséré dans n'importe quel document",
"block_delete_confirm": "Voulez-vous vraiment supprimer ce bloc de contenu réutilisable?",
"block_delete_confirm": "Voulez-vous vraiment supprimer ce bloc de contenu réutilisable ?",
"actions": "Actions",
"activity": "Activité",
"activity_explain": "Rapports sur l'activité des utilisateurs, y compris les ajouts, les vues, les mises à jour, les approbations",
@ -206,25 +206,25 @@
"export_html_explain1": "exportez tout le contenu de l'espace au format HTML ou sélectionnez des catégories.",
"export_html_explain2": "Tout le contenu de l'espace sera exporté sous la forme d'un seul fichier HTML auto-inclus.",
"import_convert": "En conversion {1}",
"import_success": "Conversion réussie{1}",
"import_success": "Conversion réussie {1}",
"add_recent": "Ajouté récemment",
"update_recent": "Mise à jour récemment",
"space_change": "Déplacer dans un nouvel espace",
"space_change_prompt": "Selectionner un espace",
"space_change_prompt": "Sélectionner un espace",
"space_new": "Nouvel Espace",
"space_name": "Nom de l'espace",
"space_name_rules": "Caractères et chiffres uniquement",
"space_description": "Description de l'espace",
"space_clone": "Cloner l'espace",
"space_select": "Selectionner l'espace",
"space_select": "Sélectionner l'espace",
"space_delete": "Supprimer l'espace",
"space_delete_prompt": "Veuillez saisir le nom de l'espace pour confirmer",
"space_delete_warn": "Cela supprimera tous les documents et modèles de cet espace!",
"space_delete_warn": "Cela supprimera tous les documents et modèles de cet espace !",
"space_copy": "Copier des modèles, des autorisations, des documents à partir d'un espace existant",
"space_copy_template": "Copier les modèles",
"space_copy_permission": "Copier les permissions",
"space_copy_document": "Copier les documents",
"space_empty_state": "Ajouter des documents via + CONTENT",
"space_empty_state": "Ajouter des documents via + CONTENU",
"space_lockout": "Les autorisations de l'espace vous empêchent d'afficher et de créer des documents",
"space_invite_message": "Bonjour, je partage l'espace {1} (dans {2}) avec vous afin que nous puissions tous les deux collaborer sur cette documentation.",
"protection_none": "Modifications autorisées sans approbation",
@ -242,16 +242,16 @@
"feedback_prompt": "Texte de commentaires",
"feedback_prompt_hint": "Entrez le texte invitant l'utilisateur à renseigner un commentaire",
"feedback_prompt_explain": "Spécifiez le texte qui apparait, par ex. Cela vous a-t-il aidé ? Est-ce que cela a été utile? Avez-vous trouvé ce dont vous aviez besoin ?",
"feedback_thanks": "Merci pour les commentaires!",
"feedback_help_yes": "Oui, merci!",
"feedback_thanks": "Merci pour les commentaires !",
"feedback_help_yes": "Oui, merci !",
"feedback_help_no": "Pas vraiment",
"likes_prompt": "Cela vous a-t-il aidé ?",
"delete_confirm": "Etes-vous sûr de vouloir supprimer?",
"delete_confirm": "Êtes-vous sûr de vouloir supprimer?",
"category_assignment_summary": "Attribué à {1} documents et visible par {2} utilisateurs/groupes",
"category_no_access": "Vous n'avez pas d'autorisation d'affichage pour cette catégorie",
"category_default": "Catégorie par défaut pour le nouveau contenu",
"category_default_explain": "S'applique automatiquement aux documents nouvellement créés",
"category_delete_confirm": "Voulez-vous vraiment supprimer cette catégorie?",
"category_delete_confirm": "Voulez-vous vraiment supprimer cette catégorie ?",
"category_permissions": "Autorisations de catégorie",
"category_permissions_explain": "Sélectionnez qui peut afficher les documents dans la catégorie",
"category_none": "Cet espace n'a pas encore de catégories",
@ -263,7 +263,7 @@
"approval_awaiting": "En attente d'approbation",
"protection_type_open": "Libre",
"protection_type_protected": "Protégé",
"protection_type_locked": "Vérouillé",
"protection_type_locked": "Verrouillé",
"doc_request_contribution": "Demande de contribution",
"doc_request_feedback": "Demande de retour/commentaires",
"doc_request_read": "Demande de lecture",
@ -339,8 +339,8 @@
"audit_90": "90 jours",
"chat_leave_comment": "Laisser un commentaire",
"chat_applies_to": "S'applique à",
"chat_delete_confirm": "Êtes-vous sûr de vouloir supprimer ce commentaire?",
"chat_reply_delete_confirm": "Êtes-vous sûr de vouloir supprimer cette réponse?",
"chat_delete_confirm": "Êtes-vous sûr de vouloir supprimer ce commentaire ?",
"chat_reply_delete_confirm": "Êtes-vous sûr de vouloir supprimer cette réponse ?",
"doc_secure_share": "Partager via un lien externe sécurisé",
"doc_secure_shared_by": "Partagé par",
"doc_version_viewed": "Renommer la version",
@ -383,7 +383,7 @@
"space_permission_doc_template": "Modèles",
"space_permission_doc_template_explain": "Peut créer et publier des modèles de document",
"space_permission_doc_approval": "Approbation",
"space_permission_doc_approval_explain": "Peut (1) approuver ou rejeter les modifications du document; (2) déplacer les documents du brouillon à la version publique",
"space_permission_doc_approval_explain": "Peut (1) approuver ou rejeter les modifications du document ; (2) déplacer les documents du brouillon à la version publique",
"space_permission_doc_draft": "Brouillon",
"space_permission_doc_draft_explain": "Peut afficher et travailler sur les documents marqués en tant que brouillon",
"space_permission_doc_version": "Versions",
@ -426,17 +426,17 @@
"section_papertrail_search": "Requête de recherche",
"section_papertrail_search_explain": "Déterminez les entrées de journal que vous voulez afficher, par exemple bob OU (\"exemple de phrase\" ET sally)",
"section_papertrail_max": "Résultats maximums",
"section_papertrail_max_explain": "Combien d'entrées de journal voulez-vous afficher?",
"section_papertrail_max_explain": "Combien d'entrées de journal voulez-vous afficher ?",
"section_papertrail_group": "Groupe",
"section_papertrail_group_explain": "Groupe Papertrail facultatif",
"section_pdf": "Visualiseur PDF",
"section_pdf_upload_explain": "Le premier PDF téléchargé sera utilisé",
"section_pdf_height": "Hauteur de l'aperçu",
"section_pdf_height_explain": "Quelle est la hauteur de l'aperçu du PDF?",
"section_pdf_height_explain": "Quelle est la hauteur de l'aperçu du PDF ?",
"section_pdf_start": "Page de démarrage",
"section_pdf_start_explain": "La première page à afficher",
"section_pdf_sidebar": "Barre latérale",
"section_pdf_sidebar_explain": "Optionnellement, définir le contenu de la barre latérale",
"section_pdf_sidebar_explain": "En option, définir le contenu de la barre latérale",
"section_pdf_sidebar_none": "Aucun",
"section_pdf_sidebar_bookmark": "Signets",
"section_pdf_sidebar_thumbnail": "Vignettes",
@ -697,25 +697,25 @@
"product_whats_new": "Quoi de neuf",
"product_activation_key_missing": "Clé d'activation manquante",
"account_update": "Mettre à jour le compte",
"404": "Oups! Cette page n'a pas pu être trouvée.",
"404": "Oups ! Cette page n'a pas pu être trouvée.",
"404_explain": "Peut-être que le contenu que vous recherchez n'est plus disponible?",
"close_account": "Veuillez fermer mon compte Documize.",
"third_party": "La communauté Documize utilise des bibliothèques et des composants open source provenant de tiers.",
"server_ldap_error1": "Erreur: saut de la synchronisation de l'utilisateur avec LDAP car ce n'est pas l'option configurée",
"server_ldap_error2": "Erreur: impossible de lire les données de configuration LDAP",
"server_ldap_error3": "Erreur: impossible de récupérer les utilisateurs LDAP: {1}",
"server_ldap_error1": "Erreur : saut de la synchronisation de l'utilisateur avec LDAP car ce n'est pas l'option configurée",
"server_ldap_error2": "Erreur : impossible de lire les données de configuration LDAP",
"server_ldap_error3": "Erreur : impossible de récupérer les utilisateurs LDAP: {1}",
"server_ldap_complete": "Synchronisation terminée avec le serveur LDAP",
"server_ldap_summary": "La synchronisation LDAP a trouvé {1} utilisateurs, {2} nouveaux utilisateurs ajoutés, {3} utilisateurs avec des données manquantes ignorés",
"server_keycloak_error1": "Erreur: saut de la synchronisation de l'utilisateur avec Keycloak car ce n'est pas l'option configurée",
"server_keycloak_error2": "Erreur: impossible de lire les données de configuration Keycloak",
"server_keycloak_error3": "Erreur: impossible de récupérer les utilisateurs Keycloak: {1}",
"server_keycloak_error1": "Erreur : saut de la synchronisation de l'utilisateur avec Keycloak car ce n'est pas l'option configurée",
"server_keycloak_error2": "Erreur : impossible de lire les données de configuration Keycloak",
"server_keycloak_error3": "Erreur : impossible de récupérer les utilisateurs Keycloak: {1}",
"server_keycloak_summary": "La synchronisation Keycloak a trouvé {1} utilisateurs, {2} nouveaux utilisateurs ajoutés, {3} utilisateurs avec des données manquantes ignorés",
"server_smtp_success": "E-mail envoyé avec succès",
"server_smtp_test_subject": "Test SMTP de la communauté Documize",
"server_smtp_test_body": "Il s'agit d'un email de test de la communauté Documize en utilisant les paramètres SMTP actuels.",
"server_error_user": "Erreur: impossible de récupérer les utilisateurs",
"server_error_org": "Erreur: impossible de récupérer l'enregistrement de l'organisation",
"server_error_user": "Erreur : impossible de récupérer les utilisateurs",
"server_error_org": "Erreur : impossible de récupérer l'enregistrement de l'organisation",
"mail_template_click_here": "Cliquez ici",
"mail_template_sender": "Votre collègue",

File diff suppressed because it is too large

View file

@ -1,6 +0,0 @@
language: go
go:
- 1.6
- 1.7
- 1.8

View file

@ -1,7 +1,7 @@
.PHONY: ci generate clean
ci: clean generate
go test -v ./...
go test -race -v ./...
generate:
go generate .

View file

@ -7,8 +7,8 @@ http.Handlers.
Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
which is also exposed for users interested in a more low-level API.
[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop)
[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop)
[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop)
[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
## Usage Example

View file

@ -52,7 +52,7 @@ func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWri
return func(code int) {
next(code)
if !headerWritten {
if !(code >= 100 && code <= 199) && !headerWritten {
m.Code = code
headerWritten = true
}
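
For context, a minimal sketch of how CaptureMetrics is typically wrapped around a handler (the handler and port below are made up); with the change above, 1xx informational status codes no longer end up as the recorded Code:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/felixge/httpsnoop"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	wrapped := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// CaptureMetrics runs the handler and records status code, duration and bytes written.
		m := httpsnoop.CaptureMetrics(app, w, r)
		fmt.Printf("%s %s -> code=%d bytes=%d duration=%s\n",
			r.Method, r.URL.Path, m.Code, m.Written, m.Duration)
	})

	log.Fatal(http.ListenAndServe(":8080", wrapped))
}
```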

View file

@ -1,5 +1,5 @@
// +build go1.8
// Code generated by "httpsnoop/codegen"; DO NOT EDIT
// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop

View file

@ -1,5 +1,5 @@
// +build !go1.8
// Code generated by "httpsnoop/codegen"; DO NOT EDIT
// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop

View file

@ -1,5 +1,19 @@
# Changelog
## 1.6.0
### Changed
* Go.mod updated to Go 1.17
* Azure SDK for Go dependencies updated
### Features
* Added `ActiveDirectoryAzCli` and `ActiveDirectoryDeviceCode` authentication types to `azuread` package
* Always Encrypted encryption and decryption with 2 hour key cache (#116)
* 'pfx', 'MSSQL_CERTIFICATE_STORE', and 'AZURE_KEY_VAULT' encryption key providers
* TDS8 can now be used for connections by setting encrypt="strict"
## 1.5.0
### Features

View file

@ -1,4 +1,4 @@
# A pure Go MSSQL driver for Go's database/sql package
# Microsoft's official Go MSSQL driver
[![Go Reference](https://pkg.go.dev/badge/github.com/microsoft/go-mssqldb.svg)](https://pkg.go.dev/github.com/microsoft/go-mssqldb)
[![Build status](https://ci.appveyor.com/api/projects/status/jrln8cs62wj9i0a2?svg=true)](https://ci.appveyor.com/project/microsoft/go-mssqldb)
@ -7,7 +7,7 @@
## Install
Requires Go 1.10 or above.
Requires Go 1.17 or above.
Install with `go install github.com/microsoft/go-mssqldb@latest`.
@ -25,9 +25,10 @@ Other supported formats are listed below.
* `connection timeout` - in seconds (default is 0 for no timeout), set to 0 for no timeout. Recommended to set to 0 and use context to manage query and connection timeouts.
* `dial timeout` - in seconds (default is 15 times the number of registered protocols), set to 0 for no timeout.
* `encrypt`
* `strict` - Data sent between client and server is encrypted E2E using [TDS8](https://learn.microsoft.com/en-us/sql/relational-databases/security/networking/tds-8?view=sql-server-ver16).
* `disable` - Data sent between client and server is not encrypted.
* `false` - Data sent between client and server is not encrypted beyond the login packet. (Default)
* `true` - Data sent between client and server is encrypted.
* `false`/`optional`/`no`/`0`/`f` - Data sent between client and server is not encrypted beyond the login packet. (Default)
* `true`/`mandatory`/`yes`/`1`/`t` - Data sent between client and server is encrypted.
* `app name` - The application name (default is go-mssqldb)
* `authenticator` - Can be used to specify use of a registered authentication provider. (e.g. ntlm, winsspi (on windows) or krb5 (on linux))
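
A minimal sketch of the new `encrypt=strict` (TDS8) mode from the list above, using the standard `database/sql` interface; the server address, credentials and database name are placeholders:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/microsoft/go-mssqldb"
)

func main() {
	// encrypt=strict requests TDS8: the whole session, including the login packet, is TLS encrypted.
	dsn := "sqlserver://sa:YourPassword@localhost:1433?database=master&encrypt=strict&tlsmin=1.2"

	db, err := sql.Open("sqlserver", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```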
@ -56,13 +57,14 @@ Other supported formats are listed below.
* `TrustServerCertificate`
* false - Server certificate is checked. Default is false if encrypt is specified.
* true - Server certificate is not checked. Default is true if encrypt is not specified. If trust server certificate is true, driver accepts any certificate presented by the server and any host name in that certificate. In this mode, TLS is susceptible to man-in-the-middle attacks. This should be used only for testing.
* `certificate` - The file that contains the public key certificate of the CA that signed the SQL Server certificate. The specified certificate overrides the go platform specific CA certificates.
* `certificate` - The file that contains the public key certificate of the CA that signed the SQL Server certificate. The specified certificate overrides the go platform specific CA certificates. Currently, certificates of PEM type are supported.
* `hostNameInCertificate` - Specifies the Common Name (CN) in the server certificate. Default value is the server host.
* `tlsmin` - Specifies the minimum TLS version for negotiating encryption with the server. Recognized values are `1.0`, `1.1`, `1.2`, `1.3`. If not set to a recognized value the default value for the `tls` package will be used. The default is currently `1.2`.
* `ServerSPN` - The kerberos SPN (Service Principal Name) for the server. Default is MSSQLSvc/host:port.
* `Workstation ID` - The workstation name (default is the host name)
* `ApplicationIntent` - Can be given the value `ReadOnly` to initiate a read-only connection to an Availability Group listener. The `database` must be specified when connecting with `Application Intent` set to `ReadOnly`.
* `protocol` - forces use of a protocol. Make sure the corresponding package is imported.
* `columnencryption` or `column encryption setting` - a boolean value indicating whether Always Encrypted should be enabled on the connection.
### Connection parameters for namedpipe package
* `pipe` - If set, no Browser query is made and named pipe used will be `\\<host>\pipe\<pipe>`
@ -216,6 +218,8 @@ The credential type is determined by the new `fedauth` connection string paramet
* `resource id=<resource id>` - optional resource id of user-assigned managed identity. If empty, system-assigned managed identity or user id are used (if both user id and resource id are provided, resource id will be used)
* `fedauth=ActiveDirectoryInteractive` - authenticates using credentials acquired from an external web browser. Only suitable for use with human interaction.
* `applicationclientid=<application id>` - This guid identifies an Azure Active Directory enterprise application that the AAD admin has approved for accessing Azure SQL database resources in the tenant. This driver does not have an associated application id of its own.
* `fedauth=ActiveDirectoryDeviceCode` - prints a message to stdout giving the user a URL and code to authenticate. Connection continues after user completes the login separately.
* `fedauth=ActiveDirectoryAzCli` - reuses local authentication the user already performed using Azure CLI.
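
A sketch of the new `ActiveDirectoryAzCli` mode listed above, assuming the `azuread` package's exported `DriverName` as described in the driver documentation; server and database names are placeholders:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/microsoft/go-mssqldb/azuread"
)

func main() {
	// Reuse the token the user already obtained with `az login`.
	dsn := "sqlserver://myserver.database.windows.net?database=mydb&fedauth=ActiveDirectoryAzCli"

	db, err := sql.Open(azuread.DriverName, dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```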
```go
@ -377,8 +381,63 @@ db.QueryContext(ctx, `select * from t2 where user_name = @p1;`, mssql.VarChar(na
// Note: Mismatched data types on table and parameter may cause long running queries
```
## Using Always Encrypted
The protocol and cryptography details for AE are [detailed elsewhere](https://learn.microsoft.com/sql/relational-databases/security/encryption/always-encrypted-database-engine?view=sql-server-ver16).
### Enablement
To enable AE on a connection, set the `ColumnEncryption` value to true on a config or pass `columnencryption=true` in the connection string.
Decryption and encryption won't succeed, however, without also including a decryption key provider. To avoid code size impacts on non-AE applications, key providers are not included by default.
Include the local certificate providers:
```go
import (
"github.com/microsoft/go-mssqldb/aecmk/localcert"
)
```
You can also instantiate a key provider directly in code and hand it to a `Connector` instance.
```go
c := mssql.NewConnectorConfig(myconfig)
c.RegisterCekProvider(providerName, MyProviderType{})
```
### Decryption
If the correct key provider is included in your application, decryption of encrypted cells happens automatically with no extra server round trips.
### Encryption
Encryption of parameters passed to `Exec` and `Query` variants requires an extra round trip per query to fetch the encryption metadata. If the error returned by a query attempt indicates a type mismatch between the parameter and the destination table, most likely your input type is not a strict match for the SQL Server data type of the destination. You may be using a Go `string` when you need to use one of the driver-specific aliases like `VarChar` or `NVarCharMax`.
*** NOTE *** - Currently `char` and `varchar` types do not include a collation parameter component so can't be used for inserting encrypted values. Also, using a nullable sql package type like `sql.NullableInt32` to pass a `NULL` value for an encrypted column will not work unless the encrypted column type is `nvarchar`.
https://github.com/microsoft/go-mssqldb/issues/129
https://github.com/microsoft/go-mssqldb/issues/130
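
As a hedged illustration of the paragraph above (the table, column and function names are hypothetical, and the column types are assumptions): passing a driver-specific alias instead of a plain Go `string` lets the driver match the encrypted column's SQL type:

```go
package people

import (
	"context"
	"database/sql"

	mssql "github.com/microsoft/go-mssqldb"
)

// insertPerson inserts into a hypothetical dbo.People table whose SSN column is an
// Always Encrypted nvarchar column. The driver fetches the encryption metadata with
// one extra round trip and encrypts the parameter before sending it.
func insertPerson(ctx context.Context, db *sql.DB, name, ssn string) error {
	_, err := db.ExecContext(ctx,
		"INSERT INTO dbo.People (Name, SSN) VALUES (@p1, @p2)",
		mssql.VarChar(name),    // plaintext column
		mssql.NVarCharMax(ssn), // encrypted column: use an alias that matches the SQL type
	)
	return err
}
```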
### Local certificate AE key provider
Key provider configuration is managed separately without any properties in the connection string.
The `pfx` provider exposes its instance as the variable `PfxKeyProvider`. You can give it passwords for certificates using `SetCertificatePassword(pathToCertificate, path)`. Use an empty string or `"*"` as the path to use the same password for all certificates.
The `MSSQL_CERTIFICATE_STORE` provider exposes its instance as the variable `WindowsCertificateStoreKeyProvider`.
Both providers can be constrained to an allowed list of encryption key paths by appending paths to `provider.AllowedLocations`.
### Azure Key Vault (AZURE_KEY_VAULT) key provider
Import this provider using `github.com/microsoft/go-mssqldb/aecmk/akv`
Constrain the provider to an allowed list of key vaults by appending vault host strings like "mykeyvault.vault.azure.net" to `akv.KeyProvider.AllowedLocations`.
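
Putting the two statements above together, a sketch of registering the AKV provider and limiting it to one vault (the vault host, credentials and database are placeholders; registration as a side effect of importing `akv` is an assumption based on the text above):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/microsoft/go-mssqldb"
	"github.com/microsoft/go-mssqldb/aecmk/akv"
)

func main() {
	// Restrict decryption to keys held in one known vault.
	akv.KeyProvider.AllowedLocations = append(akv.KeyProvider.AllowedLocations,
		"mykeyvault.vault.azure.net")

	// columnencryption=true turns Always Encrypted on for this connection.
	db, err := sql.Open("sqlserver",
		"sqlserver://sa:YourPassword@localhost:1433?database=mydb&columnencryption=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```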
## Important Notes
* [LastInsertId](https://golang.org/pkg/database/sql/#Result.LastInsertId) should
not be used with this driver (or SQL Server) due to how the TDS protocol
works. Please use the [OUTPUT Clause](https://docs.microsoft.com/en-us/sql/t-sql/queries/output-clause-transact-sql)
@ -409,6 +468,9 @@ db.QueryContext(ctx, `select * from t2 where user_name = @p1;`, mssql.VarChar(na
* A `namedpipe` package to support connections using named pipes (np:) on Windows
* A `sharedmemory` package to support connections using shared memory (lpc:) on Windows
* Dedicated Administrator Connection (DAC) is supported using `admin` protocol
* Always Encrypted
- `MSSQL_CERTIFICATE_STORE` provider on Windows
- `pfx` provider on Linux and Windows
## Tests
@ -449,6 +511,7 @@ To fix SQL Server 2008 R2 issue, install SQL Server 2008 R2 Service Pack 2.
To fix SQL Server 2008 issue, install Microsoft SQL Server 2008 Service Pack 3 and Cumulative update package 3 for SQL Server 2008 SP3.
More information: <http://support.microsoft.com/kb/2653857>
* Bulk copy does not yet support encrypting column values using Always Encrypted. Tracked in [#127](https://github.com/microsoft/go-mssqldb/issues/127)
# Contributing
This project is a fork of [https://github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) and welcomes new and previous contributors. For more informaton on contributing to this project, please see [Contributing](./CONTRIBUTING.md).

View file

@ -0,0 +1,112 @@
package aecmk
import (
"fmt"
"sync"
"time"
)
const (
CertificateStoreKeyProvider = "MSSQL_CERTIFICATE_STORE"
CspKeyProvider = "MSSQL_CSP_PROVIDER"
CngKeyProvider = "MSSQL_CNG_STORE"
AzureKeyVaultKeyProvider = "AZURE_KEY_VAULT"
JavaKeyProvider = "MSSQL_JAVA_KEYSTORE"
KeyEncryptionAlgorithm = "RSA_OAEP"
)
// ColumnEncryptionKeyLifetime is the default lifetime of decrypted Column Encryption Keys in the global cache.
// The default is 2 hours
var ColumnEncryptionKeyLifetime time.Duration = 2 * time.Hour
type cekCacheEntry struct {
Expiry time.Time
Key []byte
}
type cekCache map[string]cekCacheEntry
type CekProvider struct {
Provider ColumnEncryptionKeyProvider
decryptedKeys cekCache
mutex sync.Mutex
}
func NewCekProvider(provider ColumnEncryptionKeyProvider) *CekProvider {
return &CekProvider{Provider: provider, decryptedKeys: make(cekCache), mutex: sync.Mutex{}}
}
func (cp *CekProvider) GetDecryptedKey(keyPath string, encryptedBytes []byte) (decryptedKey []byte, err error) {
cp.mutex.Lock()
ev, cachedKey := cp.decryptedKeys[keyPath]
if cachedKey {
if ev.Expiry.Before(time.Now()) {
delete(cp.decryptedKeys, keyPath)
cachedKey = false
} else {
decryptedKey = ev.Key
}
}
// decrypting a key can take a while, so let multiple callers race
// Key providers can choose to optimize their own concurrency.
// For example - there's probably minimal value in serializing access to a local certificate,
// but there'd be high value in having a queue of waiters for decrypting a key stored in the cloud.
cp.mutex.Unlock()
if !cachedKey {
decryptedKey = cp.Provider.DecryptColumnEncryptionKey(keyPath, KeyEncryptionAlgorithm, encryptedBytes)
}
if !cachedKey {
duration := cp.Provider.KeyLifetime()
if duration == nil {
duration = &ColumnEncryptionKeyLifetime
}
expiry := time.Now().Add(*duration)
cp.mutex.Lock()
cp.decryptedKeys[keyPath] = cekCacheEntry{Expiry: expiry, Key: decryptedKey}
cp.mutex.Unlock()
}
return
}
// no synchronization on this map. Providers register during init.
type ColumnEncryptionKeyProviderMap map[string]*CekProvider
var globalCekProviderFactoryMap = ColumnEncryptionKeyProviderMap{}
// ColumnEncryptionKeyProvider is the interface for decrypting and encrypting column encryption keys.
// It is similar to .Net https://learn.microsoft.com/dotnet/api/microsoft.data.sqlclient.sqlcolumnencryptionkeystoreprovider.
type ColumnEncryptionKeyProvider interface {
// DecryptColumnEncryptionKey decrypts the specified encrypted value of a column encryption key.
// The encrypted value is expected to be encrypted using the column master key with the specified key path and using the specified algorithm.
DecryptColumnEncryptionKey(masterKeyPath string, encryptionAlgorithm string, encryptedCek []byte) []byte
// EncryptColumnEncryptionKey encrypts a column encryption key using the column master key with the specified key path and using the specified algorithm.
EncryptColumnEncryptionKey(masterKeyPath string, encryptionAlgorithm string, cek []byte) []byte
// SignColumnMasterKeyMetadata digitally signs the column master key metadata with the column master key
// referenced by the masterKeyPath parameter. The input values used to generate the signature should be the
// specified values of the masterKeyPath and allowEnclaveComputations parameters. May return an empty slice if not supported.
SignColumnMasterKeyMetadata(masterKeyPath string, allowEnclaveComputations bool) []byte
// VerifyColumnMasterKeyMetadata verifies the specified signature is valid for the column master key
// with the specified key path and the specified enclave behavior. Return nil if not supported.
VerifyColumnMasterKeyMetadata(masterKeyPath string, allowEnclaveComputations bool) *bool
// KeyLifetime is an optional Duration. Keys fetched by this provider will be discarded after their lifetime expires.
// If it returns nil, the keys will expire based on the value of ColumnEncryptionKeyLifetime.
// If it returns zero, the keys will not be cached.
KeyLifetime() *time.Duration
}
func RegisterCekProvider(name string, provider ColumnEncryptionKeyProvider) error {
_, ok := globalCekProviderFactoryMap[name]
if ok {
return fmt.Errorf("CEK provider %s is already registered", name)
}
globalCekProviderFactoryMap[name] = &CekProvider{Provider: provider, decryptedKeys: cekCache{}, mutex: sync.Mutex{}}
return nil
}
func GetGlobalCekProviders() (providers ColumnEncryptionKeyProviderMap) {
providers = make(ColumnEncryptionKeyProviderMap)
for i, p := range globalCekProviderFactoryMap {
providers[i] = p
}
return
}
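
Based on the `ColumnEncryptionKeyProvider` interface and `RegisterCekProvider` shown above, a custom key store could be plugged in roughly as follows; the provider below is a deliberately toy example (it does no real unwrapping) and the store name is made up:

```go
package myprovider

import (
	"time"

	"github.com/microsoft/go-mssqldb/aecmk"
)

// toyKeyProvider pretends the encrypted CEK bytes are already the plaintext key.
// A real provider would unwrap the CEK with the column master key identified by
// masterKeyPath (certificate, HSM, key vault, ...).
type toyKeyProvider struct{}

func (toyKeyProvider) DecryptColumnEncryptionKey(masterKeyPath string, encryptionAlgorithm string, encryptedCek []byte) []byte {
	return encryptedCek
}

func (toyKeyProvider) EncryptColumnEncryptionKey(masterKeyPath string, encryptionAlgorithm string, cek []byte) []byte {
	return cek
}

// Enclave metadata signing is optional; returning nil means "not supported".
func (toyKeyProvider) SignColumnMasterKeyMetadata(masterKeyPath string, allowEnclaveComputations bool) []byte {
	return nil
}

func (toyKeyProvider) VerifyColumnMasterKeyMetadata(masterKeyPath string, allowEnclaveComputations bool) *bool {
	return nil
}

// Cache decrypted keys for 10 minutes instead of the 2-hour default.
func (toyKeyProvider) KeyLifetime() *time.Duration {
	d := 10 * time.Minute
	return &d
}

func init() {
	// Register under a custom key-store name so the driver can find this provider
	// when a column's metadata references that store.
	if err := aecmk.RegisterCekProvider("MY_TOY_STORE", toyKeyProvider{}); err != nil {
		panic(err)
	}
}
```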

View file

@ -11,52 +11,29 @@ environment:
SQLUSER: sa
SQLPASSWORD: Password12!
DATABASE: test
GOVERSION: 113
GOVERSION: 117
COLUMNENCRYPTION:
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
RACE: -race -cpu 4
TAGS:
matrix:
- GOVERSION: 110
SQLINSTANCE: SQL2017
- GOVERSION: 111
SQLINSTANCE: SQL2017
- GOVERSION: 112
SQLINSTANCE: SQL2017
- SQLINSTANCE: SQL2017
- SQLINSTANCE: SQL2016
- SQLINSTANCE: SQL2014
- SQLINSTANCE: SQL2012SP1
- SQLINSTANCE: SQL2008R2SP2
# Go 1.14+ and SQL2019 are available on the Visual Studio 2019 image only
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
GOVERSION: 114
- GOVERSION: 118
SQLINSTANCE: SQL2017
- GOVERSION: 120
RACE:
SQLINSTANCE: SQL2019
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
GOVERSION: 115
SQLINSTANCE: SQL2019
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
GOVERSION: 115
SQLINSTANCE: SQL2017
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
GOVERSION: 116
SQLINSTANCE: SQL2017
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
GOVERSION: 117
SQLINSTANCE: SQL2017
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
GOVERSION: 118
SQLINSTANCE: SQL2017
COLUMNENCRYPTION: 1
# Cover 32bit and named pipes protocol
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
GOVERSION: 118-x86
- GOVERSION: 119-x86
SQLINSTANCE: SQL2017
GOARCH: 386
RACE:
PROTOCOL: np
TAGS: -tags np
# Cover SSPI and lpc protocol
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
GOVERSION: 118
- GOVERSION: 120
RACE:
SQLINSTANCE: SQL2019
PROTOCOL: lpc
TAGS: -tags sm
@ -67,9 +44,6 @@ install:
- set PATH=%GOPATH%\bin;%GOROOT%\bin;%PATH%
- go version
- go env
- go get -u github.com/golang-sql/civil
- go get -u github.com/golang-sql/sqlexp
- go get -u golang.org/x/crypto/md4
build_script:
- go build

View file

@ -250,6 +250,10 @@ func (b *Bulk) createColMetadata() []byte {
buf.WriteByte(byte(tokenColMetadata)) // token
binary.Write(buf, binary.LittleEndian, uint16(len(b.bulkColumns))) // column count
// TODO: Write a valid CEK table if any parameters have cekTableEntry values
if b.cn.sess.alwaysEncrypted {
binary.Write(buf, binary.LittleEndian, uint16(0))
}
for i, col := range b.bulkColumns {
if b.cn.sess.loginAck.TDSVersion >= verTDS72 {

View file

@ -0,0 +1,40 @@
package mssql
const (
CertificateStoreKeyProvider = "MSSQL_CERTIFICATE_STORE"
CspKeyProvider = "MSSQL_CSP_PROVIDER"
CngKeyProvider = "MSSQL_CNG_STORE"
AzureKeyVaultKeyProvider = "AZURE_KEY_VAULT"
JavaKeyProvider = "MSSQL_JAVA_KEYSTORE"
KeyEncryptionAlgorithm = "RSA_OAEP"
)
// cek ==> Column Encryption Key
// Every row of an encrypted table has an associated list of keys used to decrypt its columns
type cekTable struct {
entries []cekTableEntry
}
type encryptionKeyInfo struct {
encryptedKey []byte
databaseID int
cekID int
cekVersion int
cekMdVersion []byte
keyPath string
keyStoreName string
algorithmName string
}
type cekTableEntry struct {
databaseID int
keyId int
keyVersion int
mdVersion []byte
valueCount int
cekValues []encryptionKeyInfo
}
func newCekTable(size uint16) cekTable {
return cekTable{entries: make([]cekTableEntry, size)}
}

vendor/github.com/microsoft/go-mssqldb/encrypt.go (new file, generated, vendored)
View file

@ -0,0 +1,292 @@
package mssql
import (
"context"
"database/sql/driver"
"encoding/binary"
"fmt"
"io"
"strings"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/algorithms"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/encryption"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/keys"
)
type ColumnEncryptionType int
var (
ColumnEncryptionPlainText ColumnEncryptionType = 0
ColumnEncryptionDeterministic ColumnEncryptionType = 1
ColumnEncryptionRandomized ColumnEncryptionType = 2
)
type cekData struct {
ordinal int
database_id int
id int
version int
metadataVersion []byte
encryptedValue []byte
cmkStoreName string
cmkPath string
algorithm string
//byEnclave bool
//cmkSignature string
decryptedValue []byte
}
type parameterEncData struct {
ordinal int
name string
algorithm int
encType ColumnEncryptionType
cekOrdinal int
ruleVersion int
}
type paramMapEntry struct {
cek *cekData
p *parameterEncData
}
// when Always Encrypted is turned on, we have to ask the server for metadata about how to encrypt input parameters.
// This function stores the relevant encryption parameters in a copy of the args so they can be
// encrypted just before being sent to the server
func (s *Stmt) encryptArgs(ctx context.Context, args []namedValue) (encryptedArgs []namedValue, err error) {
q := Stmt{c: s.c,
paramCount: s.paramCount,
query: "sp_describe_parameter_encryption",
skipEncryption: true,
}
oldouts := s.c.outs
s.c.clearOuts()
newArgs, err := s.prepareEncryptionQuery(isProc(s.query), s.query, args)
if err != nil {
return
}
// TODO: Consider not using recursion.
rows, err := q.queryContext(ctx, newArgs)
if err != nil {
s.c.outs = oldouts
return
}
cekInfo, paramsInfo, err := processDescribeParameterEncryption(rows)
rows.Close()
s.c.outs = oldouts
if err != nil {
return
}
if len(cekInfo) == 0 {
return args, nil
}
err = s.decryptCek(cekInfo)
if err != nil {
return
}
paramMap := make(map[string]paramMapEntry)
for _, p := range paramsInfo {
if p.encType == ColumnEncryptionPlainText {
paramMap[p.name] = paramMapEntry{nil, p}
} else {
paramMap[p.name] = paramMapEntry{cekInfo[p.cekOrdinal-1], p}
}
}
encryptedArgs = make([]namedValue, len(args))
for i, a := range args {
encryptedArgs[i] = a
name := ""
if len(a.Name) > 0 {
name = "@" + a.Name
} else {
name = fmt.Sprintf("@p%d", a.Ordinal)
}
info := paramMap[name]
if info.p.encType == ColumnEncryptionPlainText || a.Value == nil {
continue
}
encryptedArgs[i].encrypt = getEncryptor(info)
}
return encryptedArgs, nil
}
// returns the arguments to sp_describe_parameter_encryption
// sp_describe_parameter_encryption
// [ @tsql = ] N'Transact-SQL_batch' ,
// [ @params = ] N'parameters'
// [ ;]
func (s *Stmt) prepareEncryptionQuery(isProc bool, q string, args []namedValue) (newArgs []namedValue, err error) {
newArgs = make([]namedValue, 2)
if isProc {
newArgs[0] = namedValue{Name: "tsql", Ordinal: 0, Value: buildStoredProcedureStatementForColumnEncryption(q, args)}
} else {
newArgs[0] = namedValue{Name: "tsql", Ordinal: 0, Value: q}
}
params, err := s.buildParametersForColumnEncryption(args)
if err != nil {
return
}
newArgs[1] = namedValue{Name: "params", Ordinal: 1, Value: params}
return
}
func (s *Stmt) buildParametersForColumnEncryption(args []namedValue) (parameters string, err error) {
_, decls, err := s.makeRPCParams(args, false)
if err != nil {
return
}
parameters = strings.Join(decls, ", ")
return
}
func (s *Stmt) decryptCek(cekInfo []*cekData) error {
for _, info := range cekInfo {
kp, ok := s.c.sess.aeSettings.keyProviders[info.cmkStoreName]
if !ok {
return fmt.Errorf("No provider found for key store %s", info.cmkStoreName)
}
dk, err := kp.GetDecryptedKey(info.cmkPath, info.encryptedValue)
if err != nil {
return err
}
info.decryptedValue = dk
}
return nil
}
func getEncryptor(info paramMapEntry) valueEncryptor {
k := keys.NewAeadAes256CbcHmac256(info.cek.decryptedValue)
alg := algorithms.NewAeadAes256CbcHmac256Algorithm(k, encryption.From(byte(info.p.encType)), byte(info.cek.version))
// Metadata to append to an encrypted parameter. Doesn't include original typeinfo
// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-tds/619c43b6-9495-4a58-9e49-a4950db245b3
// ParamCipherInfo = TYPE_INFO
// EncryptionAlgo (byte)
// [AlgoName] (b_varchar) unused, no custom algorithm
// EncryptionType (byte)
// DatabaseId (ulong)
// CekId (ulong)
// CekVersion (ulong)
// CekMDVersion (ulonglong) - really a byte array
// NormVersion (byte)
// algo+ enctype+ dbid+ keyid+ keyver+ normversion
metadataLen := 1 + 1 + 4 + 4 + 4 + 1
metadataLen += len(info.cek.metadataVersion)
metadata := make([]byte, metadataLen)
offset := 0
// AEAD_AES_256_CBC_HMAC_SHA256
metadata[offset] = byte(info.p.algorithm)
offset++
metadata[offset] = byte(info.p.encType)
offset++
binary.LittleEndian.PutUint32(metadata[offset:], uint32(info.cek.database_id))
offset += 4
binary.LittleEndian.PutUint32(metadata[offset:], uint32(info.cek.id))
offset += 4
binary.LittleEndian.PutUint32(metadata[offset:], uint32(info.cek.version))
offset += 4
copy(metadata[offset:], info.cek.metadataVersion)
offset += len(info.cek.metadataVersion)
metadata[offset] = byte(info.p.ruleVersion)
return func(b []byte) ([]byte, []byte, error) {
encryptedData, err := alg.Encrypt(b)
if err != nil {
return nil, nil, err
}
return encryptedData, metadata, nil
}
}
// Based on the .Net implementation at https://github.com/dotnet/SqlClient/blob/2b31810ce69b88d707450e2059ee8fbde63f774f/src/Microsoft.Data.SqlClient/netcore/src/Microsoft/Data/SqlClient/SqlCommand.cs#L6040
func buildStoredProcedureStatementForColumnEncryption(sproc string, args []namedValue) string {
b := new(strings.Builder)
_, _ = b.WriteString("EXEC ")
q := TSQLQuoter{}
sproc = q.ID(sproc)
b.WriteString(sproc)
// Unlike ADO.Net, go-mssqldb doesn't support ReturnValue named parameters
first := true
for _, a := range args {
if !first {
b.WriteRune(',')
}
first = false
b.WriteRune(' ')
name := a.Name
if len(name) == 0 {
name = fmt.Sprintf("@p%d", a.Ordinal)
}
appendPrefixedParameterName(b, name)
if len(a.Name) > 0 {
b.WriteRune('=')
appendPrefixedParameterName(b, a.Name)
}
if isOutputValue(a.Value) {
b.WriteString(" OUTPUT")
}
}
return b.String()
}
func appendPrefixedParameterName(b *strings.Builder, p string) {
if len(p) > 0 {
if p[0] != '@' {
b.WriteRune('@')
}
b.WriteString(p)
}
}
func processDescribeParameterEncryption(rows driver.Rows) (cekInfo []*cekData, paramInfo []*parameterEncData, err error) {
cekInfo = make([]*cekData, 0)
values := make([]driver.Value, 9)
qerr := rows.Next(values)
for qerr == nil {
cekInfo = append(cekInfo, &cekData{ordinal: int(values[0].(int64)),
database_id: int(values[1].(int64)),
id: int(values[2].(int64)),
version: int(values[3].(int64)),
metadataVersion: values[4].([]byte),
encryptedValue: values[5].([]byte),
cmkStoreName: values[6].(string),
cmkPath: values[7].(string),
algorithm: values[8].(string),
})
qerr = rows.Next(values)
}
if len(cekInfo) == 0 || qerr != io.EOF {
if qerr != io.EOF {
err = qerr
}
// No encryption needed
return
}
r := rows.(driver.RowsNextResultSet)
err = r.NextResultSet()
if err != nil {
return
}
paramInfo = make([]*parameterEncData, 0)
qerr = rows.Next(values[:6])
for qerr == nil {
paramInfo = append(paramInfo, &parameterEncData{ordinal: int(values[0].(int64)),
name: values[1].(string),
algorithm: int(values[2].(int64)),
encType: ColumnEncryptionType(values[3].(int64)),
cekOrdinal: int(values[4].(int64)),
ruleVersion: int(values[5].(int64)),
})
qerr = rows.Next(values[:6])
}
if len(paramInfo) == 0 || qerr != io.EOF {
if qerr != io.EOF {
err = qerr
} else {
err = fmt.Errorf("No parameter encryption rows were returned from sp_describe_parameter_encryption")
}
}
return
}

View file

@ -0,0 +1,20 @@
Copyright (c) 2021 Swisscom (Switzerland) Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -0,0 +1,120 @@
package algorithms
import (
"crypto/rand"
"crypto/subtle"
"fmt"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/crypto"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/encryption"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/keys"
)
// https://tools.ietf.org/html/draft-mcgrew-aead-aes-cbc-hmac-sha2-05
// https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-TDS/%5bMS-TDS%5d.pdf
var _ Algorithm = &AeadAes256CbcHmac256Algorithm{}
type AeadAes256CbcHmac256Algorithm struct {
algorithmVersion byte
deterministic bool
blockSizeBytes int
keySizeBytes int
minimumCipherTextLengthBytesNoAuthTag int
minimumCipherTextLengthBytesWithAuthTag int
cek keys.AeadAes256CbcHmac256
version []byte
versionSize []byte
}
func NewAeadAes256CbcHmac256Algorithm(key keys.AeadAes256CbcHmac256, encType encryption.Type, algorithmVersion byte) AeadAes256CbcHmac256Algorithm {
const keySizeBytes = 256 / 8
const blockSizeBytes = 16
const minimumCipherTextLengthBytesNoAuthTag = 1 + 2*blockSizeBytes
const minimumCipherTextLengthBytesWithAuthTag = minimumCipherTextLengthBytesNoAuthTag + keySizeBytes
a := AeadAes256CbcHmac256Algorithm{
algorithmVersion: algorithmVersion,
deterministic: encType.Deterministic,
blockSizeBytes: blockSizeBytes,
keySizeBytes: keySizeBytes,
cek: key,
minimumCipherTextLengthBytesNoAuthTag: minimumCipherTextLengthBytesNoAuthTag,
minimumCipherTextLengthBytesWithAuthTag: minimumCipherTextLengthBytesWithAuthTag,
version: []byte{0x01},
versionSize: []byte{1},
}
a.version[0] = algorithmVersion
return a
}
func (a *AeadAes256CbcHmac256Algorithm) Encrypt(cleartext []byte) ([]byte, error) {
buf := make([]byte, 0)
var iv []byte
if a.deterministic {
iv = crypto.Sha256Hmac(cleartext, a.cek.IvKey())
if len(iv) > a.blockSizeBytes {
iv = iv[:a.blockSizeBytes]
}
} else {
iv = make([]byte, a.blockSizeBytes)
_, err := rand.Read(iv)
if err != nil {
panic(err)
}
}
buf = append(buf, a.algorithmVersion)
aescdbc := crypto.NewAESCbcPKCS5(a.cek.EncryptionKey(), iv)
ciphertext := aescdbc.Encrypt(cleartext)
authTag := a.prepareAuthTag(iv, ciphertext)
buf = append(buf, authTag...)
buf = append(buf, iv...)
buf = append(buf, ciphertext...)
return buf, nil
}
func (a *AeadAes256CbcHmac256Algorithm) Decrypt(ciphertext []byte) ([]byte, error) {
// This algorithm always has the auth tag!
minimumCiphertextLength := a.minimumCipherTextLengthBytesWithAuthTag
if len(ciphertext) < minimumCiphertextLength {
return nil, fmt.Errorf("invalid ciphertext length: at least %v bytes expected", minimumCiphertextLength)
}
idx := 0
if ciphertext[idx] != a.algorithmVersion {
return nil, fmt.Errorf("invalid algorithm version used: %v found but %v expected", ciphertext[idx],
a.algorithmVersion)
}
idx++
authTag := ciphertext[idx : idx+a.keySizeBytes]
idx += a.keySizeBytes
iv := ciphertext[idx : idx+a.blockSizeBytes]
idx += len(iv)
realCiphertext := ciphertext[idx:]
ourAuthTag := a.prepareAuthTag(iv, realCiphertext)
// bytes.Compare is subject to timing attacks
if subtle.ConstantTimeCompare(ourAuthTag, authTag) != 1 {
return nil, fmt.Errorf("invalid auth tag")
}
// decrypt
aescdbc := crypto.NewAESCbcPKCS5(a.cek.EncryptionKey(), iv)
cleartext := aescdbc.Decrypt(realCiphertext)
return cleartext, nil
}
func (a *AeadAes256CbcHmac256Algorithm) prepareAuthTag(iv []byte, ciphertext []byte) []byte {
var input = make([]byte, 0)
input = append(input, a.algorithmVersion)
input = append(input, iv...)
input = append(input, ciphertext...)
input = append(input, a.versionSize...)
return crypto.Sha256Hmac(input, a.cek.MacKey())
}
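The cell format produced above is: algorithm-version byte, then the HMAC-SHA256 auth tag, then the IV, then the AES-256-CBC ciphertext, with the auth tag computed over version || IV || ciphertext || version-size. A standalone sketch of that framing using only the standard library (the keys here are random placeholders; the driver derives its real keys as shown in the keys package further down):

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// pkcs7Pad pads to a whole AES block, like the driver's PKCS5Padding helper.
func pkcs7Pad(in []byte, blockSize int) []byte {
	n := blockSize - len(in)%blockSize
	return append(in, bytes.Repeat([]byte{byte(n)}, n)...)
}

func main() {
	encKey := make([]byte, 32) // placeholder keys for illustration only
	macKey := make([]byte, 32)
	rand.Read(encKey)
	rand.Read(macKey)

	iv := make([]byte, aes.BlockSize)
	rand.Read(iv)

	block, _ := aes.NewCipher(encKey)
	padded := pkcs7Pad([]byte("demo cell value"), aes.BlockSize)
	ct := make([]byte, len(padded))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ct, padded)

	// Auth tag over version || iv || ciphertext || version-size, as in prepareAuthTag above.
	mac := hmac.New(sha256.New, macKey)
	mac.Write([]byte{0x01})
	mac.Write(iv)
	mac.Write(ct)
	mac.Write([]byte{0x01})
	tag := mac.Sum(nil)

	// Framed cell: version || tag || iv || ciphertext.
	cell := []byte{0x01}
	cell = append(cell, tag...)
	cell = append(cell, iv...)
	cell = append(cell, ct...)
	fmt.Printf("cell is %d bytes: 1 version + 32 tag + 16 iv + %d ciphertext\n", len(cell), len(ct))
}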


@ -0,0 +1,6 @@
package algorithms
type Algorithm interface {
Encrypt([]byte) ([]byte, error)
Decrypt([]byte) ([]byte, error)
}


@ -0,0 +1,69 @@
package crypto
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"fmt"
)
// Inspired by: https://gist.github.com/hothero/7d085573f5cb7cdb5801d7adcf66dcf3
type AESCbcPKCS5 struct {
key []byte
iv []byte
block cipher.Block
}
func NewAESCbcPKCS5(key []byte, iv []byte) AESCbcPKCS5 {
a := AESCbcPKCS5{
key: key,
iv: iv,
block: nil,
}
a.initCipher()
return a
}
func (a AESCbcPKCS5) Encrypt(cleartext []byte) (cipherText []byte) {
if a.block == nil {
a.initCipher()
}
blockMode := cipher.NewCBCEncrypter(a.block, a.iv)
paddedCleartext := PKCS5Padding(cleartext, blockMode.BlockSize())
cipherText = make([]byte, len(paddedCleartext))
blockMode.CryptBlocks(cipherText, paddedCleartext)
return
}
func (a AESCbcPKCS5) Decrypt(ciphertext []byte) []byte {
if a.block == nil {
a.initCipher()
}
blockMode := cipher.NewCBCDecrypter(a.block, a.iv)
var cleartext = make([]byte, len(ciphertext))
blockMode.CryptBlocks(cleartext, ciphertext)
return PKCS5Trim(cleartext)
}
func PKCS5Padding(inArr []byte, blockSize int) []byte {
padding := blockSize - len(inArr)%blockSize
padText := bytes.Repeat([]byte{byte(padding)}, padding)
return append(inArr, padText...)
}
func PKCS5Trim(inArr []byte) []byte {
padding := inArr[len(inArr)-1]
return inArr[:len(inArr)-int(padding)]
}
func (a *AESCbcPKCS5) initCipher() {
block, err := aes.NewCipher(a.key)
if err != nil {
panic(fmt.Errorf("unable to create cipher: %v", err))
}
a.block = block
}


@ -0,0 +1,12 @@
package crypto
import (
"crypto/hmac"
"crypto/sha256"
)
func Sha256Hmac(input []byte, key []byte) []byte {
sha256Hmac := hmac.New(sha256.New, key)
sha256Hmac.Write(input)
return sha256Hmac.Sum(nil)
}


@ -0,0 +1,37 @@
package encryption
type Type struct {
Deterministic bool
Name string
Value byte
}
var Plaintext = Type{
Deterministic: false,
Name: "Plaintext",
Value: 0,
}
var Deterministic = Type{
Deterministic: true,
Name: "Deterministic",
Value: 1,
}
var Randomized = Type{
Deterministic: false,
Name: "Randomized",
Value: 2,
}
func From(encType byte) Type {
switch encType {
case 0:
return Plaintext
case 1:
return Deterministic
case 2:
return Randomized
}
return Plaintext
}


@ -0,0 +1,51 @@
package keys
import (
"fmt"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/crypto"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/utils"
)
var _ Key = &AeadAes256CbcHmac256{}
type AeadAes256CbcHmac256 struct {
rootKey []byte
encryptionKey []byte
macKey []byte
ivKey []byte
}
func NewAeadAes256CbcHmac256(rootKey []byte) AeadAes256CbcHmac256 {
const keySize = 256
const encryptionKeySaltFormat = "Microsoft SQL Server cell encryption key with encryption algorithm:%v and key length:%v"
const macKeySaltFormat = "Microsoft SQL Server cell MAC key with encryption algorithm:%v and key length:%v"
const ivKeySaltFormat = "Microsoft SQL Server cell IV key with encryption algorithm:%v and key length:%v"
const algorithmName = "AEAD_AES_256_CBC_HMAC_SHA256"
encryptionKeySalt := utils.ProcessUTF16LE(fmt.Sprintf(encryptionKeySaltFormat, algorithmName, keySize))
macKeySalt := utils.ProcessUTF16LE(fmt.Sprintf(macKeySaltFormat, algorithmName, keySize))
ivKeySalt := utils.ProcessUTF16LE(fmt.Sprintf(ivKeySaltFormat, algorithmName, keySize))
return AeadAes256CbcHmac256{
rootKey: rootKey,
encryptionKey: crypto.Sha256Hmac(encryptionKeySalt, rootKey),
macKey: crypto.Sha256Hmac(macKeySalt, rootKey),
ivKey: crypto.Sha256Hmac(ivKeySalt, rootKey)}
}
func (a AeadAes256CbcHmac256) IvKey() []byte {
return a.ivKey
}
func (a AeadAes256CbcHmac256) MacKey() []byte {
return a.macKey
}
func (a AeadAes256CbcHmac256) EncryptionKey() []byte {
return a.encryptionKey
}
func (a AeadAes256CbcHmac256) RootKey() []byte {
return a.rootKey
}
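NewAeadAes256CbcHmac256 derives the three working keys by HMAC-SHA256ing fixed salt strings, encoded as UTF-16LE, with the root column encryption key. A standalone sketch of that derivation with the standard library (the root key below is a placeholder):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"unicode/utf16"
)

// utf16le encodes a string as UTF-16 little-endian bytes, like utils.ProcessUTF16LE.
func utf16le(s string) []byte {
	u := utf16.Encode([]rune(s))
	b := make([]byte, 2*len(u))
	for i, v := range u {
		binary.LittleEndian.PutUint16(b[i*2:], v)
	}
	return b
}

func hmacSHA256(msg, key []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(msg)
	return h.Sum(nil)
}

func main() {
	rootKey := make([]byte, 32) // placeholder root column encryption key
	const alg, keyLen = "AEAD_AES_256_CBC_HMAC_SHA256", 256
	encSalt := fmt.Sprintf("Microsoft SQL Server cell encryption key with encryption algorithm:%v and key length:%v", alg, keyLen)
	macSalt := fmt.Sprintf("Microsoft SQL Server cell MAC key with encryption algorithm:%v and key length:%v", alg, keyLen)
	ivSalt := fmt.Sprintf("Microsoft SQL Server cell IV key with encryption algorithm:%v and key length:%v", alg, keyLen)

	encKey := hmacSHA256(utf16le(encSalt), rootKey)
	macKey := hmacSHA256(utf16le(macSalt), rootKey)
	ivKey := hmacSHA256(utf16le(ivSalt), rootKey)
	fmt.Println(len(encKey), len(macKey), len(ivKey)) // 32 32 32
}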


@ -0,0 +1,5 @@
package keys
type Key interface {
RootKey() []byte
}


@ -0,0 +1,18 @@
package utils
import (
"encoding/binary"
"unicode/utf16"
)
func ConvertUTF16ToLittleEndianBytes(u []uint16) []byte {
b := make([]byte, 2*len(u))
for index, value := range u {
binary.LittleEndian.PutUint16(b[index*2:], value)
}
return b
}
func ProcessUTF16LE(inputString string) []byte {
return ConvertUTF16ToLittleEndianBytes(utf16.Encode([]rune(inputString)))
}


@ -21,10 +21,17 @@ type (
BrowserMsg byte
)
const (
DsnTypeURL = 1
DsnTypeOdbc = 2
DsnTypeAdo = 3
)
const (
EncryptionOff = 0
EncryptionRequired = 1
EncryptionDisabled = 3
EncryptionStrict = 4
)
const (
@ -44,6 +51,34 @@ const (
BrowserDAC BrowserMsg = 0x0f
)
const (
Database = "database"
Encrypt = "encrypt"
Password = "password"
ChangePassword = "change password"
UserID = "user id"
Port = "port"
TrustServerCertificate = "trustservercertificate"
Certificate = "certificate"
TLSMin = "tlsmin"
PacketSize = "packet size"
LogParam = "log"
ConnectionTimeout = "connection timeout"
HostNameInCertificate = "hostnameincertificate"
KeepAlive = "keepalive"
ServerSpn = "serverspn"
WorkstationID = "workstation id"
AppName = "app name"
ApplicationIntent = "applicationintent"
FailoverPartner = "failoverpartner"
FailOverPort = "failoverport"
DisableRetry = "disableretry"
Server = "server"
Protocol = "protocol"
DialTimeout = "dial timeout"
Pipe = "pipe"
)
type Config struct {
Port uint64
Host string
@ -88,6 +123,10 @@ type Config struct {
ProtocolParameters map[string]interface{}
// BrowserMsg is the message identifier to fetch instance data from SQL browser
BrowserMessage BrowserMsg
// ChangePassword is used to set the login's password during login. Ignored for non-SQL authentication.
ChangePassword string
//ColumnEncryption is true if the application needs to decrypt or encrypt Always Encrypted values
ColumnEncryption bool
}
// Build a tls.Config object from the supplied certificate.
@ -128,24 +167,26 @@ func parseTLS(params map[string]string, host string) (Encryption, *tls.Config, e
trustServerCert := false
var encryption Encryption = EncryptionOff
encrypt, ok := params["encrypt"]
encrypt, ok := params[Encrypt]
if ok {
if strings.EqualFold(encrypt, "DISABLE") {
encrypt = strings.ToLower(encrypt)
switch encrypt {
case "mandatory", "yes", "1", "t", "true":
encryption = EncryptionRequired
case "disable":
encryption = EncryptionDisabled
} else {
e, err := strconv.ParseBool(encrypt)
if err != nil {
f := "invalid encrypt '%s': %s"
return encryption, nil, fmt.Errorf(f, encrypt, err.Error())
}
if e {
encryption = EncryptionRequired
}
case "strict":
encryption = EncryptionStrict
case "optional", "no", "0", "f", "false":
encryption = EncryptionOff
default:
f := "invalid encrypt '%s'"
return encryption, nil, fmt.Errorf(f, encrypt)
}
} else {
trustServerCert = true
}
trust, ok := params["trustservercertificate"]
trust, ok := params[TrustServerCertificate]
if ok {
var err error
trustServerCert, err = strconv.ParseBool(trust)
@ -154,9 +195,12 @@ func parseTLS(params map[string]string, host string) (Encryption, *tls.Config, e
return encryption, nil, fmt.Errorf(f, trust, err.Error())
}
}
certificate := params["certificate"]
certificate := params[Certificate]
if encryption != EncryptionDisabled {
tlsMin := params["tlsmin"]
tlsMin := params[TLSMin]
if encrypt == "strict" {
trustServerCert = false
}
tlsConfig, err := SetupTLS(certificate, trustServerCert, host, tlsMin)
if err != nil {
return encryption, nil, fmt.Errorf("failed to setup TLS: %w", err)
@ -168,6 +212,38 @@ func parseTLS(params map[string]string, host string) (Encryption, *tls.Config, e
var skipSetup = errors.New("skip setting up TLS")
func getDsnType(dsn string) int {
if strings.HasPrefix(dsn, "sqlserver://") {
return DsnTypeURL
}
if strings.HasPrefix(dsn, "odbc:") {
return DsnTypeOdbc
}
return DsnTypeAdo
}
func getDsnParams(dsn string) (map[string]string, error) {
var params map[string]string
var err error
switch getDsnType(dsn) {
case DsnTypeOdbc:
params, err = splitConnectionStringOdbc(dsn[len("odbc:"):])
if err != nil {
return params, err
}
case DsnTypeURL:
params, err = splitConnectionStringURL(dsn)
if err != nil {
return params, err
}
default:
params = splitConnectionString(dsn)
}
return params, nil
}
func Parse(dsn string) (Config, error) {
p := Config{
ProtocolParameters: map[string]interface{}{},
@ -176,23 +252,14 @@ func Parse(dsn string) (Config, error) {
var params map[string]string
var err error
if strings.HasPrefix(dsn, "odbc:") {
params, err = splitConnectionStringOdbc(dsn[len("odbc:"):])
if err != nil {
return p, err
}
} else if strings.HasPrefix(dsn, "sqlserver://") {
params, err = splitConnectionStringURL(dsn)
if err != nil {
return p, err
}
} else {
params = splitConnectionString(dsn)
}
params, err = getDsnParams(dsn)
if err != nil {
return p, err
}
p.Parameters = params
strlog, ok := params["log"]
strlog, ok := params[LogParam]
if ok {
flags, err := strconv.ParseUint(strlog, 10, 64)
if err != nil {
@ -201,12 +268,12 @@ func Parse(dsn string) (Config, error) {
p.LogFlags = Log(flags)
}
p.Database = params["database"]
p.User = params["user id"]
p.Password = params["password"]
p.Database = params[Database]
p.User = params[UserID]
p.Password = params[Password]
p.ChangePassword = params[ChangePassword]
p.Port = 0
strport, ok := params["port"]
strport, ok := params[Port]
if ok {
var err error
p.Port, err = strconv.ParseUint(strport, 10, 16)
@ -217,7 +284,7 @@ func Parse(dsn string) (Config, error) {
}
// https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-network-packet-size-server-configuration-option\
strpsize, ok := params["packet size"]
strpsize, ok := params[PacketSize]
if ok {
var err error
psize, err := strconv.ParseUint(strpsize, 0, 16)
@ -242,7 +309,7 @@ func Parse(dsn string) (Config, error) {
//
// Do not set a connection timeout. Use Context to manage such things.
// Default to zero, but still allow it to be set.
if strconntimeout, ok := params["connection timeout"]; ok {
if strconntimeout, ok := params[ConnectionTimeout]; ok {
timeout, err := strconv.ParseUint(strconntimeout, 10, 64)
if err != nil {
f := "invalid connection timeout '%v': %v"
@ -254,7 +321,7 @@ func Parse(dsn string) (Config, error) {
// default keep alive should be 30 seconds according to spec:
// https://msdn.microsoft.com/en-us/library/dd341108.aspx
p.KeepAlive = 30 * time.Second
if keepAlive, ok := params["keepalive"]; ok {
if keepAlive, ok := params[KeepAlive]; ok {
timeout, err := strconv.ParseUint(keepAlive, 10, 64)
if err != nil {
f := "invalid keepAlive value '%s': %s"
@ -263,12 +330,12 @@ func Parse(dsn string) (Config, error) {
p.KeepAlive = time.Duration(timeout) * time.Second
}
serverSPN, ok := params["serverspn"]
serverSPN, ok := params[ServerSpn]
if ok {
p.ServerSPN = serverSPN
} // If not set by the app, ServerSPN will be set by the successful dialer.
workstation, ok := params["workstation id"]
workstation, ok := params[WorkstationID]
if ok {
p.Workstation = workstation
} else {
@ -278,13 +345,13 @@ func Parse(dsn string) (Config, error) {
}
}
appname, ok := params["app name"]
appname, ok := params[AppName]
if !ok {
appname = "go-mssqldb"
}
p.AppName = appname
appintent, ok := params["applicationintent"]
appintent, ok := params[ApplicationIntent]
if ok {
if appintent == "ReadOnly" {
if p.Database == "" {
@ -294,12 +361,12 @@ func Parse(dsn string) (Config, error) {
}
}
failOverPartner, ok := params["failoverpartner"]
failOverPartner, ok := params[FailoverPartner]
if ok {
p.FailOverPartner = failOverPartner
}
failOverPort, ok := params["failoverport"]
failOverPort, ok := params[FailOverPort]
if ok {
var err error
p.FailOverPort, err = strconv.ParseUint(failOverPort, 0, 16)
@ -309,7 +376,7 @@ func Parse(dsn string) (Config, error) {
}
}
disableRetry, ok := params["disableretry"]
disableRetry, ok := params[DisableRetry]
if ok {
var err error
p.DisableRetry, err = strconv.ParseBool(disableRetry)
@ -321,8 +388,8 @@ func Parse(dsn string) (Config, error) {
p.DisableRetry = disableRetryDefault
}
server := params["server"]
protocol, ok := params["protocol"]
server := params[Server]
protocol, ok := params[Protocol]
for _, parser := range ProtocolParsers {
if (!ok && !parser.Hidden()) || parser.Protocol() == protocol {
@ -348,7 +415,7 @@ func Parse(dsn string) (Config, error) {
f = 1
}
p.DialTimeout = time.Duration(15*f) * time.Second
if strdialtimeout, ok := params["dial timeout"]; ok {
if strdialtimeout, ok := params[DialTimeout]; ok {
timeout, err := strconv.ParseUint(strdialtimeout, 10, 64)
if err != nil {
f := "invalid dial timeout '%v': %v"
@ -358,7 +425,7 @@ func Parse(dsn string) (Config, error) {
p.DialTimeout = time.Duration(timeout) * time.Second
}
hostInCertificate, ok := params["hostnameincertificate"]
hostInCertificate, ok := params[HostNameInCertificate]
if ok {
p.HostInCertificateProvided = true
} else {
@ -371,6 +438,19 @@ func Parse(dsn string) (Config, error) {
return p, err
}
if c, ok := params["columnencryption"]; ok {
columnEncryption, err := strconv.ParseBool(c)
if err != nil {
if strings.EqualFold(c, "Enabled") {
columnEncryption = true
} else if strings.EqualFold(c, "Disabled") {
columnEncryption = false
} else {
return p, fmt.Errorf("invalid columnencryption '%v' : %v", columnEncryption, err.Error())
}
}
p.ColumnEncryption = columnEncryption
}
return p, nil
}
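As a usage sketch, the public msdsn package parses any of the supported DSN styles into a Config; the host, credentials and database below are invented:

package main

import (
	"fmt"

	"github.com/microsoft/go-mssqldb/msdsn"
)

func main() {
	// URL-style DSN asking for strict (TDS 8.0) encryption and Always Encrypted support.
	cfg, err := msdsn.Parse("sqlserver://appuser:secret@dbhost:1433?database=appdb&encrypt=strict&columnencryption=true")
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Host, cfg.Port, cfg.Database)
	fmt.Println(cfg.Encryption == msdsn.EncryptionStrict, cfg.ColumnEncryption) // true true
}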
@ -379,10 +459,10 @@ func Parse(dsn string) (Config, error) {
func (p Config) URL() *url.URL {
q := url.Values{}
if p.Database != "" {
q.Add("database", p.Database)
q.Add(Database, p.Database)
}
if p.LogFlags != 0 {
q.Add("log", strconv.FormatUint(uint64(p.LogFlags), 10))
q.Add(LogParam, strconv.FormatUint(uint64(p.LogFlags), 10))
}
host := p.Host
protocol := ""
@ -397,8 +477,8 @@ func (p Config) URL() *url.URL {
if p.Port > 0 {
host = fmt.Sprintf("%s:%d", host, p.Port)
}
q.Add("disableRetry", fmt.Sprintf("%t", p.DisableRetry))
protocolParam, ok := p.Parameters["protocol"]
q.Add(DisableRetry, fmt.Sprintf("%t", p.DisableRetry))
protocolParam, ok := p.Parameters[Protocol]
if ok {
if protocol != "" && protocolParam != protocol {
panic("Mismatched protocol parameters!")
@ -406,11 +486,11 @@ func (p Config) URL() *url.URL {
protocol = protocolParam
}
if protocol != "" {
q.Add("protocol", protocol)
q.Add(Protocol, protocol)
}
pipe, ok := p.Parameters["pipe"]
pipe, ok := p.Parameters[Pipe]
if ok {
q.Add("pipe", pipe)
q.Add(Pipe, pipe)
}
res := url.URL{
Scheme: "sqlserver",
@ -420,7 +500,17 @@ func (p Config) URL() *url.URL {
if p.Instance != "" {
res.Path = p.Instance
}
q.Add("dial timeout", strconv.FormatFloat(float64(p.DialTimeout.Seconds()), 'f', 0, 64))
q.Add(DialTimeout, strconv.FormatFloat(float64(p.DialTimeout.Seconds()), 'f', 0, 64))
switch p.Encryption {
case EncryptionDisabled:
q.Add(Encrypt, "DISABLE")
case EncryptionRequired:
q.Add(Encrypt, "true")
}
if p.ColumnEncryption {
q.Add("columnencryption", "true")
}
if len(q) > 0 {
res.RawQuery = q.Encode()
}
@ -428,15 +518,17 @@ func (p Config) URL() *url.URL {
return &res
}
// ADO connection string keywords at https://github.com/dotnet/SqlClient/blob/main/src/Microsoft.Data.SqlClient/src/Microsoft/Data/Common/DbConnectionStringCommon.cs
var adoSynonyms = map[string]string{
"application name": "app name",
"data source": "server",
"address": "server",
"network address": "server",
"addr": "server",
"user": "user id",
"uid": "user id",
"initial catalog": "database",
"application name": AppName,
"data source": Server,
"address": Server,
"network address": Server,
"addr": Server,
"user": UserID,
"uid": UserID,
"initial catalog": Database,
"column encryption setting": "columnencryption",
}
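For example, these three connection strings parse to the same core settings — ADO style (using the synonyms above), URL style and ODBC style; the server and credentials are made up:

package main

import (
	"fmt"

	"github.com/microsoft/go-mssqldb/msdsn"
)

func main() {
	dsns := []string{
		// ADO style: "data source" and "uid" are synonyms for server and user id,
		// and "column encryption setting=enabled" maps to columnencryption.
		"data source=tcp:dbhost,1433;uid=appuser;password=secret;initial catalog=appdb;column encryption setting=enabled",
		// URL style.
		"sqlserver://appuser:secret@dbhost:1433?database=appdb&columnencryption=true",
		// ODBC style.
		"odbc:server=dbhost;port=1433;user id=appuser;password=secret;database=appdb;columnencryption=true",
	}
	for _, dsn := range dsns {
		cfg, err := msdsn.Parse(dsn)
		if err != nil {
			panic(err)
		}
		fmt.Println(cfg.Host, cfg.Port, cfg.User, cfg.Database, cfg.ColumnEncryption)
	}
}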
func splitConnectionString(dsn string) (res map[string]string) {
@ -460,18 +552,18 @@ func splitConnectionString(dsn string) (res map[string]string) {
name = synonym
}
// "server" in ADO can include a protocol and a port.
if name == "server" {
if name == Server {
for _, parser := range ProtocolParsers {
prot := parser.Protocol() + ":"
if strings.HasPrefix(value, prot) {
res["protocol"] = parser.Protocol()
res[Protocol] = parser.Protocol()
}
value = strings.TrimPrefix(value, prot)
}
serverParts := strings.Split(value, ",")
if len(serverParts) == 2 && len(serverParts[1]) > 0 {
value = serverParts[0]
res["port"] = serverParts[1]
res[Port] = serverParts[1]
}
}
res[name] = value
@ -493,10 +585,10 @@ func splitConnectionStringURL(dsn string) (map[string]string, error) {
}
if u.User != nil {
res["user id"] = u.User.Username()
res[UserID] = u.User.Username()
p, exists := u.User.Password()
if exists {
res["password"] = p
res[Password] = p
}
}
@ -506,13 +598,13 @@ func splitConnectionStringURL(dsn string) (map[string]string, error) {
}
if len(u.Path) > 0 {
res["server"] = host + "\\" + u.Path[1:]
res[Server] = host + "\\" + u.Path[1:]
} else {
res["server"] = host
res[Server] = host
}
if len(port) > 0 {
res["port"] = port
res[Port] = port
}
query := u.Query()


@ -17,6 +17,7 @@ import (
"unicode"
"github.com/golang-sql/sqlexp"
"github.com/microsoft/go-mssqldb/aecmk"
"github.com/microsoft/go-mssqldb/internal/querytext"
"github.com/microsoft/go-mssqldb/msdsn"
)
@ -69,10 +70,7 @@ func (d *Driver) OpenConnector(dsn string) (*Connector, error) {
return nil, err
}
return &Connector{
params: params,
driver: d,
}, nil
return newConnector(params, d), nil
}
func (d *Driver) Open(dsn string) (driver.Conn, error) {
@ -122,10 +120,8 @@ func NewConnector(dsn string) (*Connector, error) {
if err != nil {
return nil, err
}
c := &Connector{
params: params,
driver: driverInstanceNoProcess,
}
c := newConnector(params, driverInstanceNoProcess)
return c, nil
}
@ -146,9 +142,14 @@ func NewConnectorWithAccessTokenProvider(dsn string, tokenProvider func(ctx cont
// NewConnectorConfig creates a new Connector for a DSN Config struct.
// The returned connector may be used with sql.OpenDB.
func NewConnectorConfig(config msdsn.Config) *Connector {
return newConnector(config, driverInstanceNoProcess)
}
func newConnector(config msdsn.Config, driver *Driver) *Connector {
return &Connector{
params: config,
driver: driverInstanceNoProcess,
params: config,
driver: driver,
keyProviders: make(aecmk.ColumnEncryptionKeyProviderMap),
}
}
@ -199,6 +200,8 @@ type Connector struct {
//
// If Dialer is not set, normal net dialers are used.
Dialer Dialer
keyProviders aecmk.ColumnEncryptionKeyProviderMap
}
type Dialer interface {
@ -219,6 +222,11 @@ func (c *Connector) getDialer(p *msdsn.Config) Dialer {
return createDialer(p)
}
// RegisterCekProvider associates the given provider with the named key store. If an entry of the given name already exists, that entry is overwritten
func (c *Connector) RegisterCekProvider(name string, provider aecmk.ColumnEncryptionKeyProvider) {
c.keyProviders[name] = aecmk.NewCekProvider(provider)
}
type Conn struct {
connector *Connector
sess *tdsSession
@ -403,7 +411,7 @@ func (d *Driver) open(ctx context.Context, dsn string) (*Conn, error) {
if err != nil {
return nil, err
}
c := &Connector{params: params}
c := newConnector(params, nil)
return d.connect(ctx, c, params)
}
@ -445,10 +453,11 @@ func (c *Conn) Close() error {
}
type Stmt struct {
c *Conn
query string
paramCount int
notifSub *queryNotifSub
c *Conn
query string
paramCount int
notifSub *queryNotifSub
skipEncryption bool
}
type queryNotifSub struct {
@ -472,7 +481,7 @@ func (c *Conn) prepareContext(ctx context.Context, query string) (*Stmt, error)
if c.processQueryText {
query, paramCount = querytext.ParseParams(query)
}
return &Stmt{c, query, paramCount, nil}, nil
return &Stmt{c, query, paramCount, nil, false}, nil
}
func (s *Stmt) Close() error {
@ -654,16 +663,38 @@ func (s *Stmt) makeRPCParams(args []namedValue, isProc bool) ([]param, []string,
if isOutputValue(val.Value) {
output = outputSuffix
}
decls[i] = fmt.Sprintf("%s %s%s", name, makeDecl(params[i+offset].ti), output)
tiDecl := params[i+offset].ti
if val.encrypt != nil {
// Encrypted parameters have a few requirements:
// 1. Copy original typeinfo to a block after the data
// 2. Set the parameter type to varbinary(max)
// 3. Append the crypto metadata bytes
params[i+offset].tiOriginal = params[i+offset].ti
params[i+offset].Flags |= fEncrypted
encryptedBytes, metadata, err := val.encrypt(params[i+offset].buffer)
if err != nil {
return nil, nil, err
}
params[i+offset].cipherInfo = metadata
params[i+offset].ti.TypeId = typeBigVarBin
params[i+offset].buffer = encryptedBytes
params[i+offset].ti.Size = 0
}
decls[i] = fmt.Sprintf("%s %s%s", name, makeDecl(tiDecl), output)
}
return params, decls, nil
}
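From the application side none of this is visible: with columnencryption enabled and a column-master-key provider the driver can reach, parameters bound to encrypted columns are rewritten as shown above while the query text stays ordinary. A hedged usage sketch — the DSN, table and column names are invented, and the table is assumed to already use Always Encrypted columns with a suitable key setup:

package main

import (
	"database/sql"
	"log"

	_ "github.com/microsoft/go-mssqldb"
)

func main() {
	db, err := sql.Open("sqlserver", "sqlserver://appuser:secret@dbhost?database=appdb&columnencryption=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ordinary parameterized statements; values bound to encrypted columns are
	// encrypted by the driver before they leave the client.
	_, err = db.Exec("INSERT INTO dbo.Patients (Name, SSN) VALUES (@p1, @p2)", "Jane Doe", "123-45-6789")
	if err != nil {
		log.Fatal(err)
	}

	var ssn string
	err = db.QueryRow("SELECT SSN FROM dbo.Patients WHERE Name = @p1", "Jane Doe").Scan(&ssn)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("decrypted on the client:", ssn)
}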
// Encrypts the input bytes. Returns the encrypted bytes followed by the encryption metadata to append to the packet.
type valueEncryptor func(bytes []byte) ([]byte, []byte, error)
type namedValue struct {
Name string
Ordinal int
Value driver.Value
encrypt valueEncryptor
}
func convertOldArgs(args []driver.Value) []namedValue {
@ -677,6 +708,10 @@ func convertOldArgs(args []driver.Value) []namedValue {
return list
}
func (s *Stmt) doEncryption() bool {
return !s.skipEncryption && s.c.sess.alwaysEncrypted
}
func (s *Stmt) Query(args []driver.Value) (driver.Rows, error) {
defer s.c.clearOuts()
@ -687,6 +722,12 @@ func (s *Stmt) queryContext(ctx context.Context, args []namedValue) (rows driver
if !s.c.connectionGood {
return nil, driver.ErrBadConn
}
if s.doEncryption() && len(args) > 0 {
args, err = s.encryptArgs(ctx, args)
}
if err != nil {
return nil, err
}
if err = s.sendQuery(ctx, args); err != nil {
return nil, s.c.checkBadConn(ctx, err, true)
}
@ -754,6 +795,12 @@ func (s *Stmt) exec(ctx context.Context, args []namedValue) (res driver.Result,
if !s.c.connectionGood {
return nil, driver.ErrBadConn
}
if s.doEncryption() && len(args) > 0 {
args, err = s.encryptArgs(ctx, args)
}
if err != nil {
return nil, err
}
if err = s.sendQuery(ctx, args); err != nil {
return nil, s.c.checkBadConn(ctx, err, true)
}
@ -872,7 +919,7 @@ func (rc *Rows) NextResultSet() error {
// the value type that can be used to scan types into. For example, the database
// column type "bigint" this should return "reflect.TypeOf(int64(0))".
func (r *Rows) ColumnTypeScanType(index int) reflect.Type {
return makeGoLangScanType(r.cols[index].ti)
return makeGoLangScanType(r.cols[index].originalTypeInfo())
}
// RowsColumnTypeDatabaseTypeName may be implemented by Rows. It should return the
@ -881,7 +928,7 @@ func (r *Rows) ColumnTypeScanType(index int) reflect.Type {
// "DECIMAL", "SMALLINT", "INT", "BIGINT", "BOOL", "[]BIGINT", "JSONB", "XML",
// "TIMESTAMP".
func (r *Rows) ColumnTypeDatabaseTypeName(index int) string {
return makeGoLangTypeName(r.cols[index].ti)
return makeGoLangTypeName(r.cols[index].originalTypeInfo())
}
// RowsColumnTypeLength may be implemented by Rows. It should return the length
@ -897,7 +944,7 @@ func (r *Rows) ColumnTypeDatabaseTypeName(index int) string {
// int (0, false)
// bytea(30) (30, true)
func (r *Rows) ColumnTypeLength(index int) (int64, bool) {
return makeGoLangTypeLength(r.cols[index].ti)
return makeGoLangTypeLength(r.cols[index].originalTypeInfo())
}
// It should return
@ -908,7 +955,7 @@ func (r *Rows) ColumnTypeLength(index int) (int64, bool) {
// int (0, 0, false)
// decimal (math.MaxInt64, math.MaxInt64, true)
func (r *Rows) ColumnTypePrecisionScale(index int) (int64, int64, bool) {
return makeGoLangTypePrecisionScale(r.cols[index].ti)
return makeGoLangTypePrecisionScale(r.cols[index].originalTypeInfo())
}
// The nullable value should
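Through database/sql these methods surface as sql.ColumnType; with Always Encrypted, the change above means they describe the plaintext column type rather than the varbinary ciphertext. A hedged sketch (DSN, query and table are invented):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/microsoft/go-mssqldb"
)

func main() {
	db, err := sql.Open("sqlserver", "sqlserver://appuser:secret@dbhost?database=appdb&columnencryption=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT Name, SSN FROM dbo.Patients")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	cols, _ := rows.ColumnTypes()
	for _, c := range cols {
		// For an encrypted NVARCHAR column this reports NVARCHAR, not VARBINARY.
		fmt.Println(c.Name(), c.DatabaseTypeName(), c.ScanType())
	}
}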
@ -974,12 +1021,20 @@ func (s *Stmt) makeParam(val driver.Value) (res param, err error) {
res.ti.TypeId = typeIntN
res.ti.Size = 8
res.buffer = []byte{}
case byte:
res.ti.TypeId = typeIntN
res.buffer = []byte{val}
res.ti.Size = 1
case float64:
res.ti.TypeId = typeFltN
res.ti.Size = 8
res.buffer = make([]byte, 8)
binary.LittleEndian.PutUint64(res.buffer, math.Float64bits(val))
case float32:
res.ti.TypeId = typeFltN
res.ti.Size = 4
res.buffer = make([]byte, 4)
binary.LittleEndian.PutUint32(res.buffer, math.Float32bits(val))
case sql.NullFloat64:
// only null values should be getting here
res.ti.TypeId = typeFltN
@ -1043,7 +1098,7 @@ func (c *Conn) Ping(ctx context.Context) error {
if !c.connectionGood {
return driver.ErrBadConn
}
stmt := &Stmt{c, `select 1;`, 0, nil}
stmt := &Stmt{c, `select 1;`, 0, nil, true}
_, err := stmt.ExecContext(ctx, nil)
return err
}
@ -1108,7 +1163,7 @@ func (s *Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driv
}
list := make([]namedValue, len(args))
for i, nv := range args {
list[i] = namedValue(nv)
list[i] = namedValueFromDriverNamedValue(nv)
}
return s.queryContext(ctx, list)
}
@ -1121,11 +1176,15 @@ func (s *Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (drive
}
list := make([]namedValue, len(args))
for i, nv := range args {
list[i] = namedValue(nv)
list[i] = namedValueFromDriverNamedValue(nv)
}
return s.exec(ctx, list)
}
func namedValueFromDriverNamedValue(v driver.NamedValue) namedValue {
return namedValue{Name: v.Name, Ordinal: v.Ordinal, Value: v.Value, encrypt: nil}
}
// Rowsq implements the sqlexp messages model for Query and QueryContext
// Theory: We could also implement the non-experimental model this way
type Rowsq struct {
@ -1316,7 +1375,7 @@ scan:
// the value type that can be used to scan types into. For example, the database
// column type "bigint" this should return "reflect.TypeOf(int64(0))".
func (r *Rowsq) ColumnTypeScanType(index int) reflect.Type {
return makeGoLangScanType(r.cols[index].ti)
return makeGoLangScanType(r.cols[index].originalTypeInfo())
}
// RowsColumnTypeDatabaseTypeName may be implemented by Rows. It should return the
@ -1325,7 +1384,7 @@ func (r *Rowsq) ColumnTypeScanType(index int) reflect.Type {
// "DECIMAL", "SMALLINT", "INT", "BIGINT", "BOOL", "[]BIGINT", "JSONB", "XML",
// "TIMESTAMP".
func (r *Rowsq) ColumnTypeDatabaseTypeName(index int) string {
return makeGoLangTypeName(r.cols[index].ti)
return makeGoLangTypeName(r.cols[index].originalTypeInfo())
}
// RowsColumnTypeLength may be implemented by Rows. It should return the length
@ -1341,7 +1400,7 @@ func (r *Rowsq) ColumnTypeDatabaseTypeName(index int) string {
// int (0, false)
// bytea(30) (30, true)
func (r *Rowsq) ColumnTypeLength(index int) (int64, bool) {
return makeGoLangTypeLength(r.cols[index].ti)
return makeGoLangTypeLength(r.cols[index].originalTypeInfo())
}
// It should return
@ -1352,7 +1411,7 @@ func (r *Rowsq) ColumnTypeLength(index int) (int64, bool) {
// int (0, 0, false)
// decimal (math.MaxInt64, math.MaxInt64, true)
func (r *Rowsq) ColumnTypePrecisionScale(index int) (int64, int64, bool) {
return makeGoLangTypePrecisionScale(r.cols[index].ti)
return makeGoLangTypePrecisionScale(r.cols[index].originalTypeInfo())
}
// The nullable value should


@ -29,12 +29,18 @@ type MssqlStmt = Stmt // Deprecated: users should transition to th
var _ driver.NamedValueChecker = &Conn{}
// VarChar parameter types.
// VarChar is used to encode a string parameter as VarChar instead of a sized NVarChar
type VarChar string
// NVarCharMax is used to encode a string parameter as NVarChar(max) instead of a sized NVarChar
type NVarCharMax string
// VarCharMax is used to encode a string parameter as VarChar(max) instead of a sized NVarChar
type VarCharMax string
// NChar is used to encode a string parameter as NChar instead of a sized NVarChar
type NChar string
// DateTime1 encodes parameters to original DateTime SQL types.
type DateTime1 time.Time
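These wrapper types let callers force a specific SQL Server string encoding for a parameter instead of the default sized NVARCHAR. A short hedged sketch (the DSN and table are assumed to exist):

package main

import (
	"database/sql"
	"log"

	mssql "github.com/microsoft/go-mssqldb"
)

func main() {
	db, err := sql.Open("sqlserver", "sqlserver://appuser:secret@dbhost?database=appdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	_, err = db.Exec(
		"INSERT INTO dbo.Notes (Code, Title, Body) VALUES (@p1, @p2, @p3)",
		mssql.NChar("AB"),            // sent as NCHAR instead of NVARCHAR
		mssql.VarChar("plain text"),  // sent as VARCHAR instead of NVARCHAR
		mssql.VarCharMax("long text"), // sent as VARCHAR(MAX)
	)
	if err != nil {
		log.Fatal(err)
	}
}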
@ -45,12 +51,16 @@ func convertInputParameter(val interface{}) (interface{}, error) {
switch v := val.(type) {
case int, int16, int32, int64, int8:
return val, nil
case byte:
return val, nil
case VarChar:
return val, nil
case NVarCharMax:
return val, nil
case VarCharMax:
return val, nil
case NChar:
return val, nil
case DateTime1:
return val, nil
case DateTimeOffset:
@ -61,8 +71,10 @@ func convertInputParameter(val interface{}) (interface{}, error) {
return val, nil
case civil.Time:
return val, nil
// case *apd.Decimal:
// return nil
// case *apd.Decimal:
// return nil
case float32:
return val, nil
default:
return driver.DefaultParameterConverter.ConvertValue(v)
}
@ -144,6 +156,10 @@ func (s *Stmt) makeParamExtra(val driver.Value) (res param, err error) {
res.ti.TypeId = typeNVarChar
res.buffer = str2ucs2(string(val))
res.ti.Size = 0 // currently zero forces nvarchar(max)
case NChar:
res.ti.TypeId = typeNChar
res.buffer = str2ucs2(string(val))
res.ti.Size = len(res.buffer)
case DateTime1:
t := time.Time(val)
res.ti.TypeId = typeDateTimeN

vendor/github.com/microsoft/go-mssqldb/quoter.go (new file, generated, vendored)

@ -0,0 +1,40 @@
package mssql
import (
"strings"
)
// TSQLQuoter implements sqlexp.Quoter
type TSQLQuoter struct {
}
// ID quotes identifiers such as schema, table, or column names.
// This implementation handles multi-part names.
func (TSQLQuoter) ID(name string) string {
return "[" + strings.Replace(name, "]", "]]", -1) + "]"
}
// Value quotes database values such as string or []byte types as strings
// that are suitable and safe to embed in SQL text. The returned value
// of a string will include all surrounding quotes.
//
// If a value type is not supported it must panic.
func (TSQLQuoter) Value(v interface{}) string {
switch v := v.(type) {
default:
panic("unsupported value")
case string:
return sqlString(v)
case VarChar:
return sqlString(string(v))
case VarCharMax:
return sqlString(string(v))
case NVarCharMax:
return sqlString(string(v))
}
}
func sqlString(v string) string {
return "'" + strings.Replace(string(v), "'", "''", -1) + "'"
}
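TSQLQuoter implements sqlexp.Quoter for cases where identifiers or literal values must be spliced into SQL text. A small sketch of the escaping it performs:

package main

import (
	"fmt"

	mssql "github.com/microsoft/go-mssqldb"
)

func main() {
	var q mssql.TSQLQuoter

	// Identifiers are wrapped in brackets and any "]" is doubled.
	fmt.Println(q.ID("My]Table")) // [My]]Table]

	// String values are single-quoted and embedded quotes doubled.
	fmt.Println(q.Value("O'Brien"))             // 'O''Brien'
	fmt.Println(q.Value(mssql.VarCharMax("x"))) // 'x'
}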


@ -13,13 +13,16 @@ type procId struct {
const (
fByRevValue = 1
fDefaultValue = 2
fEncrypted = 8
)
type param struct {
Name string
Flags uint8
ti typeInfo
buffer []byte
Name string
Flags uint8
ti typeInfo
buffer []byte
tiOriginal typeInfo
cipherInfo []byte
}
var (
@ -78,6 +81,15 @@ func sendRpc(buf *tdsBuffer, headers []headerStruct, proc procId, flags uint16,
if err != nil {
return
}
if (param.Flags & fEncrypted) == fEncrypted {
err = writeTypeInfo(buf, &param.tiOriginal)
if err != nil {
return
}
if _, err = buf.Write(param.cipherInfo); err != nil {
return
}
}
}
return buf.FinishPacket()
}


@ -15,6 +15,7 @@ import (
"unicode/utf16"
"unicode/utf8"
"github.com/microsoft/go-mssqldb/aecmk"
"github.com/microsoft/go-mssqldb/integratedauth"
"github.com/microsoft/go-mssqldb/msdsn"
)
@ -102,6 +103,7 @@ const (
verTDS73 = verTDS73A
verTDS73B = 0x730B0003
verTDS74 = 0x74000004
verTDS80 = 0x08000000
)
// packet types
@ -143,6 +145,7 @@ const (
encryptOn = 1 // Encryption is available and on.
encryptNotSup = 2 // Encryption is not available.
encryptReq = 3 // Encryption is required.
encryptStrict = 4
)
const (
@ -157,16 +160,23 @@ const (
)
type tdsSession struct {
buf *tdsBuffer
loginAck loginAckStruct
database string
partner string
columns []columnStruct
tranid uint64
logFlags uint64
logger ContextLogger
routedServer string
routedPort uint16
buf *tdsBuffer
loginAck loginAckStruct
database string
partner string
columns []columnStruct
tranid uint64
logFlags uint64
logger ContextLogger
routedServer string
routedPort uint16
alwaysEncrypted bool
aeSettings *alwaysEncryptedSettings
}
type alwaysEncryptedSettings struct {
enclaveType string
keyProviders aecmk.ColumnEncryptionKeyProviderMap
}
const (
@ -178,10 +188,26 @@ const (
)
type columnStruct struct {
UserType uint32
Flags uint16
ColName string
ti typeInfo
UserType uint32
Flags uint16
ColName string
ti typeInfo
cryptoMeta *cryptoMetadata
}
func (c columnStruct) isEncrypted() bool {
return isEncryptedFlag(c.Flags)
}
func isEncryptedFlag(flags uint16) bool {
return colFlagEncrypted == (flags & colFlagEncrypted)
}
func (c columnStruct) originalTypeInfo() typeInfo {
if c.isEncrypted() {
return c.cryptoMeta.typeInfo
}
return c.ti
}
type keySlice []uint8
@ -577,7 +603,7 @@ func sendLogin(w *tdsBuffer, login *login) error {
language := str2ucs2(login.Language)
database := str2ucs2(login.Database)
atchdbfile := str2ucs2(login.AtchDBFile)
changepassword := str2ucs2(login.ChangePassword)
changepassword := manglePassword(login.ChangePassword)
featureExt := login.FeatureExt.toBytes()
hdr := loginHeader{
@ -638,6 +664,9 @@ func sendLogin(w *tdsBuffer, login *login) error {
offset += hdr.ExtensionLength // DWORD
featureExtOffset = uint32(offset)
}
if len(changepassword) > 0 {
hdr.OptionFlags3 |= fChangePassword
}
hdr.Length = uint32(offset) + uint32(featureExtLen)
var err error
@ -977,6 +1006,8 @@ func preparePreloginFields(p msdsn.Config, fe *featureExtFedAuth) map[uint8][]by
encrypt = encryptOn
case msdsn.EncryptionOff:
encrypt = encryptOff
case msdsn.EncryptionStrict:
encrypt = encryptStrict
}
v := getDriverVersion(driverVersion)
fields := map[uint8][]byte{
@ -1023,6 +1054,12 @@ func interpretPreloginResponse(p msdsn.Config, fe *featureExtFedAuth, fields map
}
func prepareLogin(ctx context.Context, c *Connector, p msdsn.Config, logger ContextLogger, auth integratedauth.IntegratedAuthenticator, fe *featureExtFedAuth, packetSize uint32) (l *login, err error) {
var TDSVersion uint32
if p.Encryption == msdsn.EncryptionStrict {
TDSVersion = verTDS80
} else {
TDSVersion = verTDS74
}
var typeFlags uint8
if p.ReadOnlyIntent {
typeFlags |= fReadOnlyIntent
@ -1035,17 +1072,21 @@ func prepareLogin(ctx context.Context, c *Connector, p msdsn.Config, logger Cont
serverName = p.Host
}
l = &login{
TDSVersion: verTDS74,
PacketSize: packetSize,
Database: p.Database,
OptionFlags2: fODBC, // to get unlimited TEXTSIZE
OptionFlags1: fUseDB | fSetLang,
HostName: p.Workstation,
ServerName: serverName,
AppName: p.AppName,
TypeFlags: typeFlags,
CtlIntName: "go-mssqldb",
ClientProgVer: getDriverVersion(driverVersion),
TDSVersion: TDSVersion,
PacketSize: packetSize,
Database: p.Database,
OptionFlags2: fODBC, // to get unlimited TEXTSIZE
OptionFlags1: fUseDB | fSetLang,
HostName: p.Workstation,
ServerName: serverName,
AppName: p.AppName,
TypeFlags: typeFlags,
CtlIntName: "go-mssqldb",
ClientProgVer: getDriverVersion(driverVersion),
ChangePassword: p.ChangePassword,
}
if p.ColumnEncryption {
_ = l.FeatureExt.Add(&featureExtColumnEncryption{})
}
switch {
case fe.FedAuthLibrary == FedAuthLibrarySecurityToken:
@ -1061,14 +1102,14 @@ func prepareLogin(ctx context.Context, c *Connector, p msdsn.Config, logger Cont
return nil, err
}
l.FeatureExt.Add(fe)
_ = l.FeatureExt.Add(fe)
case fe.FedAuthLibrary == FedAuthLibraryADAL:
if uint64(p.LogFlags)&logDebug != 0 {
logger.Log(ctx, msdsn.LogDebug, "Starting federated authentication using ADAL")
}
l.FeatureExt.Add(fe)
_ = l.FeatureExt.Add(fe)
case auth != nil:
if uint64(p.LogFlags)&logDebug != 0 {
@ -1092,8 +1133,29 @@ func prepareLogin(ctx context.Context, c *Connector, p msdsn.Config, logger Cont
return l, nil
}
func connect(ctx context.Context, c *Connector, logger ContextLogger, p msdsn.Config) (res *tdsSession, err error) {
func getTLSConn(conn *timeoutConn, p msdsn.Config, alpnSeq string) (tlsConn *tls.Conn, err error) {
var config *tls.Config
if pc := p.TLSConfig; pc != nil {
config = pc
}
if config == nil {
config, err = msdsn.SetupTLS("", false, p.Host, "")
if err != nil {
return nil, err
}
}
//Set ALPN Sequence
config.NextProtos = []string{alpnSeq}
tlsConn = tls.Client(conn.c, config)
err = tlsConn.Handshake()
if err != nil {
return nil, fmt.Errorf("TLS Handshake failed: %w", err)
}
return tlsConn, nil
}
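With encrypt=strict the driver negotiates TDS 8.0: the whole TCP stream is wrapped in TLS (ALPN "tds/8.0") before the prelogin exchange, rather than encrypting inside the TDS stream. A hedged connection sketch — host and credentials are invented, and the server is assumed to support strict encryption (newer releases such as SQL Server 2022) with a certificate the client can validate:

package main

import (
	"database/sql"
	"log"

	_ "github.com/microsoft/go-mssqldb"
)

func main() {
	dsn := "sqlserver://appuser:secret@dbhost:1433?database=appdb&encrypt=strict&tlsmin=1.2"
	db, err := sql.Open("sqlserver", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil { // forces the TDS 8.0 / TLS handshake
		log.Fatal(err)
	}
	log.Println("connected with strict encryption")
}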
func connect(ctx context.Context, c *Connector, logger ContextLogger, p msdsn.Config) (res *tdsSession, err error) {
isTransportEncrypted := false
// if instance is specified use instance resolution service
if len(p.Instance) > 0 && p.Port != 0 && uint64(p.LogFlags)&logDebug != 0 {
// both instance name and port specified
@ -1133,14 +1195,25 @@ initiate_connection:
}
toconn := newTimeoutConn(conn, p.ConnTimeout)
outbuf := newTdsBuffer(packetSize, toconn)
if p.Encryption == msdsn.EncryptionStrict {
outbuf.transport, err = getTLSConn(toconn, p, "tds/8.0")
if err != nil {
return nil, err
}
isTransportEncrypted = true
}
sess := tdsSession{
buf: outbuf,
logger: logger,
logFlags: uint64(p.LogFlags),
buf: outbuf,
logger: logger,
logFlags: uint64(p.LogFlags),
aeSettings: &alwaysEncryptedSettings{keyProviders: aecmk.GetGlobalCekProviders()},
}
for i, p := range c.keyProviders {
sess.aeSettings.keyProviders[i] = p
}
fedAuth := &featureExtFedAuth{
FedAuthLibrary: FedAuthLibraryReserved,
}
@ -1166,42 +1239,47 @@ initiate_connection:
return nil, err
}
if encrypt != encryptNotSup {
var config *tls.Config
if pc := p.TLSConfig; pc != nil {
config = pc
if config.DynamicRecordSizingDisabled == false {
config = config.Clone()
//We need not perform TLS handshake if the communication channel is already encrypted (encrypt=strict)
if !isTransportEncrypted {
if encrypt != encryptNotSup {
var config *tls.Config
if pc := p.TLSConfig; pc != nil {
config = pc
if !config.DynamicRecordSizingDisabled {
config = config.Clone()
// fix for https://github.com/microsoft/go-mssqldb/issues/166
// Go implementation of TLS payload size heuristic algorithm splits single TDS package to multiple TCP segments,
// while SQL Server seems to expect one TCP segment per encrypted TDS package.
// Setting DynamicRecordSizingDisabled to true disables that algorithm and uses 16384 bytes per TLS package
config.DynamicRecordSizingDisabled = true
// fix for https://github.com/microsoft/go-mssqldb/issues/166
// Go implementation of TLS payload size heuristic algorithm splits single TDS package to multiple TCP segments,
// while SQL Server seems to expect one TCP segment per encrypted TDS package.
// Setting DynamicRecordSizingDisabled to true disables that algorithm and uses 16384 bytes per TLS package
config.DynamicRecordSizingDisabled = true
}
}
}
if config == nil {
config, err = msdsn.SetupTLS("", false, p.Host, "")
if config == nil {
config, err = msdsn.SetupTLS("", false, p.Host, "")
if err != nil {
return nil, err
}
}
// setting up connection handler which will allow wrapping of TLS handshake packets inside TDS stream
handshakeConn := tlsHandshakeConn{buf: outbuf}
passthrough := passthroughConn{c: &handshakeConn}
tlsConn := tls.Client(&passthrough, config)
err = tlsConn.Handshake()
passthrough.c = toconn
outbuf.transport = tlsConn
if err != nil {
return nil, err
return nil, fmt.Errorf("TLS Handshake failed: %v", err)
}
if encrypt == encryptOff {
outbuf.afterFirst = func() {
outbuf.transport = toconn
}
}
}
// setting up connection handler which will allow wrapping of TLS handshake packets inside TDS stream
handshakeConn := tlsHandshakeConn{buf: outbuf}
passthrough := passthroughConn{c: &handshakeConn}
tlsConn := tls.Client(&passthrough, config)
err = tlsConn.Handshake()
passthrough.c = toconn
outbuf.transport = tlsConn
if err != nil {
return nil, fmt.Errorf("TLS Handshake failed: %v", err)
}
if encrypt == encryptOff {
outbuf.afterFirst = func() {
outbuf.transport = toconn
}
}
}
auth, err := integratedauth.GetIntegratedAuthenticator(p)
@ -1288,6 +1366,18 @@ initiate_connection:
case loginAckStruct:
sess.loginAck = token
loginAck = true
case featureExtAck:
for _, v := range token {
switch v := v.(type) {
case colAckStruct:
if v.Version <= 2 && v.Version > 0 {
sess.alwaysEncrypted = true
if len(v.EnclaveType) > 0 {
sess.aeSettings.enclaveType = string(v.EnclaveType)
}
}
}
}
case doneStruct:
if token.isError() {
tokenErr := token.getError()
@ -1317,3 +1407,21 @@ initiate_connection:
}
return &sess, nil
}
type featureExtColumnEncryption struct {
}
func (f *featureExtColumnEncryption) featureID() byte {
return featExtCOLUMNENCRYPTION
}
func (f *featureExtColumnEncryption) toBytes() []byte {
/*
1 = The client supports column encryption without enclave computations.
2 = The client SHOULD<25> support column encryption when encrypted data require enclave computations.
3 = The client SHOULD<26> support column encryption when encrypted data require enclave computations
with the additional ability to cache column encryption keys that are to be sent to the enclave
and the ability to retry queries when the keys sent by the client do not match what is needed for the query to run.
*/
return []byte{0x01}
}


@ -1,6 +1,7 @@
package mssql
import (
"bytes"
"context"
"encoding/binary"
"fmt"
@ -10,7 +11,11 @@ import (
"strconv"
"github.com/golang-sql/sqlexp"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/algorithms"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/encryption"
"github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/keys"
"github.com/microsoft/go-mssqldb/msdsn"
"golang.org/x/text/encoding/unicode"
)
//go:generate go run golang.org/x/tools/cmd/stringer -type token
@ -92,10 +97,15 @@ const (
fedAuthInfoSPN = 0x02
)
const (
cipherAlgCustom = 0x00
)
// COLMETADATA flags
// https://msdn.microsoft.com/en-us/library/dd357363.aspx
const (
colFlagNullable = 1
colFlagNullable = 1
colFlagEncrypted = 0x0800
// TODO implement more flags
)
@ -533,7 +543,14 @@ type fedAuthAckStruct struct {
Signature []byte
}
func parseFeatureExtAck(r *tdsBuffer) map[byte]interface{} {
type colAckStruct struct {
Version int
EnclaveType string
}
type featureExtAck map[byte]interface{}
func parseFeatureExtAck(r *tdsBuffer) featureExtAck {
ack := map[byte]interface{}{}
for feature := r.byte(); feature != featExtTERMINATOR; feature = r.byte() {
@ -555,7 +572,21 @@ func parseFeatureExtAck(r *tdsBuffer) map[byte]interface{} {
length -= 32
}
ack[feature] = fedAuthAck
case featExtCOLUMNENCRYPTION:
colAck := colAckStruct{Version: int(r.byte())}
length--
if length > 0 {
// enclave type is sent as utf16 le
enclaveLength := r.byte() * 2
length--
enclaveBytes := make([]byte, enclaveLength)
r.ReadFull(enclaveBytes)
// if the enclave type is malformed we'll just ignore it
colAck.EnclaveType, _ = ucs22str(enclaveBytes)
length -= uint32(enclaveLength)
}
ack[feature] = colAck
}
// Skip unprocessed bytes
@ -568,34 +599,265 @@ func parseFeatureExtAck(r *tdsBuffer) map[byte]interface{} {
}
// http://msdn.microsoft.com/en-us/library/dd357363.aspx
func parseColMetadata72(r *tdsBuffer) (columns []columnStruct) {
func parseColMetadata72(r *tdsBuffer, s *tdsSession) (columns []columnStruct) {
count := r.uint16()
if count == 0xffff {
// no metadata is sent
return nil
}
columns = make([]columnStruct, count)
var cekTable *cekTable
if s.alwaysEncrypted {
// column encryption key list
cekTable = readCekTable(r)
}
for i := range columns {
column := &columns[i]
column.UserType = r.uint32()
column.Flags = r.uint16()
baseTi := getBaseTypeInfo(r, true)
typeInfo := readTypeInfo(r, baseTi.TypeId, column.cryptoMeta)
typeInfo.UserType = baseTi.UserType
typeInfo.Flags = baseTi.Flags
typeInfo.TypeId = baseTi.TypeId
column.Flags = baseTi.Flags
column.UserType = baseTi.UserType
column.ti = typeInfo
if column.isEncrypted() && s.alwaysEncrypted {
// Read Crypto Metadata
cryptoMeta := parseCryptoMetadata(r, cekTable)
cryptoMeta.typeInfo.Flags = baseTi.Flags
column.cryptoMeta = &cryptoMeta
} else {
column.cryptoMeta = nil
}
// parsing TYPE_INFO structure
column.ti = readTypeInfo(r)
column.ColName = r.BVarChar()
}
return columns
}
// http://msdn.microsoft.com/en-us/library/dd357254.aspx
func parseRow(r *tdsBuffer, columns []columnStruct, row []interface{}) {
for i, column := range columns {
row[i] = column.ti.Reader(&column.ti, r)
func getBaseTypeInfo(r *tdsBuffer, parseFlags bool) typeInfo {
userType := r.uint32()
flags := uint16(0)
if parseFlags {
flags = r.uint16()
}
tId := r.byte()
return typeInfo{
UserType: userType,
Flags: flags,
TypeId: tId}
}
type cryptoMetadata struct {
entry *cekTableEntry
ordinal uint16
algorithmId byte
algorithmName *string
encType byte
normRuleVer byte
typeInfo typeInfo
}
func parseCryptoMetadata(r *tdsBuffer, cekTable *cekTable) cryptoMetadata {
ordinal := uint16(0)
if cekTable != nil {
ordinal = r.uint16()
}
typeInfo := getBaseTypeInfo(r, false)
ti := readTypeInfo(r, typeInfo.TypeId, nil)
ti.UserType = typeInfo.UserType
ti.Flags = typeInfo.Flags
ti.TypeId = typeInfo.TypeId
algorithmId := r.byte()
var algName *string = nil
if algorithmId == cipherAlgCustom {
// Read the name when a custom algorithm is used
nameLen := int(r.byte())
var algNameUtf16 = make([]byte, nameLen*2)
r.ReadFull(algNameUtf16)
algNameBytes, _ := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM).NewDecoder().Bytes(algNameUtf16)
mAlgName := string(algNameBytes)
algName = &mAlgName
}
encType := r.byte()
normRuleVer := r.byte()
var entry *cekTableEntry = nil
if cekTable != nil {
if int(ordinal) > len(cekTable.entries)-1 {
panic(fmt.Errorf("invalid ordinal, cekTable only has %d entries", len(cekTable.entries)))
}
entry = &cekTable.entries[ordinal]
}
return cryptoMetadata{
entry: entry,
ordinal: ordinal,
algorithmId: algorithmId,
algorithmName: algName,
encType: encType,
normRuleVer: normRuleVer,
typeInfo: ti,
}
}
func readCekTable(r *tdsBuffer) *cekTable {
tableSize := r.uint16()
var cekTable *cekTable = nil
if tableSize != 0 {
mCekTable := newCekTable(tableSize)
for i := uint16(0); i < tableSize; i++ {
mCekTable.entries[i] = readCekTableEntry(r)
}
cekTable = &mCekTable
}
return cekTable
}
func readCekTableEntry(r *tdsBuffer) cekTableEntry {
databaseId := r.int32()
cekID := r.int32()
cekVersion := r.int32()
var cekMdVersion = make([]byte, 8)
_, err := r.Read(cekMdVersion)
if err != nil {
panic("unable to read cekMdVersion")
}
cekValueCount := uint(r.byte())
// not using ucs22str because we already know the data is utf16
enc := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
utf16dec := enc.NewDecoder()
cekValues := make([]encryptionKeyInfo, cekValueCount)
for i := uint(0); i < cekValueCount; i++ {
encryptedCekLength := r.uint16()
encryptedCek := make([]byte, encryptedCekLength)
r.ReadFull(encryptedCek)
keyStoreLength := r.byte()
keyStoreNameUtf16 := make([]byte, keyStoreLength*2)
r.ReadFull(keyStoreNameUtf16)
keyStoreName, _ := utf16dec.Bytes(keyStoreNameUtf16)
keyPathLength := r.uint16()
keyPathUtf16 := make([]byte, keyPathLength*2)
r.ReadFull(keyPathUtf16)
keyPath, _ := utf16dec.Bytes(keyPathUtf16)
algLength := r.byte()
algNameUtf16 := make([]byte, algLength*2)
r.ReadFull(algNameUtf16)
algName, _ := utf16dec.Bytes(algNameUtf16)
cekValues[i] = encryptionKeyInfo{
encryptedKey: encryptedCek,
databaseID: int(databaseId),
cekID: int(cekID),
cekVersion: int(cekVersion),
cekMdVersion: cekMdVersion,
keyPath: string(keyPath),
keyStoreName: string(keyStoreName),
algorithmName: string(algName),
}
}
return cekTableEntry{
databaseID: int(databaseId),
keyId: int(cekID),
keyVersion: int(cekVersion),
mdVersion: cekMdVersion,
valueCount: int(cekValueCount),
cekValues: cekValues,
}
}
// http://msdn.microsoft.com/en-us/library/dd357254.aspx
func parseRow(r *tdsBuffer, s *tdsSession, columns []columnStruct, row []interface{}) {
for i, column := range columns {
columnContent := column.ti.Reader(&column.ti, r, nil)
if columnContent == nil {
row[i] = columnContent
continue
}
if column.isEncrypted() {
buffer := decryptColumn(column, s, columnContent)
// Decrypt
row[i] = column.cryptoMeta.typeInfo.Reader(&column.cryptoMeta.typeInfo, &buffer, column.cryptoMeta)
} else {
row[i] = columnContent
}
}
}
type RWCBuffer struct {
buffer *bytes.Reader
}
func (R RWCBuffer) Read(p []byte) (n int, err error) {
return R.buffer.Read(p)
}
func (R RWCBuffer) Write(p []byte) (n int, err error) {
return 0, nil
}
func (R RWCBuffer) Close() error {
return nil
}
func decryptColumn(column columnStruct, s *tdsSession, columnContent interface{}) tdsBuffer {
encType := encryption.From(column.cryptoMeta.encType)
cekValue := column.cryptoMeta.entry.cekValues[column.cryptoMeta.ordinal]
if (s.logFlags & uint64(msdsn.LogDebug)) == uint64(msdsn.LogDebug) {
s.logger.Log(context.Background(), msdsn.LogDebug, fmt.Sprintf("Decrypting column %s. Key path: %s, Key store:%s, Algo: %s", column.ColName, cekValue.keyPath, cekValue.keyStoreName, cekValue.algorithmName))
}
cekProvider, ok := s.aeSettings.keyProviders[cekValue.keyStoreName]
if !ok {
panic(fmt.Errorf("Unable to find provider %s to decrypt CEK", cekValue.keyStoreName))
}
cek, err := cekProvider.GetDecryptedKey(cekValue.keyPath, column.cryptoMeta.entry.cekValues[0].encryptedKey)
if err != nil {
panic(err)
}
k := keys.NewAeadAes256CbcHmac256(cek)
alg := algorithms.NewAeadAes256CbcHmac256Algorithm(k, encType, byte(cekValue.cekVersion))
d, err := alg.Decrypt(columnContent.([]byte))
if err != nil {
panic(err)
}
// Decrypt returns a minimum of 8 bytes so truncate to the actual data size
if column.cryptoMeta.typeInfo.Size > 0 && column.cryptoMeta.typeInfo.Size < len(d) {
d = d[:column.cryptoMeta.typeInfo.Size]
}
var newBuff []byte
newBuff = append(newBuff, d...)
rwc := RWCBuffer{
buffer: bytes.NewReader(newBuff),
}
column.cryptoMeta.typeInfo.Buffer = d
buffer := tdsBuffer{rpos: 0, rsize: len(newBuff), rbuf: newBuff, transport: rwc}
return buffer
}
// http://msdn.microsoft.com/en-us/library/dd304783.aspx
func parseNbcRow(r *tdsBuffer, columns []columnStruct, row []interface{}) {
func parseNbcRow(r *tdsBuffer, s *tdsSession, columns []columnStruct, row []interface{}) {
bitlen := (len(columns) + 7) / 8
pres := make([]byte, bitlen)
r.ReadFull(pres)
@ -604,7 +866,15 @@ func parseNbcRow(r *tdsBuffer, columns []columnStruct, row []interface{}) {
row[i] = nil
continue
}
row[i] = col.ti.Reader(&col.ti, r)
columnContent := col.ti.Reader(&col.ti, r, nil)
if col.isEncrypted() {
buffer := decryptColumn(col, s, columnContent)
// Decrypt
row[i] = col.cryptoMeta.typeInfo.Reader(&col.cryptoMeta.typeInfo, &buffer, col.cryptoMeta)
} else {
row[i] = columnContent
}
}
}
@ -637,7 +907,7 @@ func parseInfo(r *tdsBuffer) (res Error) {
}
// https://msdn.microsoft.com/en-us/library/dd303881.aspx
func parseReturnValue(r *tdsBuffer) (nv namedValue) {
func parseReturnValue(r *tdsBuffer, s *tdsSession) (nv namedValue) {
/*
ParamOrdinal
ParamName
@ -648,13 +918,21 @@ func parseReturnValue(r *tdsBuffer) (nv namedValue) {
CryptoMetadata
Value
*/
r.uint16()
nv.Name = r.BVarChar()
r.byte()
r.uint32() // UserType (uint16 prior to 7.2)
r.uint16()
ti := readTypeInfo(r)
nv.Value = ti.Reader(&ti, r)
_ = r.uint16() // ParamOrdinal
nv.Name = r.BVarChar() // ParamName
_ = r.byte() // Status
ti := getBaseTypeInfo(r, true) // UserType + Flags + TypeInfo
var cryptoMetadata *cryptoMetadata = nil
if s.alwaysEncrypted && (ti.Flags&fEncrypted) == fEncrypted {
cm := parseCryptoMetadata(r, nil) // CryptoMetadata
cryptoMetadata = &cm
}
ti2 := readTypeInfo(r, ti.TypeId, cryptoMetadata)
nv.Value = ti2.Reader(&ti2, r, cryptoMetadata)
return
}
@ -664,6 +942,17 @@ func processSingleResponse(ctx context.Context, sess *tdsSession, ch chan tokenS
if sess.logFlags&logErrors != 0 {
sess.logger.Log(ctx, msdsn.LogErrors, fmt.Sprintf("Intercepted panic %v", err))
}
if outs.msgq != nil {
var derr error
switch e := err.(type) {
case error:
derr = e
default:
derr = fmt.Errorf("Unhandled session error %v", e)
}
_ = sqlexp.ReturnMessageEnqueue(ctx, outs.msgq, sqlexp.MsgError{Error: derr})
}
ch <- err
}
close(ch)
@ -760,7 +1049,7 @@ func processSingleResponse(ctx context.Context, sess *tdsSession, ch chan tokenS
ch <- done
if done.Status&doneCount != 0 {
if sess.logFlags&logRows != 0 {
sess.logger.Log(ctx, msdsn.LogRows, fmt.Sprintf("(%d row(s) affected)", done.RowCount))
sess.logger.Log(ctx, msdsn.LogRows, fmt.Sprintf("(Rows affected: %d)", done.RowCount))
}
if (colsReceived || done.CurCmd != cmdSelect) && outs.msgq != nil {
@ -781,7 +1070,7 @@ func processSingleResponse(ctx context.Context, sess *tdsSession, ch chan tokenS
return
}
case tokenColMetadata:
columns = parseColMetadata72(sess.buf)
columns = parseColMetadata72(sess.buf, sess)
ch <- columns
colsReceived = true
if outs.msgq != nil {
@ -790,11 +1079,11 @@ func processSingleResponse(ctx context.Context, sess *tdsSession, ch chan tokenS
case tokenRow:
row := make([]interface{}, len(columns))
parseRow(sess.buf, columns, row)
parseRow(sess.buf, sess, columns, row)
ch <- row
case tokenNbcRow:
row := make([]interface{}, len(columns))
parseNbcRow(sess.buf, columns, row)
parseNbcRow(sess.buf, sess, columns, row)
ch <- row
case tokenEnvChange:
processEnvChg(ctx, sess)
@ -822,7 +1111,7 @@ func processSingleResponse(ctx context.Context, sess *tdsSession, ch chan tokenS
_ = sqlexp.ReturnMessageEnqueue(ctx, outs.msgq, sqlexp.MsgNotice{Message: info})
}
case tokenReturnValue:
nv := parseReturnValue(sess.buf)
nv := parseReturnValue(sess.buf, sess)
if len(nv.Name) > 0 {
name := nv.Name[1:] // Remove the leading "@".
if ov, has := outs.params[name]; has {


@ -89,6 +89,8 @@ const (
// http://msdn.microsoft.com/en-us/library/dd358284.aspx
type typeInfo struct {
TypeId uint8
UserType uint32
Flags uint16
Size int
Scale uint8
Prec uint8
@ -96,7 +98,7 @@ type typeInfo struct {
Collation cp.Collation
UdtInfo udtInfo
XmlInfo xmlInfo
Reader func(ti *typeInfo, r *tdsBuffer) (res interface{})
Reader func(ti *typeInfo, r *tdsBuffer, cryptoMeta *cryptoMetadata) (res interface{})
Writer func(w io.Writer, ti typeInfo, buf []byte) (err error)
}
@ -119,9 +121,9 @@ type xmlInfo struct {
XmlSchemaCollection string
}
func readTypeInfo(r *tdsBuffer) (res typeInfo) {
res.TypeId = r.byte()
switch res.TypeId {
func readTypeInfo(r *tdsBuffer, typeId byte, c *cryptoMetadata) (res typeInfo) {
res.TypeId = typeId
switch typeId {
case typeNull, typeInt1, typeBit, typeInt2, typeInt4, typeDateTim4,
typeFlt4, typeMoney, typeDateTime, typeFlt8, typeMoney4, typeInt8:
// those are fixed length types
@ -140,7 +142,7 @@ func readTypeInfo(r *tdsBuffer) (res typeInfo) {
res.Reader = readFixedType
res.Buffer = make([]byte, res.Size)
default: // all others are VARLENTYPE
readVarLen(&res, r)
readVarLen(&res, r, c)
}
return
}
@ -315,7 +317,7 @@ func decodeDateTime(buf []byte) time.Time {
0, 0, secs, ns, time.UTC)
}
func readFixedType(ti *typeInfo, r *tdsBuffer) interface{} {
func readFixedType(ti *typeInfo, r *tdsBuffer, c *cryptoMetadata) interface{} {
r.ReadFull(ti.Buffer)
buf := ti.Buffer
switch ti.TypeId {
@ -349,8 +351,13 @@ func readFixedType(ti *typeInfo, r *tdsBuffer) interface{} {
panic("shoulnd't get here")
}
func readByteLenType(ti *typeInfo, r *tdsBuffer) interface{} {
size := r.byte()
func readByteLenType(ti *typeInfo, r *tdsBuffer, c *cryptoMetadata) interface{} {
var size byte
if c != nil {
size = byte(r.rsize)
} else {
size = r.byte()
}
if size == 0 {
return nil
}
@ -433,7 +440,7 @@ func readByteLenType(ti *typeInfo, r *tdsBuffer) interface{} {
default:
badStreamPanicf("Invalid typeid")
}
panic("shoulnd't get here")
panic("shouldn't get here")
}
func writeByteLenType(w io.Writer, ti typeInfo, buf []byte) (err error) {
@ -448,8 +455,13 @@ func writeByteLenType(w io.Writer, ti typeInfo, buf []byte) (err error) {
return
}
func readShortLenType(ti *typeInfo, r *tdsBuffer) interface{} {
size := r.uint16()
func readShortLenType(ti *typeInfo, r *tdsBuffer, c *cryptoMetadata) interface{} {
var size uint16
if c != nil {
size = uint16(r.rsize)
} else {
size = r.uint16()
}
if size == 0xffff {
return nil
}
@ -491,7 +503,7 @@ func writeShortLenType(w io.Writer, ti typeInfo, buf []byte) (err error) {
return
}
func readLongLenType(ti *typeInfo, r *tdsBuffer) interface{} {
func readLongLenType(ti *typeInfo, r *tdsBuffer, c *cryptoMetadata) interface{} {
// information about this format can be found here:
// http://msdn.microsoft.com/en-us/library/dd304783.aspx
// and here:
@ -566,7 +578,7 @@ func writeCollation(w io.Writer, col cp.Collation) (err error) {
// reads variant value
// http://msdn.microsoft.com/en-us/library/dd303302.aspx
func readVariantType(ti *typeInfo, r *tdsBuffer) interface{} {
func readVariantType(ti *typeInfo, r *tdsBuffer, c *cryptoMetadata) interface{} {
size := r.int32()
if size == 0 {
return nil
@ -658,41 +670,47 @@ func readVariantType(ti *typeInfo, r *tdsBuffer) interface{} {
// partially length prefixed stream
// http://msdn.microsoft.com/en-us/library/dd340469.aspx
func readPLPType(ti *typeInfo, r *tdsBuffer) interface{} {
size := r.uint64()
var buf *bytes.Buffer
switch size {
case _PLP_NULL:
// null
return nil
case _UNKNOWN_PLP_LEN:
// size unknown
buf = bytes.NewBuffer(make([]byte, 0, 1000))
default:
buf = bytes.NewBuffer(make([]byte, 0, size))
}
for {
chunksize := r.uint32()
if chunksize == 0 {
break
func readPLPType(ti *typeInfo, r *tdsBuffer, c *cryptoMetadata) interface{} {
var bytesToDecode []byte
if c == nil {
size := r.uint64()
var buf *bytes.Buffer
switch size {
case _PLP_NULL:
// null
return nil
case _UNKNOWN_PLP_LEN:
// size unknown
buf = bytes.NewBuffer(make([]byte, 0, 1000))
default:
buf = bytes.NewBuffer(make([]byte, 0, size))
}
if _, err := io.CopyN(buf, r, int64(chunksize)); err != nil {
badStreamPanicf("Reading PLP type failed: %s", err.Error())
for {
chunksize := r.uint32()
if chunksize == 0 {
break
}
if _, err := io.CopyN(buf, r, int64(chunksize)); err != nil {
badStreamPanicf("Reading PLP type failed: %s", err.Error())
}
}
bytesToDecode = buf.Bytes()
} else {
bytesToDecode = r.rbuf
}
switch ti.TypeId {
case typeXml:
return decodeXml(*ti, buf.Bytes())
return decodeXml(*ti, bytesToDecode)
case typeBigVarChar, typeBigChar, typeText:
return decodeChar(ti.Collation, buf.Bytes())
return decodeChar(ti.Collation, bytesToDecode)
case typeBigVarBin, typeBigBinary, typeImage:
return buf.Bytes()
return bytesToDecode
case typeNVarChar, typeNChar, typeNText:
return decodeNChar(buf.Bytes())
return decodeNChar(bytesToDecode)
case typeUdt:
return decodeUdt(*ti, buf.Bytes())
return decodeUdt(*ti, bytesToDecode)
}
panic("shoulnd't get here")
panic("shouldn't get here")
}
func writePLPType(w io.Writer, ti typeInfo, buf []byte) (err error) {
@ -719,7 +737,7 @@ func writePLPType(w io.Writer, ti typeInfo, buf []byte) (err error) {
}
}
func readVarLen(ti *typeInfo, r *tdsBuffer) {
func readVarLen(ti *typeInfo, r *tdsBuffer, c *cryptoMetadata) {
switch ti.TypeId {
case typeDateN:
ti.Size = 3


@ -4,7 +4,7 @@ import "fmt"
// Update this variable with the release tag before pushing the tag
// This value is written to the prelogin and login7 packets during a new connection
const driverVersion = "v1.5.0"
const driverVersion = "v1.6.0"
func getDriverVersion(ver string) uint32 {
var majorVersion uint32


@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() {
return
}
switch c {
case ' ', '\n', '\r', '\t', '\f', '/':
z.pendingAttr[0].end = z.raw.end - 1
return
case '=':
if z.pendingAttr[0].start+1 == z.raw.end {
// WHATWG 13.2.5.32, if we see an equals sign before the attribute name
@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() {
continue
}
fallthrough
case '>':
case ' ', '\n', '\r', '\t', '\f', '/', '>':
// WHATWG 13.2.5.33 Attribute name state
// We need to reconsume the char in the after attribute name state to support the / character
z.raw.end--
z.pendingAttr[0].end = z.raw.end
return
@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() {
if z.err != nil {
return
}
if c == '/' {
// WHATWG 13.2.5.34 After attribute name state
// U+002F SOLIDUS (/) - Switch to the self-closing start tag state.
return
}
if c != '=' {
z.raw.end--
return
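As context for the hunk above, here is a minimal sketch (not part of the diff) of how the tokenizer's public API surfaces an attribute whose name is terminated by a solidus, the case the WHATWG-alignment change reconsumes. The API calls (html.NewTokenizer, Tokenizer.Next, Tokenizer.Token) come from golang.org/x/net/html as vendored here; the input string is purely illustrative.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// "required" is followed directly by '/', the case handled by the
	// after-attribute-name reconsume in the hunk above.
	z := html.NewTokenizer(strings.NewReader(`<input required/>`))
	for {
		if z.Next() == html.ErrorToken {
			return // io.EOF terminates the token stream
		}
		tok := z.Token()
		for _, a := range tok.Attr {
			fmt.Printf("tag=%s attr=%q val=%q\n", tok.Data, a.Key, a.Val)
		}
	}
}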

vendor/golang.org/x/text/LICENSE (generated, vendored, new file, 27 lines added)

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/golang.org/x/text/PATENTS (generated, vendored, new file, 22 lines added)

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

vendor/golang.org/x/text/encoding/encoding.go (generated, vendored, new file, 335 lines added)

@ -0,0 +1,335 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package encoding defines an interface for character encodings, such as Shift
// JIS and Windows 1252, that can convert to and from UTF-8.
//
// Encoding implementations are provided in other packages, such as
// golang.org/x/text/encoding/charmap and
// golang.org/x/text/encoding/japanese.
package encoding // import "golang.org/x/text/encoding"
import (
"errors"
"io"
"strconv"
"unicode/utf8"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// TODO:
// - There seems to be some inconsistency in when decoders return errors
// and when not. Also documentation seems to suggest they shouldn't return
// errors at all (except for UTF-16).
// - Encoders seem to rely on or at least benefit from the input being in NFC
// normal form. Perhaps add an example how users could prepare their output.
// Encoding is a character set encoding that can be transformed to and from
// UTF-8.
type Encoding interface {
// NewDecoder returns a Decoder.
NewDecoder() *Decoder
// NewEncoder returns an Encoder.
NewEncoder() *Encoder
}
// A Decoder converts bytes to UTF-8. It implements transform.Transformer.
//
// Transforming source bytes that are not of that encoding will not result in an
// error per se. Each byte that cannot be transcoded will be represented in the
// output by the UTF-8 encoding of '\uFFFD', the replacement rune.
type Decoder struct {
transform.Transformer
// This forces external creators of Decoders to use names in struct
// initializers, allowing for future extendibility without having to break
// code.
_ struct{}
}
// Bytes converts the given encoded bytes to UTF-8. It returns the converted
// bytes or nil, err if any error occurred.
func (d *Decoder) Bytes(b []byte) ([]byte, error) {
b, _, err := transform.Bytes(d, b)
if err != nil {
return nil, err
}
return b, nil
}
// String converts the given encoded string to UTF-8. It returns the converted
// string or "", err if any error occurred.
func (d *Decoder) String(s string) (string, error) {
s, _, err := transform.String(d, s)
if err != nil {
return "", err
}
return s, nil
}
// Reader wraps another Reader to decode its bytes.
//
// The Decoder may not be used for any other operation as long as the returned
// Reader is in use.
func (d *Decoder) Reader(r io.Reader) io.Reader {
return transform.NewReader(r, d)
}
// An Encoder converts bytes from UTF-8. It implements transform.Transformer.
//
// Each rune that cannot be transcoded will result in an error. In this case,
// the transform will consume all source byte up to, not including the offending
// rune. Transforming source bytes that are not valid UTF-8 will be replaced by
// `\uFFFD`. To return early with an error instead, use transform.Chain to
// preprocess the data with a UTF8Validator.
type Encoder struct {
transform.Transformer
// This forces external creators of Encoders to use names in struct
// initializers, allowing for future extendibility without having to break
// code.
_ struct{}
}
// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if
// any error occurred.
func (e *Encoder) Bytes(b []byte) ([]byte, error) {
b, _, err := transform.Bytes(e, b)
if err != nil {
return nil, err
}
return b, nil
}
// String converts a string from UTF-8. It returns the converted string or
// "", err if any error occurred.
func (e *Encoder) String(s string) (string, error) {
s, _, err := transform.String(e, s)
if err != nil {
return "", err
}
return s, nil
}
// Writer wraps another Writer to encode its UTF-8 output.
//
// The Encoder may not be used for any other operation as long as the returned
// Writer is in use.
func (e *Encoder) Writer(w io.Writer) io.Writer {
return transform.NewWriter(w, e)
}
// ASCIISub is the ASCII substitute character, as recommended by
// https://unicode.org/reports/tr36/#Text_Comparison
const ASCIISub = '\x1a'
// Nop is the nop encoding. Its transformed bytes are the same as the source
// bytes; it does not replace invalid UTF-8 sequences.
var Nop Encoding = nop{}
type nop struct{}
func (nop) NewDecoder() *Decoder {
return &Decoder{Transformer: transform.Nop}
}
func (nop) NewEncoder() *Encoder {
return &Encoder{Transformer: transform.Nop}
}
// Replacement is the replacement encoding. Decoding from the replacement
// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to
// the replacement encoding yields the same as the source bytes except that
// invalid UTF-8 is converted to '\uFFFD'.
//
// It is defined at http://encoding.spec.whatwg.org/#replacement
var Replacement Encoding = replacement{}
type replacement struct{}
func (replacement) NewDecoder() *Decoder {
return &Decoder{Transformer: replacementDecoder{}}
}
func (replacement) NewEncoder() *Encoder {
return &Encoder{Transformer: replacementEncoder{}}
}
func (replacement) ID() (mib identifier.MIB, other string) {
return identifier.Replacement, ""
}
type replacementDecoder struct{ transform.NopResetter }
func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(dst) < 3 {
return 0, 0, transform.ErrShortDst
}
if atEOF {
const fffd = "\ufffd"
dst[0] = fffd[0]
dst[1] = fffd[1]
dst[2] = fffd[2]
nDst = 3
}
return nDst, len(src), nil
}
type replacementEncoder struct{ transform.NopResetter }
func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
for ; nSrc < len(src); nSrc += size {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
r = '\ufffd'
}
}
if nDst+utf8.RuneLen(r) > len(dst) {
err = transform.ErrShortDst
break
}
nDst += utf8.EncodeRune(dst[nDst:], r)
}
return nDst, nSrc, err
}
// HTMLEscapeUnsupported wraps encoders to replace source runes outside the
// repertoire of the destination encoding with HTML escape sequences.
//
// This wrapper exists to comply to URL and HTML forms requiring a
// non-terminating legacy encoder. The produced sequences may lead to data
// loss as they are indistinguishable from legitimate input. To avoid this
// issue, use UTF-8 encodings whenever possible.
func HTMLEscapeUnsupported(e *Encoder) *Encoder {
return &Encoder{Transformer: &errorHandler{e, errorToHTML}}
}
// ReplaceUnsupported wraps encoders to replace source runes outside the
// repertoire of the destination encoding with an encoding-specific
// replacement.
//
// This wrapper is only provided for backwards compatibility and legacy
// handling. Its use is strongly discouraged. Use UTF-8 whenever possible.
func ReplaceUnsupported(e *Encoder) *Encoder {
return &Encoder{Transformer: &errorHandler{e, errorToReplacement}}
}
type errorHandler struct {
*Encoder
handler func(dst []byte, r rune, err repertoireError) (n int, ok bool)
}
// TODO: consider making this error public in some form.
type repertoireError interface {
Replacement() byte
}
func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF)
for err != nil {
rerr, ok := err.(repertoireError)
if !ok {
return nDst, nSrc, err
}
r, sz := utf8.DecodeRune(src[nSrc:])
n, ok := h.handler(dst[nDst:], r, rerr)
if !ok {
return nDst, nSrc, transform.ErrShortDst
}
err = nil
nDst += n
if nSrc += sz; nSrc < len(src) {
var dn, sn int
dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF)
nDst += dn
nSrc += sn
}
}
return nDst, nSrc, err
}
func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) {
buf := [8]byte{}
b := strconv.AppendUint(buf[:0], uint64(r), 10)
if n = len(b) + len("&#;"); n >= len(dst) {
return 0, false
}
dst[0] = '&'
dst[1] = '#'
dst[copy(dst[2:], b)+2] = ';'
return n, true
}
func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) {
if len(dst) == 0 {
return 0, false
}
dst[0] = err.Replacement()
return 1, true
}
// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8.
var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8")
// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first
// input byte that is not valid UTF-8.
var UTF8Validator transform.Transformer = utf8Validator{}
type utf8Validator struct{ transform.NopResetter }
func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := len(src)
if n > len(dst) {
n = len(dst)
}
for i := 0; i < n; {
if c := src[i]; c < utf8.RuneSelf {
dst[i] = c
i++
continue
}
_, size := utf8.DecodeRune(src[i:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
err = ErrInvalidUTF8
if !atEOF && !utf8.FullRune(src[i:]) {
err = transform.ErrShortSrc
}
return i, i, err
}
if i+size > len(dst) {
return i, i, transform.ErrShortDst
}
for ; size > 0; size-- {
dst[i] = src[i]
i++
}
}
if len(src) > len(dst) {
err = transform.ErrShortDst
}
return n, n, err
}
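A minimal usage sketch (not part of the vendored code) for the Decoder/Encoder API defined above. It assumes the sibling package golang.org/x/text/encoding/charmap, which is not vendored in this change; the byte values are illustrative Windows-1252 curly quotes.

package main

import (
	"fmt"
	"log"

	"golang.org/x/text/encoding/charmap"
)

func main() {
	// 0x93/0x94 are curly quotes in Windows-1252; the Decoder yields their UTF-8 forms.
	raw := string([]byte{0x93, 'h', 'i', 0x94})
	utf8Str, err := charmap.Windows1252.NewDecoder().String(raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(utf8Str)

	// The Encoder converts UTF-8 back into Windows-1252 bytes.
	back, err := charmap.Windows1252.NewEncoder().String(utf8Str)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", back)
}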


@ -0,0 +1,81 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// Package identifier defines the contract between implementations of Encoding
// and Index by defining identifiers that uniquely identify standardized coded
// character sets (CCS) and character encoding schemes (CES), which we will
// together refer to as encodings, for which Encoding implementations provide
// converters to and from UTF-8. This package is typically only of concern to
// implementers of Indexes and Encodings.
//
// One part of the identifier is the MIB code, which is defined by IANA and
// uniquely identifies a CCS or CES. Each code is associated with data that
// references authorities, official documentation as well as aliases and MIME
// names.
//
// Not all CESs are covered by the IANA registry. The "other" string that is
// returned by ID can be used to identify other character sets or versions of
// existing ones.
//
// It is recommended that each package that provides a set of Encodings provide
// the All and Common variables to reference all supported encodings and
// commonly used subset. This allows Index implementations to include all
// available encodings without explicitly referencing or knowing about them.
package identifier
// Note: this package is internal, but could be made public if there is a need
// for writing third-party Indexes and Encodings.
// References:
// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt
// - http://www.iana.org/assignments/character-sets/character-sets.xhtml
// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib
// - http://www.ietf.org/rfc/rfc2978.txt
// - https://www.unicode.org/reports/tr22/
// - http://www.w3.org/TR/encoding/
// - https://encoding.spec.whatwg.org/
// - https://encoding.spec.whatwg.org/encodings.json
// - https://tools.ietf.org/html/rfc6657#section-5
// Interface can be implemented by Encodings to define the CCS or CES for which
// it implements conversions.
type Interface interface {
// ID returns an encoding identifier. Exactly one of the mib and other
// values should be non-zero.
//
// In the usual case it is only necessary to indicate the MIB code. The
// other string can be used to specify encodings for which there is no MIB,
// such as "x-mac-dingbat".
//
// The other string may only contain the characters a-z, A-Z, 0-9, - and _.
ID() (mib MIB, other string)
// NOTE: the restrictions on the encoding are to allow extending the syntax
// with additional information such as versions, vendors and other variants.
}
// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds
// some identifiers for some encodings that are not covered by the IANA
// standard.
//
// See http://www.iana.org/assignments/ianacharset-mib.
type MIB uint16
// These additional MIB types are not defined in IANA. They are added because
// they are common and defined within the text repo.
const (
// Unofficial marks the start of encodings not registered by IANA.
Unofficial MIB = 10000 + iota
// Replacement is the WhatWG replacement encoding.
Replacement
// XUserDefined is the code for x-user-defined.
XUserDefined
// MacintoshCyrillic is the code for x-mac-cyrillic.
MacintoshCyrillic
)

File diff suppressed because it is too large.

vendor/golang.org/x/text/encoding/internal/internal.go (generated, vendored, new file, 75 lines added)

@ -0,0 +1,75 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal contains code that is shared among encoding implementations.
package internal
import (
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// Encoding is an implementation of the Encoding interface that adds the String
// and ID methods to an existing encoding.
type Encoding struct {
encoding.Encoding
Name string
MIB identifier.MIB
}
// _ verifies that Encoding implements identifier.Interface.
var _ identifier.Interface = (*Encoding)(nil)
func (e *Encoding) String() string {
return e.Name
}
func (e *Encoding) ID() (mib identifier.MIB, other string) {
return e.MIB, ""
}
// SimpleEncoding is an Encoding that combines two Transformers.
type SimpleEncoding struct {
Decoder transform.Transformer
Encoder transform.Transformer
}
func (e *SimpleEncoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: e.Decoder}
}
func (e *SimpleEncoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: e.Encoder}
}
// FuncEncoding is an Encoding that combines two functions returning a new
// Transformer.
type FuncEncoding struct {
Decoder func() transform.Transformer
Encoder func() transform.Transformer
}
func (e FuncEncoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: e.Decoder()}
}
func (e FuncEncoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: e.Encoder()}
}
// A RepertoireError indicates a rune is not in the repertoire of a destination
// encoding. It is associated with an encoding-specific suggested replacement
// byte.
type RepertoireError byte
// Error implements the error interface.
func (r RepertoireError) Error() string {
return "encoding: rune not supported by encoding."
}
// Replacement returns the replacement string associated with this error.
func (r RepertoireError) Replacement() byte { return byte(r) }
var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub)

vendor/golang.org/x/text/encoding/unicode/override.go (generated, vendored, new file, 82 lines added)

@ -0,0 +1,82 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unicode
import (
"golang.org/x/text/transform"
)
// BOMOverride returns a new decoder transformer that is identical to fallback,
// except that the presence of a Byte Order Mark at the start of the input
// causes it to switch to the corresponding Unicode decoding. It will only
// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE.
//
// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not
// just UTF-16 variants, and allowing falling back to any encoding scheme.
//
// This technique is recommended by the W3C for use in HTML 5: "For
// compatibility with deployed content, the byte order mark (also known as BOM)
// is considered more authoritative than anything else."
// http://www.w3.org/TR/encoding/#specification-hooks
//
// Using BOMOverride is mostly intended for use cases where the first characters
// of a fallback encoding are known to not be a BOM, for example, for valid HTML
// and most encodings.
func BOMOverride(fallback transform.Transformer) transform.Transformer {
// TODO: possibly allow a variadic argument of unicode encodings to allow
// specifying details of which fallbacks are supported as well as
// specifying the details of the implementations. This would also allow for
// support for UTF-32, which should not be supported by default.
return &bomOverride{fallback: fallback}
}
type bomOverride struct {
fallback transform.Transformer
current transform.Transformer
}
func (d *bomOverride) Reset() {
d.current = nil
d.fallback.Reset()
}
var (
// TODO: we could use decode functions here, instead of allocating a new
// decoder on every NewDecoder as IgnoreBOM decoders can be stateless.
utf16le = UTF16(LittleEndian, IgnoreBOM)
utf16be = UTF16(BigEndian, IgnoreBOM)
)
const utf8BOM = "\ufeff"
func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if d.current != nil {
return d.current.Transform(dst, src, atEOF)
}
if len(src) < 3 && !atEOF {
return 0, 0, transform.ErrShortSrc
}
d.current = d.fallback
bomSize := 0
if len(src) >= 2 {
if src[0] == 0xFF && src[1] == 0xFE {
d.current = utf16le.NewDecoder()
bomSize = 2
} else if src[0] == 0xFE && src[1] == 0xFF {
d.current = utf16be.NewDecoder()
bomSize = 2
} else if len(src) >= 3 &&
src[0] == utf8BOM[0] &&
src[1] == utf8BOM[1] &&
src[2] == utf8BOM[2] {
d.current = transform.Nop
bomSize = 3
}
}
if bomSize < len(src) {
nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF)
}
return nDst, nSrc + bomSize, err
}
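A hedged usage sketch for BOMOverride (not part of the vendored code): wrap a fallback decoder so a leading UTF-8 or UTF-16 BOM switches decoding automatically. The Windows-1252 fallback from golang.org/x/text/encoding/charmap is an assumption (that package is not vendored in this change), and reading from os.Stdin is illustrative.

package main

import (
	"io"
	"log"
	"os"

	"golang.org/x/text/encoding/charmap"
	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// If the input starts with a UTF-8, UTF-16BE or UTF-16LE BOM, the matching
	// Unicode decoder is used; otherwise bytes fall through to Windows-1252.
	fallback := charmap.Windows1252.NewDecoder()
	r := transform.NewReader(os.Stdin, unicode.BOMOverride(fallback))
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}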

vendor/golang.org/x/text/encoding/unicode/unicode.go (generated, vendored, new file, 512 lines added)

@ -0,0 +1,512 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package unicode provides Unicode encodings such as UTF-16.
package unicode // import "golang.org/x/text/encoding/unicode"
import (
"bytes"
"errors"
"unicode/utf16"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/internal/utf8internal"
"golang.org/x/text/runes"
"golang.org/x/text/transform"
)
// TODO: I think the Transformers really should return errors on unmatched
// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781,
// which leaves it open, but is suggested by WhatWG. It will allow for all error
// modes as defined by WhatWG: fatal, HTML and Replacement. This would require
// the introduction of some kind of error type for conveying the erroneous code
// point.
// UTF8 is the UTF-8 encoding. It neither removes nor adds byte order marks.
var UTF8 encoding.Encoding = utf8enc
// UTF8BOM is an UTF-8 encoding where the decoder strips a leading byte order
// mark while the encoder adds one.
//
// Some editors add a byte order mark as a signature to UTF-8 files. Although
// the byte order mark is not useful for detecting byte order in UTF-8, it is
// sometimes used as a convention to mark UTF-8-encoded files. This relies on
// the observation that the UTF-8 byte order mark is either an illegal or at
// least very unlikely sequence in any other character encoding.
var UTF8BOM encoding.Encoding = utf8bomEncoding{}
type utf8bomEncoding struct{}
func (utf8bomEncoding) String() string {
return "UTF-8-BOM"
}
func (utf8bomEncoding) ID() (identifier.MIB, string) {
return identifier.Unofficial, "x-utf8bom"
}
func (utf8bomEncoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{
Transformer: &utf8bomEncoder{t: runes.ReplaceIllFormed()},
}
}
func (utf8bomEncoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: &utf8bomDecoder{}}
}
var utf8enc = &internal.Encoding{
&internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()},
"UTF-8",
identifier.UTF8,
}
type utf8bomDecoder struct {
checked bool
}
func (t *utf8bomDecoder) Reset() {
t.checked = false
}
func (t *utf8bomDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if !t.checked {
if !atEOF && len(src) < len(utf8BOM) {
if len(src) == 0 {
return 0, 0, nil
}
return 0, 0, transform.ErrShortSrc
}
if bytes.HasPrefix(src, []byte(utf8BOM)) {
nSrc += len(utf8BOM)
src = src[len(utf8BOM):]
}
t.checked = true
}
nDst, n, err := utf8Decoder.Transform(utf8Decoder{}, dst[nDst:], src, atEOF)
nSrc += n
return nDst, nSrc, err
}
type utf8bomEncoder struct {
written bool
t transform.Transformer
}
func (t *utf8bomEncoder) Reset() {
t.written = false
t.t.Reset()
}
func (t *utf8bomEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if !t.written {
if len(dst) < len(utf8BOM) {
return nDst, 0, transform.ErrShortDst
}
nDst = copy(dst, utf8BOM)
t.written = true
}
n, nSrc, err := utf8Decoder.Transform(utf8Decoder{}, dst[nDst:], src, atEOF)
nDst += n
return nDst, nSrc, err
}
type utf8Decoder struct{ transform.NopResetter }
func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
var pSrc int // point from which to start copy in src
var accept utf8internal.AcceptRange
// The decoder can only make the input larger, not smaller.
n := len(src)
if len(dst) < n {
err = transform.ErrShortDst
n = len(dst)
atEOF = false
}
for nSrc < n {
c := src[nSrc]
if c < utf8.RuneSelf {
nSrc++
continue
}
first := utf8internal.First[c]
size := int(first & utf8internal.SizeMask)
if first == utf8internal.FirstInvalid {
goto handleInvalid // invalid starter byte
}
accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift]
if nSrc+size > n {
if !atEOF {
// We may stop earlier than necessary here if the short sequence
// has invalid bytes. Not checking for this simplifies the code
// and may avoid duplicate computations in certain conditions.
if err == nil {
err = transform.ErrShortSrc
}
break
}
// Determine the maximal subpart of an ill-formed subsequence.
switch {
case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]:
size = 1
case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]:
size = 2
default:
size = 3 // As we are short, the maximum is 3.
}
goto handleInvalid
}
if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c {
size = 1
goto handleInvalid // invalid continuation byte
} else if size == 2 {
} else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c {
size = 2
goto handleInvalid // invalid continuation byte
} else if size == 3 {
} else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c {
size = 3
goto handleInvalid // invalid continuation byte
}
nSrc += size
continue
handleInvalid:
// Copy the scanned input so far.
nDst += copy(dst[nDst:], src[pSrc:nSrc])
// Append RuneError to the destination.
const runeError = "\ufffd"
if nDst+len(runeError) > len(dst) {
return nDst, nSrc, transform.ErrShortDst
}
nDst += copy(dst[nDst:], runeError)
// Skip the maximal subpart of an ill-formed subsequence according to
// the W3C standard way instead of the Go way. This Transform is
// probably the only place in the text repo where it is warranted.
nSrc += size
pSrc = nSrc
// Recompute the maximum source length.
if sz := len(dst) - nDst; sz < len(src)-nSrc {
err = transform.ErrShortDst
n = nSrc + sz
atEOF = false
}
}
return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err
}
// UTF16 returns a UTF-16 Encoding for the given default endianness and byte
// order mark (BOM) policy.
//
// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then
// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect
// the endianness used for decoding, and will instead be output as their
// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy
// is UseBOM or ExpectBOM a staring BOM is not written to the UTF-8 output.
// Instead, it overrides the default endianness e for the remainder of the
// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not
// affect the endianness used, and will instead be output as their standard
// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed
// with the default Endianness. For ExpectBOM, in that case, the transformation
// will return early with an ErrMissingBOM error.
//
// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of
// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not
// be inserted. The UTF-8 input does not need to contain a BOM.
//
// There is no concept of a 'native' endianness. If the UTF-16 data is produced
// and consumed in a greater context that implies a certain endianness, use
// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM.
//
// In the language of https://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM
// corresponds to "Where the precise type of the data stream is known... the
// BOM should not be used" and ExpectBOM corresponds to "A particular
// protocol... may require use of the BOM".
func UTF16(e Endianness, b BOMPolicy) encoding.Encoding {
return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]}
}
// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that
// some configurations map to the same MIB identifier. RFC 2781 has requirements
// and recommendations. Some of the "configurations" are merely recommendations,
// so multiple configurations could match.
var mibValue = map[Endianness][numBOMValues]identifier.MIB{
BigEndian: [numBOMValues]identifier.MIB{
IgnoreBOM: identifier.UTF16BE,
UseBOM: identifier.UTF16, // BigEnding default is preferred by RFC 2781.
// TODO: acceptBOM | strictBOM would map to UTF16BE as well.
},
LittleEndian: [numBOMValues]identifier.MIB{
IgnoreBOM: identifier.UTF16LE,
UseBOM: identifier.UTF16, // LittleEndian default is allowed and preferred on Windows.
// TODO: acceptBOM | strictBOM would map to UTF16LE as well.
},
// ExpectBOM is not widely used and has no valid MIB identifier.
}
// All lists a configuration for each IANA-defined UTF-16 variant.
var All = []encoding.Encoding{
UTF8,
UTF16(BigEndian, UseBOM),
UTF16(BigEndian, IgnoreBOM),
UTF16(LittleEndian, IgnoreBOM),
}
// BOMPolicy is a UTF-16 encoding's byte order mark policy.
type BOMPolicy uint8
const (
writeBOM BOMPolicy = 0x01
acceptBOM BOMPolicy = 0x02
requireBOM BOMPolicy = 0x04
bomMask BOMPolicy = 0x07
// HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a
// map of an array of length 8 of a type that is also used as a key or value
// in another map). See golang.org/issue/11354.
// TODO: consider changing this value back to 8 if the use of 1.4.* has
// been minimized.
numBOMValues = 8 + 1
// IgnoreBOM means to ignore any byte order marks.
IgnoreBOM BOMPolicy = 0
// Common and RFC 2781-compliant interpretation for UTF-16BE/LE.
// UseBOM means that the UTF-16 form may start with a byte order mark, which
// will be used to override the default encoding.
UseBOM BOMPolicy = writeBOM | acceptBOM
// Common and RFC 2781-compliant interpretation for UTF-16.
// ExpectBOM means that the UTF-16 form must start with a byte order mark,
// which will be used to override the default encoding.
ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM
// Used in Java as Unicode (not to be confused with Java's UTF-16) and
// ICU's UTF-16,version=1. Not compliant with RFC 2781.
// TODO (maybe): strictBOM: BOM must match Endianness. This would allow:
// - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM
// (UnicodeBig and UnicodeLittle in Java)
// - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E:
// acceptBOM | strictBOM (e.g. assigned to CheckBOM).
// This addition would be consistent with supporting ExpectBOM.
)
// Endianness is a UTF-16 encoding's default endianness.
type Endianness bool
const (
// BigEndian is UTF-16BE.
BigEndian Endianness = false
// LittleEndian is UTF-16LE.
LittleEndian Endianness = true
)
// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a
// starting byte order mark.
var ErrMissingBOM = errors.New("encoding: missing byte order mark")
type utf16Encoding struct {
config
mib identifier.MIB
}
type config struct {
endianness Endianness
bomPolicy BOMPolicy
}
func (u utf16Encoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: &utf16Decoder{
initial: u.config,
current: u.config,
}}
}
func (u utf16Encoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: &utf16Encoder{
endianness: u.endianness,
initialBOMPolicy: u.bomPolicy,
currentBOMPolicy: u.bomPolicy,
}}
}
func (u utf16Encoding) ID() (mib identifier.MIB, other string) {
return u.mib, ""
}
func (u utf16Encoding) String() string {
e, b := "B", ""
if u.endianness == LittleEndian {
e = "L"
}
switch u.bomPolicy {
case ExpectBOM:
b = "Expect"
case UseBOM:
b = "Use"
case IgnoreBOM:
b = "Ignore"
}
return "UTF-16" + e + "E (" + b + " BOM)"
}
type utf16Decoder struct {
initial config
current config
}
func (u *utf16Decoder) Reset() {
u.current = u.initial
}
func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(src) < 2 && atEOF && u.current.bomPolicy&requireBOM != 0 {
return 0, 0, ErrMissingBOM
}
if len(src) == 0 {
return 0, 0, nil
}
if len(src) >= 2 && u.current.bomPolicy&acceptBOM != 0 {
switch {
case src[0] == 0xfe && src[1] == 0xff:
u.current.endianness = BigEndian
nSrc = 2
case src[0] == 0xff && src[1] == 0xfe:
u.current.endianness = LittleEndian
nSrc = 2
default:
if u.current.bomPolicy&requireBOM != 0 {
return 0, 0, ErrMissingBOM
}
}
u.current.bomPolicy = IgnoreBOM
}
var r rune
var dSize, sSize int
for nSrc < len(src) {
if nSrc+1 < len(src) {
x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1])
if u.current.endianness == LittleEndian {
x = x>>8 | x<<8
}
r, sSize = rune(x), 2
if utf16.IsSurrogate(r) {
if nSrc+3 < len(src) {
x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3])
if u.current.endianness == LittleEndian {
x = x>>8 | x<<8
}
// Save for next iteration if it is not a high surrogate.
if isHighSurrogate(rune(x)) {
r, sSize = utf16.DecodeRune(r, rune(x)), 4
}
} else if !atEOF {
err = transform.ErrShortSrc
break
}
}
if dSize = utf8.RuneLen(r); dSize < 0 {
r, dSize = utf8.RuneError, 3
}
} else if atEOF {
// Single trailing byte.
r, dSize, sSize = utf8.RuneError, 3, 1
} else {
err = transform.ErrShortSrc
break
}
if nDst+dSize > len(dst) {
err = transform.ErrShortDst
break
}
nDst += utf8.EncodeRune(dst[nDst:], r)
nSrc += sSize
}
return nDst, nSrc, err
}
func isHighSurrogate(r rune) bool {
return 0xDC00 <= r && r <= 0xDFFF
}
type utf16Encoder struct {
endianness Endianness
initialBOMPolicy BOMPolicy
currentBOMPolicy BOMPolicy
}
func (u *utf16Encoder) Reset() {
u.currentBOMPolicy = u.initialBOMPolicy
}
func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if u.currentBOMPolicy&writeBOM != 0 {
if len(dst) < 2 {
return 0, 0, transform.ErrShortDst
}
dst[0], dst[1] = 0xfe, 0xff
u.currentBOMPolicy = IgnoreBOM
nDst = 2
}
r, size := rune(0), 0
for nSrc < len(src) {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
}
}
if r <= 0xffff {
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = uint8(r >> 8)
dst[nDst+1] = uint8(r)
nDst += 2
} else {
if nDst+4 > len(dst) {
err = transform.ErrShortDst
break
}
r1, r2 := utf16.EncodeRune(r)
dst[nDst+0] = uint8(r1 >> 8)
dst[nDst+1] = uint8(r1)
dst[nDst+2] = uint8(r2 >> 8)
dst[nDst+3] = uint8(r2)
nDst += 4
}
nSrc += size
}
if u.endianness == LittleEndian {
for i := 0; i < nDst; i += 2 {
dst[i], dst[i+1] = dst[i+1], dst[i]
}
}
return nDst, nSrc, err
}
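A minimal round-trip sketch (not part of the vendored code) for the UTF16 constructor and the BOM policies documented above; the sample string and printed hex are illustrative.

package main

import (
	"fmt"
	"log"

	"golang.org/x/text/encoding/unicode"
)

func main() {
	// UTF-16LE with a BOM: the encoder writes the BOM, the decoder consumes it.
	enc := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM)

	utf16Bytes, err := enc.NewEncoder().Bytes([]byte("héllo"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", utf16Bytes) // begins with ff fe, the little-endian BOM

	roundTrip, err := enc.NewDecoder().Bytes(utf16Bytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(roundTrip)) // héllo
}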


@ -0,0 +1,87 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package utf8internal contains low-level utf8-related constants, tables, etc.
// that are used internally by the text package.
package utf8internal
// The default lowest and highest continuation byte.
const (
LoCB = 0x80 // 1000 0000
HiCB = 0xBF // 1011 1111
)
// Constants related to getting information of first bytes of UTF-8 sequences.
const (
// ASCII identifies a UTF-8 byte as ASCII.
ASCII = as
// FirstInvalid indicates a byte is invalid as a first byte of a UTF-8
// sequence.
FirstInvalid = xx
// SizeMask is a mask for the size bits. Use use x&SizeMask to get the size.
SizeMask = 7
// AcceptShift is the right-shift count for the first byte info byte to get
// the index into the AcceptRanges table. See AcceptRanges.
AcceptShift = 4
// The names of these constants are chosen to give nice alignment in the
// table below. The first nibble is an index into acceptRanges or F for
// special one-byte cases. The second nibble is the Rune length or the
// Status for the special one-byte case.
xx = 0xF1 // invalid: size 1
as = 0xF0 // ASCII: size 1
s1 = 0x02 // accept 0, size 2
s2 = 0x13 // accept 1, size 3
s3 = 0x03 // accept 0, size 3
s4 = 0x23 // accept 2, size 3
s5 = 0x34 // accept 3, size 4
s6 = 0x04 // accept 0, size 4
s7 = 0x44 // accept 4, size 4
)
// First is information about the first byte in a UTF-8 sequence.
var First = [256]uint8{
// 1 2 3 4 5 6 7 8 9 A B C D E F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
// 1 2 3 4 5 6 7 8 9 A B C D E F
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
}
// AcceptRange gives the range of valid values for the second byte in a UTF-8
// sequence for any value for First that is not ASCII or FirstInvalid.
type AcceptRange struct {
Lo uint8 // lowest value for second byte.
Hi uint8 // highest value for second byte.
}
// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b
//
// AcceptRanges[First[b[0]]>>AcceptShift]
//
// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting
// at b[0].
var AcceptRanges = [...]AcceptRange{
0: {LoCB, HiCB},
1: {0xA0, HiCB},
2: {LoCB, 0x9F},
3: {0x90, HiCB},
4: {LoCB, 0x8F},
}

vendor/golang.org/x/text/runes/cond.go (generated, vendored, new file, 187 lines added)

@ -0,0 +1,187 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runes
import (
"unicode/utf8"
"golang.org/x/text/transform"
)
// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
// This is done for various reasons:
// - To retain the semantics of the Nop transformer: if input is passed to a Nop
// one would expect it to be unchanged.
// - It would be very expensive to pass a converted RuneError to a transformer:
// a transformer might need more source bytes after RuneError, meaning that
// the only way to pass it safely is to create a new buffer and manage the
// intermingling of RuneErrors and normal input.
// - Many transformers leave ill-formed UTF-8 as is, so this is not
// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
// logical consequence of the operation (as for Map) or if it otherwise would
// pose security concerns (as for Remove).
// - An alternative would be to return an error on ill-formed UTF-8, but this
// would be inconsistent with other operations.
// If returns a transformer that applies tIn to consecutive runes for which
// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
// is called on tIn and tNotIn at the start of each run. A Nop transformer will
// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated
// to RuneError to determine which transformer to apply, but is passed as is to
// the respective transformer.
func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
if tIn == nil && tNotIn == nil {
return Transformer{transform.Nop}
}
if tIn == nil {
tIn = transform.Nop
}
if tNotIn == nil {
tNotIn = transform.Nop
}
sIn, ok := tIn.(transform.SpanningTransformer)
if !ok {
sIn = dummySpan{tIn}
}
sNotIn, ok := tNotIn.(transform.SpanningTransformer)
if !ok {
sNotIn = dummySpan{tNotIn}
}
a := &cond{
tIn: sIn,
tNotIn: sNotIn,
f: s.Contains,
}
a.Reset()
return Transformer{a}
}
type dummySpan struct{ transform.Transformer }
func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
return 0, transform.ErrEndOfSpan
}
type cond struct {
tIn, tNotIn transform.SpanningTransformer
f func(rune) bool
check func(rune) bool // current check to perform
t transform.SpanningTransformer // current transformer to use
}
// Reset implements transform.Transformer.
func (t *cond) Reset() {
t.check = t.is
t.t = t.tIn
t.t.Reset() // notIn will be reset on first usage.
}
func (t *cond) is(r rune) bool {
if t.f(r) {
return true
}
t.check = t.isNot
t.t = t.tNotIn
t.tNotIn.Reset()
return false
}
func (t *cond) isNot(r rune) bool {
if !t.f(r) {
return true
}
t.check = t.is
t.t = t.tIn
t.tIn.Reset()
return false
}
// This implementation of Span doesn't help all too much, but it needs to be
// there to satisfy this package's Transformer interface.
// TODO: there are certainly room for improvements, though. For example, if
// t.t == transform.Nop (which will a common occurrence) it will save a bundle
// to special-case that loop.
func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
p := 0
for n < len(src) && err == nil {
// Don't process too much at a time as the Spanner that will be
// called on this block may terminate early.
const maxChunk = 4096
max := len(src)
if v := n + maxChunk; v < max {
max = v
}
atEnd := false
size := 0
current := t.t
for ; p < max; p += size {
r := rune(src[p])
if r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
if !atEOF && !utf8.FullRune(src[p:]) {
err = transform.ErrShortSrc
break
}
}
if !t.check(r) {
// The next rune will be the start of a new run.
atEnd = true
break
}
}
n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
n += n2
if err2 != nil {
return n, err2
}
// At this point either err != nil or t.check will pass for the rune at p.
p = n + size
}
return n, err
}
func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
p := 0
for nSrc < len(src) && err == nil {
// Don't process too much at a time, as the work might be wasted if the
// destination buffer isn't large enough to hold the result or a
// transform returns an error early.
const maxChunk = 4096
max := len(src)
if n := nSrc + maxChunk; n < len(src) {
max = n
}
atEnd := false
size := 0
current := t.t
for ; p < max; p += size {
r := rune(src[p])
if r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
if !atEOF && !utf8.FullRune(src[p:]) {
err = transform.ErrShortSrc
break
}
}
if !t.check(r) {
// The next rune will be the start of a new run.
atEnd = true
break
}
}
nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
nDst += nDst2
nSrc += nSrc2
if err2 != nil {
return nDst, nSrc, err2
}
// At this point either err != nil or t.check will pass for the rune at p.
p = nSrc + size
}
return nDst, nSrc, err
}
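A small sketch (not part of the vendored code) of If combined with Map from runes.go below: apply a transformer only to runes inside a set, leaving everything else untouched. The input string is illustrative.

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
)

func main() {
	// Uppercase only Latin letters; digits and Greek letters pass through the
	// implicit Nop transformer substituted for the nil tNotIn argument.
	upper := runes.Map(unicode.ToUpper)
	t := runes.If(runes.In(unicode.Latin), upper, nil)
	fmt.Println(t.String("abc 123 αβγ")) // ABC 123 αβγ
}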

vendor/golang.org/x/text/runes/runes.go (generated, vendored, new file, 355 lines added)

@ -0,0 +1,355 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package runes provide transforms for UTF-8 encoded text.
package runes // import "golang.org/x/text/runes"
import (
"unicode"
"unicode/utf8"
"golang.org/x/text/transform"
)
// A Set is a collection of runes.
type Set interface {
// Contains returns true if r is contained in the set.
Contains(r rune) bool
}
type setFunc func(rune) bool
func (s setFunc) Contains(r rune) bool {
return s(r)
}
// Note: using funcs here instead of wrapping types result in cleaner
// documentation and a smaller API.
// In creates a Set with a Contains method that returns true for all runes in
// the given RangeTable.
func In(rt *unicode.RangeTable) Set {
return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
}
// NotIn creates a Set with a Contains method that returns true for all runes not
// in the given RangeTable.
func NotIn(rt *unicode.RangeTable) Set {
return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
}
// Predicate creates a Set with a Contains method that returns f(r).
func Predicate(f func(rune) bool) Set {
return setFunc(f)
}
// Transformer implements the transform.Transformer interface.
type Transformer struct {
t transform.SpanningTransformer
}
func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return t.t.Transform(dst, src, atEOF)
}
func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
return t.t.Span(b, atEOF)
}
func (t Transformer) Reset() { t.t.Reset() }
// Bytes returns a new byte slice with the result of converting b using t. It
// calls Reset on t. It returns nil if any error was found. This can only happen
// if an error-producing Transformer is passed to If.
func (t Transformer) Bytes(b []byte) []byte {
b, _, err := transform.Bytes(t, b)
if err != nil {
return nil
}
return b
}
// String returns a string with the result of converting s using t. It calls
// Reset on t. It returns the empty string if any error was found. This can only
// happen if an error-producing Transformer is passed to If.
func (t Transformer) String(s string) string {
s, _, err := transform.String(t, s)
if err != nil {
return ""
}
return s
}
// TODO:
// - Copy: copying strings and bytes in whole-rune units.
// - Validation (maybe)
// - Well-formed-ness (maybe)
const runeErrorString = string(utf8.RuneError)
// Remove returns a Transformer that removes runes r for which s.Contains(r).
// Illegal input bytes are replaced by RuneError before being passed to f.
func Remove(s Set) Transformer {
if f, ok := s.(setFunc); ok {
// This little trick cuts the running time of BenchmarkRemove for sets
// created by Predicate roughly in half.
// TODO: special-case RangeTables as well.
return Transformer{remove(f)}
}
return Transformer{remove(s.Contains)}
}
// TODO: remove transform.RemoveFunc.
type remove func(r rune) bool
func (remove) Reset() {}
// Span implements transform.Spanner.
func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
for r, size := rune(0), 0; n < len(src); {
if r = rune(src[n]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
} else {
err = transform.ErrEndOfSpan
}
break
}
if t(r) {
err = transform.ErrEndOfSpan
break
}
n += size
}
return
}
// Transform implements transform.Transformer.
func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for r, size := rune(0), 0; nSrc < len(src); {
if r = rune(src[nSrc]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
// We replace illegal bytes with RuneError. Not doing so might
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
// The resulting byte sequence may subsequently contain runes
// for which t(r) is true that were passed unnoticed.
if !t(utf8.RuneError) {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
}
nSrc++
continue
}
if t(r) {
nSrc += size
continue
}
if nDst+size > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < size; i++ {
dst[nDst] = src[nSrc]
nDst++
nSrc++
}
}
return
}
// Map returns a Transformer that maps the runes in the input using the given
// mapping. Illegal bytes in the input are converted to utf8.RuneError before
// being passed to the mapping func.
func Map(mapping func(rune) rune) Transformer {
return Transformer{mapper(mapping)}
}
type mapper func(rune) rune
func (mapper) Reset() {}
// Span implements transform.Spanner.
func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
for r, size := rune(0), 0; n < len(src); n += size {
if r = rune(src[n]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
} else {
err = transform.ErrEndOfSpan
}
break
}
if t(r) != r {
err = transform.ErrEndOfSpan
break
}
}
return n, err
}
// Transform implements transform.Transformer.
func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
var replacement rune
var b [utf8.UTFMax]byte
for r, size := rune(0), 0; nSrc < len(src); {
if r = rune(src[nSrc]); r < utf8.RuneSelf {
if replacement = t(r); replacement < utf8.RuneSelf {
if nDst == len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = byte(replacement)
nDst++
nSrc++
continue
}
size = 1
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
nSrc++
continue
}
} else if replacement = t(r); replacement == r {
if nDst+size > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < size; i++ {
dst[nDst] = src[nSrc]
nDst++
nSrc++
}
continue
}
n := utf8.EncodeRune(b[:], replacement)
if nDst+n > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < n; i++ {
dst[nDst] = b[i]
nDst++
}
nSrc += size
}
return
}
// ReplaceIllFormed returns a transformer that replaces all input bytes that are
// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
func ReplaceIllFormed() Transformer {
return Transformer{&replaceIllFormed{}}
}
type replaceIllFormed struct{ transform.NopResetter }
func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
for n < len(src) {
// ASCII fast path.
if src[n] < utf8.RuneSelf {
n++
continue
}
r, size := utf8.DecodeRune(src[n:])
// Look for a valid non-ASCII rune.
if r != utf8.RuneError || size != 1 {
n += size
continue
}
// Look for short source data.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
break
}
// We have an invalid rune.
err = transform.ErrEndOfSpan
break
}
return n, err
}
func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for nSrc < len(src) {
// ASCII fast path.
if r := src[nSrc]; r < utf8.RuneSelf {
if nDst == len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = r
nDst++
nSrc++
continue
}
// Look for a valid non-ASCII rune.
if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
err = transform.ErrShortDst
break
}
nDst += size
nSrc += size
continue
}
// Look for short source data.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
// We have an invalid rune.
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
nSrc++
}
return nDst, nSrc, err
}
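For reviewers unfamiliar with these vendored packages, a minimal usage sketch of the Map and ReplaceIllFormed transformers defined above, driven through the transform package vendored below. This is illustrative only and not part of the vendored file; it assumes nothing beyond the vendored import paths, and the input strings are arbitrary samples.

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
)

func main() {
	// Map builds a Transformer from any rune-to-rune function.
	upper := runes.Map(unicode.ToUpper)
	out, n, err := transform.String(upper, "héllo wörld")
	fmt.Println(out, n, err) // HÉLLO WÖRLD 13 <nil>

	// ReplaceIllFormed substitutes the UTF-8 replacement character for bytes
	// that are not part of a well-formed sequence.
	fixed, _, _ := transform.Bytes(runes.ReplaceIllFormed(), []byte{'o', 'k', 0xff})
	fmt.Println(string(fixed)) // "ok" followed by the U+FFFD replacement character
}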

vendor/golang.org/x/text/transform/transform.go (generated, vendored, new file, 709 lines added)

@@ -0,0 +1,709 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package transform provides reader and writer wrappers that transform the
// bytes passing through as well as various transformations. Example
// transformations provided by other packages include normalization and
// conversion between character sets.
package transform // import "golang.org/x/text/transform"
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
var (
// ErrShortDst means that the destination buffer was too short to
// receive all of the transformed bytes.
ErrShortDst = errors.New("transform: short destination buffer")
// ErrShortSrc means that the source buffer has insufficient data to
// complete the transformation.
ErrShortSrc = errors.New("transform: short source buffer")
// ErrEndOfSpan means that the input and output (the transformed input)
// are not identical.
ErrEndOfSpan = errors.New("transform: input and output are not identical")
// errInconsistentByteCount means that Transform returned success (nil
// error) but also returned nSrc inconsistent with the src argument.
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
// errShortInternal means that an internal buffer is not large enough
// to make progress and the Transform operation must be aborted.
errShortInternal = errors.New("transform: short internal buffer")
)
// Transformer transforms bytes.
type Transformer interface {
// Transform writes to dst the transformed bytes read from src, and
// returns the number of dst bytes written and src bytes read. The
// atEOF argument tells whether src represents the last bytes of the
// input.
//
// Callers should always process the nDst bytes produced and account
// for the nSrc bytes consumed before considering the error err.
//
// A nil error means that all of the transformed bytes (whether freshly
// transformed from src or left over from previous Transform calls)
// were written to dst. A nil error can be returned regardless of
// whether atEOF is true. If err is nil then nSrc must equal len(src);
// the converse is not necessarily true.
//
// ErrShortDst means that dst was too short to receive all of the
// transformed bytes. ErrShortSrc means that src had insufficient data
// to complete the transformation. If both conditions apply, then
// either error may be returned. Other than the error conditions listed
// here, implementations are free to report other errors that arise.
Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
// Reset resets the state and allows a Transformer to be reused.
Reset()
}
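// Illustrative sketch, not part of the vendored file: a minimal, hypothetical
// Transformer honouring the contract documented above. It upper-cases ASCII
// letters byte for byte and reports ErrShortDst when dst fills up, so the
// caller can flush dst and call Transform again with the remaining src.
type asciiUpper struct{ NopResetter }

func (asciiUpper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for ; nSrc < len(src); nSrc++ {
		if nDst == len(dst) {
			// Partial progress is reported through nDst/nSrc alongside the error.
			return nDst, nSrc, ErrShortDst
		}
		c := src[nSrc]
		if 'a' <= c && c <= 'z' {
			c -= 'a' - 'A'
		}
		dst[nDst] = c
		nDst++
	}
	// err == nil, therefore nSrc == len(src), as the contract above requires.
	return nDst, nSrc, nil
}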
// SpanningTransformer extends the Transformer interface with a Span method
// that determines how much of the input already conforms to the Transformer.
type SpanningTransformer interface {
Transformer
// Span returns a position in src such that transforming src[:n] results in
// identical output src[:n] for these bytes. It does not necessarily return
// the largest such n. The atEOF argument tells whether src represents the
// last bytes of the input.
//
// Callers should always account for the n bytes consumed before
// considering the error err.
//
// A nil error means that all input bytes are known to be identical to the
// output produced by the Transformer. A nil error can be returned
// regardless of whether atEOF is true. If err is nil, then n must
// equal len(src); the converse is not necessarily true.
//
// ErrEndOfSpan means that the Transformer output may differ from the
// input after n bytes. Note that n may be len(src), meaning that the output
// would contain additional bytes after otherwise identical output.
// ErrShortSrc means that src had insufficient data to determine whether the
// remaining bytes would change. Other than the error conditions listed
// here, implementations are free to report other errors that arise.
//
// Calling Span can modify the Transformer state as a side effect. In
// effect, it does the transformation just as calling Transform would, only
// without copying to a destination buffer and only up to a point it can
// determine the input and output bytes are the same. This is obviously more
// limited than calling Transform, but can be more efficient in terms of
// copying and allocating buffers. Calls to Span and Transform may be
// interleaved.
Span(src []byte, atEOF bool) (n int, err error)
}
// NopResetter can be embedded by implementations of Transformer to add a nop
// Reset method.
type NopResetter struct{}
// Reset implements the Reset method of the Transformer interface.
func (NopResetter) Reset() {}
// Reader wraps another io.Reader by transforming the bytes read.
type Reader struct {
r io.Reader
t Transformer
err error
// dst[dst0:dst1] contains bytes that have been transformed by t but
// not yet copied out via Read.
dst []byte
dst0, dst1 int
// src[src0:src1] contains bytes that have been read from r but not
// yet transformed through t.
src []byte
src0, src1 int
// transformComplete is whether the transformation is complete,
// regardless of whether or not it was successful.
transformComplete bool
}
const defaultBufSize = 4096
// NewReader returns a new Reader that wraps r by transforming the bytes read
// via t. It calls Reset on t.
func NewReader(r io.Reader, t Transformer) *Reader {
t.Reset()
return &Reader{
r: r,
t: t,
dst: make([]byte, defaultBufSize),
src: make([]byte, defaultBufSize),
}
}
// Read implements the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
n, err := 0, error(nil)
for {
// Copy out any transformed bytes and return the final error if we are done.
if r.dst0 != r.dst1 {
n = copy(p, r.dst[r.dst0:r.dst1])
r.dst0 += n
if r.dst0 == r.dst1 && r.transformComplete {
return n, r.err
}
return n, nil
} else if r.transformComplete {
return 0, r.err
}
// Try to transform some source bytes, or to flush the transformer if we
// are out of source bytes. We do this even if r.r.Read returned an error.
// As the io.Reader documentation says, "process the n > 0 bytes returned
// before considering the error".
if r.src0 != r.src1 || r.err != nil {
r.dst0 = 0
r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
r.src0 += n
switch {
case err == nil:
if r.src0 != r.src1 {
r.err = errInconsistentByteCount
}
// The Transform call was successful; we are complete if we
// cannot read more bytes into src.
r.transformComplete = r.err != nil
continue
case err == ErrShortDst && (r.dst1 != 0 || n != 0):
// Make room in dst by copying out, and try again.
continue
case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
// Read more bytes into src via the code below, and try again.
default:
r.transformComplete = true
// The reader error (r.err) takes precedence over the
// transformer error (err) unless r.err is nil or io.EOF.
if r.err == nil || r.err == io.EOF {
r.err = err
}
continue
}
}
// Move any untransformed source bytes to the start of the buffer
// and read more bytes.
if r.src0 != 0 {
r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
}
n, r.err = r.r.Read(r.src[r.src1:])
r.src1 += n
}
}
// TODO: implement ReadByte (and ReadRune??).
// Writer wraps another io.Writer by transforming the bytes written to it.
// The user needs to call Close to flush unwritten bytes that may
// be buffered.
type Writer struct {
w io.Writer
t Transformer
dst []byte
// src[:n] contains bytes that have not yet passed through t.
src []byte
n int
}
// NewWriter returns a new Writer that wraps w by transforming the bytes written
// via t. It calls Reset on t.
func NewWriter(w io.Writer, t Transformer) *Writer {
t.Reset()
return &Writer{
w: w,
t: t,
dst: make([]byte, defaultBufSize),
src: make([]byte, defaultBufSize),
}
}
// Write implements the io.Writer interface. If there are not enough
// bytes available to complete a Transform, the bytes will be buffered
// for the next write. Call Close to convert the remaining bytes.
func (w *Writer) Write(data []byte) (n int, err error) {
src := data
if w.n > 0 {
// Append bytes from data to the last remainder.
// TODO: limit the amount copied on first try.
n = copy(w.src[w.n:], data)
w.n += n
src = w.src[:w.n]
}
for {
nDst, nSrc, err := w.t.Transform(w.dst, src, false)
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
return n, werr
}
src = src[nSrc:]
if w.n == 0 {
n += nSrc
} else if len(src) <= n {
// Enough bytes from w.src have been consumed. We make src point
// to data instead to reduce the copying.
w.n = 0
n -= len(src)
src = data[n:]
if n < len(data) && (err == nil || err == ErrShortSrc) {
continue
}
}
switch err {
case ErrShortDst:
// This error is okay as long as we are making progress.
if nDst > 0 || nSrc > 0 {
continue
}
case ErrShortSrc:
if len(src) < len(w.src) {
m := copy(w.src, src)
// If w.n > 0, bytes from data were already copied to w.src and n
// was already set to the number of bytes consumed.
if w.n == 0 {
n += m
}
w.n = m
err = nil
} else if nDst > 0 || nSrc > 0 {
// Not enough buffer to store the remainder. Keep processing as
// long as there is progress. Without this case, transforms that
// require a lookahead larger than the buffer may result in an
// error. This is not something one may expect to be common in
// practice, but it may occur when buffers are set to small
// sizes during testing.
continue
}
case nil:
if w.n > 0 {
err = errInconsistentByteCount
}
}
return n, err
}
}
// Close implements the io.Closer interface.
func (w *Writer) Close() error {
src := w.src[:w.n]
for {
nDst, nSrc, err := w.t.Transform(w.dst, src, true)
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
return werr
}
if err != ErrShortDst {
return err
}
src = src[nSrc:]
}
}
type nop struct{ NopResetter }
func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := copy(dst, src)
if n < len(src) {
err = ErrShortDst
}
return n, n, err
}
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
return len(src), nil
}
type discard struct{ NopResetter }
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return 0, len(src), nil
}
var (
// Discard is a Transformer for which all Transform calls succeed
// by consuming all bytes and writing nothing.
Discard Transformer = discard{}
// Nop is a SpanningTransformer that copies src to dst.
Nop SpanningTransformer = nop{}
)
// chain is a sequence of links. A chain with N Transformers has N+1 links and
// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
// buffers given to chain.Transform and the middle N-1 buffers are intermediate
// buffers owned by the chain. The i'th link transforms bytes from the i'th
// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
type chain struct {
link []link
err error
// errStart is the index at which the error occurred plus 1. Processing
// resumes at this level at the next call to Transform. As long as
// errStart > 0, chain will not consume any more source bytes.
errStart int
}
func (c *chain) fatalError(errIndex int, err error) {
if i := errIndex + 1; i > c.errStart {
c.errStart = i
c.err = err
}
}
type link struct {
t Transformer
// b[p:n] holds the bytes to be transformed by t.
b []byte
p int
n int
}
func (l *link) src() []byte {
return l.b[l.p:l.n]
}
func (l *link) dst() []byte {
return l.b[l.n:]
}
// Chain returns a Transformer that applies t in sequence.
func Chain(t ...Transformer) Transformer {
if len(t) == 0 {
return nop{}
}
c := &chain{link: make([]link, len(t)+1)}
for i, tt := range t {
c.link[i].t = tt
}
// Allocate intermediate buffers.
b := make([][defaultBufSize]byte, len(t)-1)
for i := range b {
c.link[i+1].b = b[i][:]
}
return c
}
// Reset resets the state of Chain. It calls Reset on all the Transformers.
func (c *chain) Reset() {
for i, l := range c.link {
if l.t != nil {
l.t.Reset()
}
c.link[i].p, c.link[i].n = 0, 0
}
}
// TODO: make chain use Span (is going to be fun to implement!)
// Transform applies the transformers of c in sequence.
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
// Set up src and dst in the chain.
srcL := &c.link[0]
dstL := &c.link[len(c.link)-1]
srcL.b, srcL.p, srcL.n = src, 0, len(src)
dstL.b, dstL.n = dst, 0
var lastFull, needProgress bool // for detecting progress
// i is the index of the next Transformer to apply, for i in [low, high].
// low is the lowest index for which c.link[low] may still produce bytes.
// high is the highest index for which c.link[high] has a Transformer.
// The error returned by Transform determines whether to increase or
// decrease i. We try to completely fill a buffer before converting it.
for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; {
in, out := &c.link[i], &c.link[i+1]
nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i)
out.n += nDst
in.p += nSrc
if i > 0 && in.p == in.n {
in.p, in.n = 0, 0
}
needProgress, lastFull = lastFull, false
switch err0 {
case ErrShortDst:
// Process the destination buffer next. Return if we are already
// at the high index.
if i == high {
return dstL.n, srcL.p, ErrShortDst
}
if out.n != 0 {
i++
// If the Transformer at the next index is not able to process any
// source bytes there is nothing that can be done to make progress
// and the bytes will remain unprocessed. lastFull is used to
// detect this and break out of the loop with a fatal error.
lastFull = true
continue
}
// The destination buffer was too small, but is completely empty.
// Return a fatal error as this transformation can never complete.
c.fatalError(i, errShortInternal)
case ErrShortSrc:
if i == 0 {
// Save ErrShortSrc in err. All other errors take precedence.
err = ErrShortSrc
break
}
// Source bytes were depleted before filling up the destination buffer.
// Verify we made some progress, move the remaining bytes to the errStart
// and try to get more source bytes.
if needProgress && nSrc == 0 || in.n-in.p == len(in.b) {
// There were not enough source bytes to proceed while the source
// buffer cannot hold any more bytes. Return a fatal error as this
// transformation can never complete.
c.fatalError(i, errShortInternal)
break
}
// in.b is an internal buffer and we can make progress.
in.p, in.n = 0, copy(in.b, in.src())
fallthrough
case nil:
// if i == low, we have depleted the bytes at index i or any lower levels.
// In that case we increase low and i. In all other cases we decrease i to
// fetch more bytes before proceeding to the next index.
if i > low {
i--
continue
}
default:
c.fatalError(i, err0)
}
// Exhausted level low or fatal error: increase low and continue
// to process the bytes accepted so far.
i++
low = i
}
// If c.errStart > 0, this means we found a fatal error. We will clear
// all upstream buffers. At this point, no more progress can be made
// downstream, as Transform would have bailed while handling ErrShortDst.
if c.errStart > 0 {
for i := 1; i < c.errStart; i++ {
c.link[i].p, c.link[i].n = 0, 0
}
err, c.errStart, c.err = c.err, 0, nil
}
return dstL.n, srcL.p, err
}
// Deprecated: Use runes.Remove instead.
func RemoveFunc(f func(r rune) bool) Transformer {
return removeF(f)
}
type removeF func(r rune) bool
func (removeF) Reset() {}
// Transform implements the Transformer interface.
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {
if r = rune(src[0]); r < utf8.RuneSelf {
sz = 1
} else {
r, sz = utf8.DecodeRune(src)
if sz == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src) {
err = ErrShortSrc
break
}
// We replace illegal bytes with RuneError. Not doing so might
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
// The resulting byte sequence may subsequently contain runes
// for which t(r) is true that were passed unnoticed.
if !t(r) {
if nDst+3 > len(dst) {
err = ErrShortDst
break
}
nDst += copy(dst[nDst:], "\uFFFD")
}
nSrc++
continue
}
}
if !t(r) {
if nDst+sz > len(dst) {
err = ErrShortDst
break
}
nDst += copy(dst[nDst:], src[:sz])
}
nSrc += sz
}
return
}
// grow returns a new []byte that is longer than b, and copies the first n bytes
// of b to the start of the new slice.
func grow(b []byte, n int) []byte {
m := len(b)
if m <= 32 {
m = 64
} else if m <= 256 {
m *= 2
} else {
m += m >> 1
}
buf := make([]byte, m)
copy(buf, b[:n])
return buf
}
const initialBufSize = 128
// String returns a string with the result of converting s[:n] using t, where
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
func String(t Transformer, s string) (result string, n int, err error) {
t.Reset()
if s == "" {
// Fast path for the common case for empty input. Results in about a
// 86% reduction of running time for BenchmarkStringLowerEmpty.
if _, _, err := t.Transform(nil, nil, true); err == nil {
return "", 0, nil
}
}
// Allocate only once. Note that both dst and src escape when passed to
// Transform.
buf := [2 * initialBufSize]byte{}
dst := buf[:initialBufSize:initialBufSize]
src := buf[initialBufSize : 2*initialBufSize]
// The input string s is transformed in multiple chunks (starting with a
// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
// per-Transform-call) indexes, pDst and pSrc are overall indexes.
nDst, nSrc := 0, 0
pDst, pSrc := 0, 0
// pPrefix is the length of a common prefix: the first pPrefix bytes of the
// result will equal the first pPrefix bytes of s. It is not guaranteed to
// be the largest such value, but if pPrefix, len(result) and len(s) are
// all equal after the final transform (i.e. calling Transform with atEOF
// being true returned nil error) then we don't need to allocate a new
// result string.
pPrefix := 0
for {
// Invariant: pDst == pPrefix && pSrc == pPrefix.
n := copy(src, s[pSrc:])
nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
pDst += nDst
pSrc += nSrc
// TODO: let transformers implement an optional Spanner interface, akin
// to norm's QuickSpan. This would even allow us to avoid any allocation.
if !bytes.Equal(dst[:nDst], src[:nSrc]) {
break
}
pPrefix = pSrc
if err == ErrShortDst {
// A buffer can only be short if a transformer modifies its input.
break
} else if err == ErrShortSrc {
if nSrc == 0 {
// No progress was made.
break
}
// Equal so far and !atEOF, so continue checking.
} else if err != nil || pPrefix == len(s) {
return string(s[:pPrefix]), pPrefix, err
}
}
// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.
// We have transformed the first pSrc bytes of the input s to become pDst
// transformed bytes. Those transformed bytes are discontiguous: the first
// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
// that they become one contiguous slice: dst[:pDst].
if pPrefix != 0 {
newDst := dst
if pDst > len(newDst) {
newDst = make([]byte, len(s)+nDst-nSrc)
}
copy(newDst[pPrefix:pDst], dst[:nDst])
copy(newDst[:pPrefix], s[:pPrefix])
dst = newDst
}
// Prevent duplicate Transform calls with atEOF being true at the end of
// the input. Also return if we have an unrecoverable error.
if (err == nil && pSrc == len(s)) ||
(err != nil && err != ErrShortDst && err != ErrShortSrc) {
return string(dst[:pDst]), pSrc, err
}
// Transform the remaining input, growing dst and src buffers as necessary.
for {
n := copy(src, s[pSrc:])
atEOF := pSrc+n == len(s)
nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], atEOF)
pDst += nDst
pSrc += nSrc
// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
// make progress. This may avoid excessive allocations.
if err == ErrShortDst {
if nDst == 0 {
dst = grow(dst, pDst)
}
} else if err == ErrShortSrc {
if atEOF {
return string(dst[:pDst]), pSrc, err
}
if nSrc == 0 {
src = grow(src, 0)
}
} else if err != nil || pSrc == len(s) {
return string(dst[:pDst]), pSrc, err
}
}
}
// Bytes returns a new byte slice with the result of converting b[:n] using t,
// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
return doAppend(t, 0, make([]byte, len(b)), b)
}
// Append appends the result of converting src[:n] using t to dst, where
// n <= len(src). If err == nil, n will be len(src). It calls Reset on t.
func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
if len(dst) == cap(dst) {
n := len(src) + len(dst) // It is okay for this to be 0.
b := make([]byte, n)
dst = b[:copy(b, dst)]
}
return doAppend(t, len(dst), dst[:cap(dst)], src)
}
func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
t.Reset()
pSrc := 0
for {
nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
pDst += nDst
pSrc += nSrc
if err != ErrShortDst {
return dst[:pDst], pSrc, err
}
// Grow the destination buffer, but do not grow as long as we can make
// progress. This may avoid excessive allocations.
if nDst == 0 {
dst = grow(dst, pDst)
}
}
}
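Beyond String, Bytes and Append, the package streams a transformation over arbitrary I/O: NewReader transforms bytes as they are read, NewWriter as they are written, and Close flushes anything still buffered in the wrapper. A short illustrative sketch, again not part of the vendored file and assuming only the vendored import paths and made-up inputs:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
)

func main() {
	// Chain composes transformers in order, with intermediate buffers between
	// the links; ReplaceIllFormed scrubs invalid UTF-8 before the Map runs.
	t := transform.Chain(runes.ReplaceIllFormed(), runes.Map(func(r rune) rune {
		if r == ' ' {
			return '_'
		}
		return r
	}))

	// Reader side: bytes are transformed as they are read.
	r := transform.NewReader(strings.NewReader("spaces to underscores"), t)
	b, _ := io.ReadAll(r)
	fmt.Println(string(b)) // spaces_to_underscores

	// Writer side: bytes are transformed as they are written; Close flushes
	// anything still buffered inside the wrapper.
	var buf bytes.Buffer
	w := transform.NewWriter(&buf, t)
	_, _ = w.Write([]byte("more plain text"))
	_ = w.Close()
	fmt.Println(buf.String()) // more_plain_text
}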

vendor/modules.txt (vendored, 25 changed lines)

@@ -33,7 +33,7 @@ github.com/documize/slug
# github.com/fatih/structs v1.1.0
## explicit
github.com/fatih/structs
# github.com/felixge/httpsnoop v1.0.3
# github.com/felixge/httpsnoop v1.0.4
## explicit; go 1.13
github.com/felixge/httpsnoop
# github.com/go-asn1-ber/asn1-ber v1.5.5
@@ -92,14 +92,20 @@ github.com/mb0/diff
## explicit; go 1.21
github.com/microcosm-cc/bluemonday
github.com/microcosm-cc/bluemonday/css
# github.com/microsoft/go-mssqldb v1.5.0
## explicit; go 1.13
# github.com/microsoft/go-mssqldb v1.6.0
## explicit; go 1.17
github.com/microsoft/go-mssqldb
github.com/microsoft/go-mssqldb/aecmk
github.com/microsoft/go-mssqldb/integratedauth
github.com/microsoft/go-mssqldb/integratedauth/ntlm
github.com/microsoft/go-mssqldb/integratedauth/winsspi
github.com/microsoft/go-mssqldb/internal/cp
github.com/microsoft/go-mssqldb/internal/decimal
github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/algorithms
github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/crypto
github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/encryption
github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/keys
github.com/microsoft/go-mssqldb/internal/github.com/swisscom/mssql-always-encrypted/pkg/utils
github.com/microsoft/go-mssqldb/internal/querytext
github.com/microsoft/go-mssqldb/msdsn
# github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d
@@ -118,17 +124,26 @@ github.com/shurcooL/sanitized_anchor_name
## explicit
github.com/trivago/tgo/tcontainer
github.com/trivago/tgo/treflect
# golang.org/x/crypto v0.18.0
# golang.org/x/crypto v0.19.0
## explicit; go 1.18
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
golang.org/x/crypto/md4
# golang.org/x/net v0.20.0
# golang.org/x/net v0.21.0
## explicit; go 1.18
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
golang.org/x/net/html
golang.org/x/net/html/atom
# golang.org/x/text v0.14.0
## explicit; go 1.18
golang.org/x/text/encoding
golang.org/x/text/encoding/internal
golang.org/x/text/encoding/internal/identifier
golang.org/x/text/encoding/unicode
golang.org/x/text/internal/utf8internal
golang.org/x/text/runes
golang.org/x/text/transform
# gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc
## explicit
gopkg.in/alexcesaro/quotedprintable.v3