diff --git a/.bingo/Variables.mk b/.bingo/Variables.mk
index 8c08ceade..2bb44be4f 100644
--- a/.bingo/Variables.mk
+++ b/.bingo/Variables.mk
@@ -23,6 +23,12 @@ $(BINGO): $(BINGO_DIR)/bingo.mod
@echo "(re)installing $(GOBIN)/bingo-v0.9.0"
@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.9.0 "github.com/bwplotka/bingo"

+CONFTEST := $(GOBIN)/conftest-v0.62.0
+$(CONFTEST): $(BINGO_DIR)/conftest.mod
+ @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
+ @echo "(re)installing $(GOBIN)/conftest-v0.62.0"
+ @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=conftest.mod -o=$(GOBIN)/conftest-v0.62.0 "github.com/open-policy-agent/conftest"
+
CONTROLLER_GEN := $(GOBIN)/controller-gen-v0.19.0
$(CONTROLLER_GEN): $(BINGO_DIR)/controller-gen.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
@@ -47,17 +53,17 @@ $(GOJQ): $(BINGO_DIR)/gojq.mod
@echo "(re)installing $(GOBIN)/gojq-v0.12.17"
@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=gojq.mod -o=$(GOBIN)/gojq-v0.12.17 "github.com/itchyny/gojq/cmd/gojq"

-GOLANGCI_LINT := $(GOBIN)/golangci-lint-v2.6.2
+GOLANGCI_LINT := $(GOBIN)/golangci-lint-v2.7.2
$(GOLANGCI_LINT): $(BINGO_DIR)/golangci-lint.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
- @echo "(re)installing $(GOBIN)/golangci-lint-v2.6.2"
- @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v2.6.2 "github.com/golangci/golangci-lint/v2/cmd/golangci-lint"
+ @echo "(re)installing $(GOBIN)/golangci-lint-v2.7.2"
+ @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v2.7.2 "github.com/golangci/golangci-lint/v2/cmd/golangci-lint"

-GORELEASER := $(GOBIN)/goreleaser-v1.26.2
+GORELEASER := $(GOBIN)/goreleaser-v2.11.2
$(GORELEASER): $(BINGO_DIR)/goreleaser.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
- @echo "(re)installing $(GOBIN)/goreleaser-v1.26.2"
- @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=goreleaser.mod -o=$(GOBIN)/goreleaser-v1.26.2 "github.com/goreleaser/goreleaser"
+ @echo "(re)installing $(GOBIN)/goreleaser-v2.11.2"
+ @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=goreleaser.mod -o=$(GOBIN)/goreleaser-v2.11.2 "github.com/goreleaser/goreleaser/v2"

HELM := $(GOBIN)/helm-v3.18.4
$(HELM): $(BINGO_DIR)/helm.mod
@@ -71,6 +77,12 @@ $(KIND): $(BINGO_DIR)/kind.mod
@echo "(re)installing $(GOBIN)/kind-v0.30.0"
@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kind.mod -o=$(GOBIN)/kind-v0.30.0 "sigs.k8s.io/kind"

+KUBE_SCORE := $(GOBIN)/kube-score-v1.20.0
+$(KUBE_SCORE): $(BINGO_DIR)/kube-score.mod
+ @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
+ @echo "(re)installing $(GOBIN)/kube-score-v1.20.0"
+ @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kube-score.mod -o=$(GOBIN)/kube-score-v1.20.0 "github.com/zegl/kube-score/cmd/kube-score"
+
KUSTOMIZE := $(GOBIN)/kustomize-v5.7.1
$(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
diff --git a/.bingo/conftest.mod b/.bingo/conftest.mod
new file mode 100644
index 000000000..294b93132
--- /dev/null
+++ b/.bingo/conftest.mod
@@ -0,0 +1,5 @@
+module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
+
+go 1.24.6
+
+require github.com/open-policy-agent/conftest v0.62.0
diff --git a/.bingo/conftest.sum b/.bingo/conftest.sum
new file mode 100644
index 000000000..b34a3b44b
--- /dev/null
+++ b/.bingo/conftest.sum
@@ -0,0 +1,2041 @@
+cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI=
+cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
+cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4=
+cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=
+cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U=
+cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
+cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
+cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
+cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o=
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
+cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM=
+cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ=
+cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
+cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
+cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
+cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ=
+cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k=
+cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw=
+cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
+cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
+cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M=
+cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE=
+cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE=
+cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk=
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
+cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8=
+cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc=
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
+cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8=
+cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY=
+cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM=
+cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
+cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU=
+cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI=
+cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
+cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
+cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84=
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A=
+cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E=
+cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
+cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY=
+cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
+cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI=
+cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ=
+cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI=
+cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08=
+cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
+cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
+cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
+cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo=
+cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg=
+cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw=
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
+cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
+cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
+cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
+cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
+cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=
+cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q=
+cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=
+cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
+cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
+cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
+cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
+cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU=
+cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
+cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss=
+cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
+cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g=
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
+cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM=
+cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
+cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
+cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
+cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E=
+cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac=
+cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q=
+cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU=
+cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
+cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
+cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
+cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss=
+cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
+cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
+cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
+cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
+cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q=
+cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
+cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8=
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
+cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE=
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M=
+cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg=
+cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
+cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
+cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
+cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=
+cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y=
+cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
+cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
+cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
+cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
+cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
+cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
+cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
+cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4=
+cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM=
+cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA=
+cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
+cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
+cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI=
+cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
+cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
+cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
+cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
+cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M=
+cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0=
+cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
+cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
+cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
+cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
+cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA=
+cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
+cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
+cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
+cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
+cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM=
+cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
+cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
+cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
+cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
+cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
+cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
+cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
+cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
+cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c=
+cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
+cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=
+cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
+cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs=
+cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
+cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
+cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI=
+cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
+cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
+cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
+cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
+cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
+cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM=
+cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4=
+cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
+cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
+cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
+cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
+cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
+cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM=
+cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
+cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
+cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
+cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
+cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
+cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
+cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc=
+cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M=
+cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw=
+cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs=
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
+cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw=
+cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA=
+cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c=
+cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
+cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
+cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg=
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw=
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E=
+cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
+cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
+cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
+cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
+cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
+cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
+cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA=
+cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY=
+cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
+cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo=
+cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74=
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
+cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o=
+cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg=
+cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w=
+cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24=
+cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI=
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
+cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY=
+cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
+cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
+cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
+cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw=
+cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
+cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU=
+cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E=
+cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
+cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
+cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storage v1.38.0 h1:Az68ZRGlnNTpIBbLjSMIV2BDcwwXYlRlQzis0llkpJg=
+cloud.google.com/go/storage v1.38.0/go.mod h1:tlUADB0mAb9BgYls9lq+8MGkfzOXuLrnHXlpHmvFJoY=
+cloud.google.com/go/storage v1.49.0 h1:zenOPBOWHCnojRd9aJZAyQXBYqkJkdQS42dxL55CIMw=
+cloud.google.com/go/storage v1.49.0/go.mod h1:k1eHhhpLvrPjVGfo0mOUPEJ4Y2+a/Hv5PiwehZI9qGU=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
+cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
+cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+cuelang.org/go v0.10.0 h1:Y1Pu4wwga5HkXfLFK1sWAYaSWIBdcsr5Cb5AWj2pOuE=
+cuelang.org/go v0.10.0/go.mod h1:HzlaqqqInHNiqE6slTP6+UtxT9hN6DAzgJgdbNxXvX8=
+cuelang.org/go v0.13.2 h1:SagzeEASX4E2FQnRbItsqa33sSelrJjQByLqH9uZCE8=
+cuelang.org/go v0.13.2/go.mod h1:8MoQXu+RcXsa2s9mebJN1HJ1orVDc9aI9/yKi6Dzsi4=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
+github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/CycloneDX/cyclonedx-go v0.9.1 h1:yffaWOZsv77oTJa/SdVZYdgAgFioCeycBUKkqS2qzQM=
+github.com/CycloneDX/cyclonedx-go v0.9.1/go.mod h1:NE/EWvzELOFlG6+ljX/QeMlVt9VKcTwu8u0ccsACEsw=
+github.com/CycloneDX/cyclonedx-go v0.9.2 h1:688QHn2X/5nRezKe2ueIVCt+NRqf7fl3AVQk+vaFcIo=
+github.com/CycloneDX/cyclonedx-go v0.9.2/go.mod h1:vcK6pKgO1WanCdd61qx4bFnSsDJQ6SbM2ZuMIgq86Jg=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
+github.com/KeisukeYamashita/go-vcl v0.4.0 h1:dFxZq2yVeaCWBJAT7Oh9Z+Pp8y32i7b11QHdzsuBcsk=
+github.com/KeisukeYamashita/go-vcl v0.4.0/go.mod h1:af2qGlXbsHDQN5abN7hyGNKtGhcFSaDdbLl4sfud+AU=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
+github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
+github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
+github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
+github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
+github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc=
+github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
+github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
+github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
+github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
+github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
+github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
+github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
+github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.49.6 h1:yNldzF5kzLBRvKlKz1S0bkvc2+04R1kt13KfBWQBfFA=
+github.com/aws/aws-sdk-go v1.49.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/basgys/goxml2json v1.1.0 h1:4ln5i4rseYfXNd86lGEB+Vi652IsIXIvggKM/BhUKVw=
+github.com/basgys/goxml2json v1.1.0/go.mod h1:wH7a5Np/Q4QoECFIU8zTQlZwZkrilY0itPfecMw41Dw=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
+github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY=
+github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk=
+github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg=
+github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc=
+github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
+github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
+github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
+github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-akka/configuration v0.0.0-20200606091224-a002c0330665 h1:Iz3aEheYgn+//VX7VisgCmF/wW3BMtXCLbvHV4jMQJA=
+github.com/go-akka/configuration v0.0.0-20200606091224-a002c0330665/go.mod h1:19bUnum2ZAeftfwwLZ/wRe7idyfoW2MfmXO464Hrfbw=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
+github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
+github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godoctor/godoctor v0.0.0-20181123222458-69df17f3a6f6/go.mod h1:+tyhT8jBF8E0XvdlSXOSL7Iko7DlNiongHq3q+wcsPs=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-jsonnet v0.20.0 h1:WG4TTSARuV7bSm4PMB4ohjxe33IHT5WVTrJSU33uT4g=
+github.com/google/go-jsonnet v0.20.0/go.mod h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA=
+github.com/google/go-jsonnet v0.21.0 h1:43Bk3K4zMRP/aAZm9Po2uSEjY6ALCkYUVIcz9HLGMvA=
+github.com/google/go-jsonnet v0.21.0/go.mod h1:tCGAu8cpUpEZcdGMmdOu37nh8bGgqubhI5v2iSk3KJQ=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
+github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA=
+github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
+github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
+github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-getter v1.7.6 h1:5jHuM+aH373XNtXl9TNTUH5Qd69Trve11tHIrB+6yj4=
+github.com/hashicorp/go-getter v1.7.6/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744=
+github.com/hashicorp/go-getter v1.7.8 h1:mshVHx1Fto0/MydBekWan5zUipGq7jO0novchgMmSiY=
+github.com/hashicorp/go-getter v1.7.8/go.mod h1:2c6CboOEb9jG6YvmC9xdD+tyAFsrUaJPedwXDGr0TM4=
+github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
+github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/hcl/v2 v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY=
+github.com/hashicorp/hcl/v2 v2.17.0 h1:z1XvSUyXd1HP10U4lrLg5e0JMVz6CPaJvAgxM0KNZVY=
+github.com/hashicorp/hcl/v2 v2.17.0/go.mod h1:gJyW2PTShkJqQBKpAmPO3yxMxIuoXkOF2TpqXzrQyx4=
+github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos=
+github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jstemmer/go-junit-report v1.0.0 h1:8X1gzZpR+nVQLAht+L/foqOeX2l9DTZoaIPbEQHxsds=
+github.com/jstemmer/go-junit-report v1.0.0/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
+github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
+github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
+github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
+github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/buildkit v0.16.0 h1:wOVBj1o5YNVad/txPQNXUXdelm7Hs/i0PUFjzbK0VKE=
+github.com/moby/buildkit v0.16.0/go.mod h1:Xqx/5GlrqE1yIRORk0NSCVDFpQAU1WjlT6KHYZdisIQ=
+github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ=
+github.com/moby/buildkit v0.23.2/go.mod h1:iEjAfPQKIuO+8y6OcInInvzqTMiKMbb2RdJz1K/95a0=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/muhammadmuzzammil1998/jsonc v1.0.0 h1:8o5gBQn4ZA3NBA9DlTujCj2a4w0tqWrPVjDwhzkgTIs=
+github.com/muhammadmuzzammil1998/jsonc v1.0.0/go.mod h1:saF2fIVw4banK0H4+/EuqfFLpRnoy5S+ECwTOCcRcSU=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/open-policy-agent/conftest v0.56.0 h1:Q27Y45rdUHAOTjkeTbmHf2kWgW+DeFauZMaDjJm98YA=
+github.com/open-policy-agent/conftest v0.56.0/go.mod h1:u4xu/0jtZnsenKf06J/tdm/7CtP8ODmZ/JsRPTDCXMg=
+github.com/open-policy-agent/conftest v0.62.0 h1:mk6Kbf8WTGjI8byKd59GWjIGsOPr+dmiEwjyDEZMWhk=
+github.com/open-policy-agent/conftest v0.62.0/go.mod h1:oX2ScMAaFCJ2f4bAy23GBibaUzn1b8lRs6gkhu4G+IA=
+github.com/open-policy-agent/opa v0.69.0 h1:s2igLw2Z6IvGWGuXSfugWkVultDMsM9pXiDuMp7ckWw=
+github.com/open-policy-agent/opa v0.69.0/go.mod h1:+qyXJGkpEJ6kpB1kGo8JSwHtVXbTdsGdQYPWWNYNj+4=
+github.com/open-policy-agent/opa v1.6.0 h1:/S/cnNQJ2MUMNzizHPbisTWBHowmLkPrugY5jjkPlRQ=
+github.com/open-policy-agent/opa v1.6.0/go.mod h1:zFmw4P+W62+CWGYRDDswfVYSCnPo6oYaktQnfIaRFC4=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/owenrumney/go-sarif v1.1.1/go.mod h1:dNDiPlF04ESR/6fHlPyq7gHKmrM0sHUvAGjsoh8ZH0U=
+github.com/owenrumney/go-sarif/v2 v2.3.3 h1:ubWDJcF5i3L/EIOER+ZyQ03IfplbSU1BLOE26uKQIIU=
+github.com/owenrumney/go-sarif/v2 v2.3.3/go.mod h1:MSqMMx9WqlBSY7pXoOZWgEsVB4FDNfhcaXDA1j6Sr+w=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
+github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
+github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
+github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shteou/go-ignore v0.3.1 h1:/DVY4w06eKliWrbkwKfBHJgUleld+QAlmlQvfRQOigA=
+github.com/shteou/go-ignore v0.3.1/go.mod h1:hMVyBe+qt5/Z11W/Fxxf86b5SuL8kM29xNWLYob9Vos=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM=
+github.com/spdx/tools-golang v0.5.5 h1:61c0KLfAcNqAjlg6UNMdkwpMernhw3zVRwDZ2x9XOmk=
+github.com/spdx/tools-golang v0.5.5/go.mod h1:MVIsXx8ZZzaRWNQpUDhC4Dud34edUYJYecciXgrw5vE=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
+github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
+github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
+github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
+github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
+github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/tmccombs/hcl2json v0.3.1 h1:Pf+Lb9OpZ5lkQuIC0BB5txdCQskZ2ud/l8sz/Nkjf3A=
+github.com/tmccombs/hcl2json v0.3.1/go.mod h1:ljY0/prd2IFUF3cagQjV3cpPEEQKzqyGqnKI7m5DBVY=
+github.com/tmccombs/hcl2json v0.6.7 h1:RYKTs4kd/gzRsEiv7J3M2WQ7TYRYZVc+0H0pZdERkxA=
+github.com/tmccombs/hcl2json v0.6.7/go.mod h1:lJgBOOGDpbhjvdG2dLaWsqB4KBzul2HytfDTS3H465o=
+github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8=
+github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
+github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 h1:2f304B10LaZdB8kkVEaoXvAMVan2tl9AiK4G0odjQtE=
+github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
+github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/vektah/gqlparser v1.2.0/go.mod h1:bkVf0FX+Stjg/MHnm8mEyubuaArhNEqfQhF+OTiAL74=
+github.com/vektah/gqlparser/v2 v2.5.28 h1:bIulcl3LF69ba6EiZVGD88y4MkM+Jxrf3P2MX8xLRkY=
+github.com/vektah/gqlparser/v2 v2.5.28/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo=
+github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
+github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
+github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
+github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg=
+github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=
+github.com/zclconf/go-cty v1.6.1/go.mod h1:VDR4+I79ubFBGm1uJac1226K5yANQFHeauxPBoP54+o=
+github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
+github.com/zclconf/go-cty v1.13.2 h1:4GvrUxe/QUDYuJKAav4EYqdM47/kZa672LwmXFmEKT0=
+github.com/zclconf/go-cty v1.13.2/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0=
+github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70=
+github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao=
+go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
+go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
+go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
+go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
+go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
+go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
+go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
+go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
+go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
+go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
+go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
+go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
+go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
+golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
+golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
+golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
+golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
+golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
+golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
+golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
+golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
+golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
+golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
+golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
+golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
+golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
+google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
+google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08=
+google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
+google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
+google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
+google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
+google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
+google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
+google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY=
+google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg=
+google.golang.org/api v0.215.0 h1:jdYF4qnyczlEz2ReWIsosNLDuzXyvFHJtI5gcr0J7t0=
+google.golang.org/api v0.215.0/go.mod h1:fta3CVtuJYOEdugLNWm6WodzOS8KdFckABwN4I40hzY=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
+google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
+google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
+google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
+google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
+google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA=
+google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA=
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
+google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
+google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
+google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
+google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0=
+google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
+google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
+google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
+google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
+modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
+modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
+modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
+modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
+modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo=
+modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A=
+modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU=
+modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
+modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0=
+modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s=
+modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
+modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
+modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
+modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw=
+modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8=
+muzzammil.xyz/jsonc v1.0.0 h1:B6kaT3wHueZ87mPz3q1nFuM1BlL32IG0wcq0/uOsQ18=
+muzzammil.xyz/jsonc v1.0.0/go.mod h1:rFv8tUUKe+QLh7v02BhfxXEf4ZHhYD7unR93HL/1Uvo=
+olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3 h1:slmdOY3vp8a7KQbHkL+FLbvbkgMqmXojpFUO/jENuqQ=
+olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3/go.mod h1:oVgVk4OWVDi43qWBEyGhXgYxt7+ED4iYNpTngSLX2Iw=
+oras.land/oras-go/v2 v2.4.0 h1:i+Wt5oCaMHu99guBD0yuBjdLvX7Lz8ukPbwXdR7uBMs=
+oras.land/oras-go/v2 v2.4.0/go.mod h1:osvtg0/ClRq1KkydMAEu/IxFieyjItcsQ4ut4PPF+f8=
+oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
+oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/.bingo/golangci-lint.mod b/.bingo/golangci-lint.mod
index 4607edf92..5b5575d98 100644
--- a/.bingo/golangci-lint.mod
+++ b/.bingo/golangci-lint.mod
@@ -2,4 +2,4 @@ module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
go 1.24.6
-require github.com/golangci/golangci-lint/v2 v2.6.2 // cmd/golangci-lint
+require github.com/golangci/golangci-lint/v2 v2.7.2 // cmd/golangci-lint
diff --git a/.bingo/golangci-lint.sum b/.bingo/golangci-lint.sum
index 3146c7150..b7d8399e3 100644
--- a/.bingo/golangci-lint.sum
+++ b/.bingo/golangci-lint.sum
@@ -65,6 +65,8 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/MirrexOne/unqueryvet v1.2.1 h1:M+zdXMq84g+E1YOLa7g7ExN3dWfZQrdDSTCM7gC+m/A=
github.com/MirrexOne/unqueryvet v1.2.1/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg=
+github.com/MirrexOne/unqueryvet v1.3.0 h1:5slWSomgqpYU4zFuZ3NNOfOUxVPlXFDBPAVasZOGlAY=
+github.com/MirrexOne/unqueryvet v1.3.0/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg=
github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4=
github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo=
github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw=
@@ -114,6 +116,8 @@ github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc=
github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI=
github.com/catenacyber/perfsprint v0.10.0 h1:AZj1mYyxbxLRqmnYOeguZXEQwWOgQGm2wzLI5d7Hl/0=
github.com/catenacyber/perfsprint v0.10.0/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc=
+github.com/catenacyber/perfsprint v0.10.1 h1:u7Riei30bk46XsG8nknMhKLXG9BcXz3+3tl/WpKm0PQ=
+github.com/catenacyber/perfsprint v0.10.1/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc=
github.com/ccojocar/zxcvbn-go v1.0.4 h1:FWnCIRMXPj43ukfX000kvBZvV6raSxakYr1nzyNrUcc=
github.com/ccojocar/zxcvbn-go v1.0.4/go.mod h1:3GxGX+rHmueTUMvm5ium7irpyjmm7ikxYFOSJB21Das=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -209,6 +213,8 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godoc-lint/godoc-lint v0.10.1 h1:ZPUVzlDtJfA+P688JfPJPkI/SuzcBr/753yGIk5bOPA=
github.com/godoc-lint/godoc-lint v0.10.1/go.mod h1:KleLcHu/CGSvkjUH2RvZyoK1MBC7pDQg4NxMYLcBBsw=
+github.com/godoc-lint/godoc-lint v0.10.2 h1:dksNgK+zebnVlj4Fx83CRnCmPO0qRat/9xfFsir1nfg=
+github.com/godoc-lint/godoc-lint v0.10.2/go.mod h1:KleLcHu/CGSvkjUH2RvZyoK1MBC7pDQg4NxMYLcBBsw=
github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw=
github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -251,6 +257,8 @@ github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0a
github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY=
github.com/golangci/golangci-lint/v2 v2.6.2 h1:jkMSVv36JmyTENcEertckvimvjPcD5qxNM7W7qhECvI=
github.com/golangci/golangci-lint/v2 v2.6.2/go.mod h1:fSIMDiBt9kzdpnvvV7GO6iWzyv5uaeZ+iPor+2uRczE=
+github.com/golangci/golangci-lint/v2 v2.7.2 h1:AhBC+YeEueec4AGlIbvPym5C70Thx0JykIqXbdIXWx0=
+github.com/golangci/golangci-lint/v2 v2.7.2/go.mod h1:pDijleoBu7e8sejMqyZ3L5n6geqe+cVvOAz2QImqqVc=
github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95 h1:AkK+w9FZBXlU/xUmBtSJN1+tAI4FIvy5WtnUnY8e4p8=
github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95/go.mod h1:k9mmcyWKSTMcPPvQUCfRWWQ9VHJ1U9Dc0R7kaXAgtnQ=
github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c=
@@ -308,6 +316,8 @@ github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1T
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
+github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
@@ -394,6 +404,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgechev/revive v1.12.0 h1:Q+/kkbbwerrVYPv9d9efaPGmAO/NsxwW/nE6ahpQaCU=
github.com/mgechev/revive v1.12.0/go.mod h1:VXsY2LsTigk8XU9BpZauVLjVrhICMOV3k1lpB3CXrp8=
+github.com/mgechev/revive v1.13.0 h1:yFbEVliCVKRXY8UgwEO7EOYNopvjb1BFbmYqm9hZjBM=
+github.com/mgechev/revive v1.13.0/go.mod h1:efJfeBVCX2JUumNQ7dtOLDja+QKj9mYGgEZA7rt5u+0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -488,6 +500,8 @@ github.com/sashamelentyev/usestdlibvars v1.29.0 h1:8J0MoRrw4/NAXtjQqTHrbW9NN+3iM
github.com/sashamelentyev/usestdlibvars v1.29.0/go.mod h1:8PpnjHMk5VdeWlVb4wCdrB8PNbLqZ3wBZTZWkrpZZL8=
github.com/securego/gosec/v2 v2.22.10 h1:ntbBqdWXnu46DUOXn+R2SvPo3PiJCDugTCgTW2g4tQg=
github.com/securego/gosec/v2 v2.22.10/go.mod h1:9UNjK3tLpv/w2b0+7r82byV43wCJDNtEDQMeS+H/g2w=
+github.com/securego/gosec/v2 v2.22.11-0.20251204091113-daccba6b93d7 h1:rZg6IGn0ySYZwCX8LHwZoYm03JhG/cVAJJ3O+u3Vclo=
+github.com/securego/gosec/v2 v2.22.11-0.20251204091113-daccba6b93d7/go.mod h1:9sr22NZO5Kfh7unW/xZxkGYTmj2484/fCiE54gw7UTY=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -503,10 +517,14 @@ github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCp
github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
+github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
+github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -519,6 +537,8 @@ github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YE
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4=
github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk=
+github.com/stbenjam/no-sprintf-host-port v0.3.1 h1:AyX7+dxI4IdLBPtDbsGAyqiTSLpCP9hWRrXQDU4Cm/g=
+github.com/stbenjam/no-sprintf-host-port v0.3.1/go.mod h1:ODbZesTCHMVKthBHskvUUexdcNHAQRXk9NpSsL8p/HQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
@@ -541,6 +561,8 @@ github.com/timonwong/loggercheck v0.11.0 h1:jdaMpYBl+Uq9mWPXv1r8jc5fC3gyXx4/WGwT
github.com/timonwong/loggercheck v0.11.0/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8=
github.com/tomarrell/wrapcheck/v2 v2.11.0 h1:BJSt36snX9+4WTIXeJ7nvHBQBcm1h2SjQMSlmQ6aFSU=
github.com/tomarrell/wrapcheck/v2 v2.11.0/go.mod h1:wFL9pDWDAbXhhPZZt+nG8Fu+h29TtnZ2MW6Lx4BRXIU=
+github.com/tomarrell/wrapcheck/v2 v2.12.0 h1:H/qQ1aNWz/eeIhxKAFvkfIA+N7YDvq6TWVFL27Of9is=
+github.com/tomarrell/wrapcheck/v2 v2.12.0/go.mod h1:AQhQuZd0p7b6rfW+vUwHm5OMCGgp63moQ9Qr/0BpIWo=
github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI=
@@ -589,6 +611,8 @@ go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -642,6 +666,8 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
+golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -756,6 +782,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -774,6 +802,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -829,6 +859,8 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/.bingo/goreleaser.mod b/.bingo/goreleaser.mod
index d4e6c3832..3fe6a4405 100644
--- a/.bingo/goreleaser.mod
+++ b/.bingo/goreleaser.mod
@@ -1,5 +1,5 @@
module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
-go 1.22.5
+go 1.24.6
-require github.com/goreleaser/goreleaser v1.26.2
+require github.com/goreleaser/goreleaser/v2 v2.11.2
diff --git a/.bingo/goreleaser.sum b/.bingo/goreleaser.sum
index c5a6760d4..7d1df8e6f 100644
--- a/.bingo/goreleaser.sum
+++ b/.bingo/goreleaser.sum
@@ -1,199 +1,239 @@
+al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA=
+al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
+cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
+cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
-cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4=
-cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
-cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
-cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
-cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs=
-cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs=
-cloud.google.com/go/storage v1.39.1 h1:MvraqHKhogCOTXTlct/9C3K3+Uy2jBmFYb3/Sp6dVtY=
-cloud.google.com/go/storage v1.39.1/go.mod h1:xK6xZmxZmo+fyP7+DEF6FhNc24/JAe95OLyOHCXFH1o=
-code.gitea.io/sdk/gitea v0.18.0 h1:+zZrwVmujIrgobt6wVBWCqITz6bn1aBjnCUHmpZrerI=
-code.gitea.io/sdk/gitea v0.18.0/go.mod h1:IG9xZJoltDNeDSW0qiF2Vqx5orMWa7OhVWrjvrd5NpI=
-dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
-dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+cloud.google.com/go v0.121.1 h1:S3kTQSydxmu1JfLRLpKtxRPA7rSrYPRPEUmL/PavVUw=
+cloud.google.com/go v0.121.1/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw=
+cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU=
+cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
+cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
+cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
+cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=
+cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=
+cloud.google.com/go/kms v1.22.0 h1:dBRIj7+GDeeEvatJeTB19oYZNV0aj6wEqSIT/7gLqtk=
+cloud.google.com/go/kms v1.22.0/go.mod h1:U7mf8Sva5jpOb4bxYZdtw/9zsbIjrklYwPcvMk34AL8=
+cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
+cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
+cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
+cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
+cloud.google.com/go/storage v1.55.0 h1:NESjdAToN9u1tmhVqhXCaCwYBuvEhZLLv0gBr+2znf0=
+cloud.google.com/go/storage v1.55.0/go.mod h1:ztSmTTwzsdXe5syLVS0YsbFxXuvEmEyZj7v7zChEmuY=
+code.gitea.io/sdk/gitea v0.21.0 h1:69n6oz6kEVHRo1+APQQyizkhrZrLsTLXey9142pfkD4=
+code.gitea.io/sdk/gitea v0.21.0/go.mod h1:tnBjVhuKJCn8ibdyyhvUyxrR1Ca2KHEoTWoukNhXQPA=
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
+github.com/42wim/httpsig v1.2.2 h1:ofAYoHUNs/MJOLqQ8hIxeyz2QxOz8qdSVvp3PX/oPgA=
+github.com/42wim/httpsig v1.2.2/go.mod h1:P/UYo7ytNBFwc+dg35IubuAUIs8zj5zzFIgUCEl55WY=
github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w=
github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 h1:DSDNVxqkoXJiko6x8a90zidoYqnYYa6c1MTzDKzKkTo=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1/go.mod h1:zGqV2R4Cr/k8Uye5w+dgQ06WJtEcbQG/8J7BB6hnCr4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
-github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
-github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
+github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
+github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE=
+github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
-github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
-github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc=
+github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4=
+github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=
-github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 h1:Q9R3utmFg9K1B4OYtAZ7ZUUvIUdzQt7G2MN5Hi/d670=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.7/go.mod h1:bVrAueELJ0CKLBpUHDIvD516TwmHmzqwCpvONWRsw3s=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/date v0.3.1 h1:o9Z8Jyt+VJJTCZ/UORishuHOusBwolhjokt9s5k8I4w=
+github.com/Azure/go-autorest/autorest/date v0.3.1/go.mod h1:Dz/RDmXlfiFFS/eW+b/xMUSFs1tboPVy6UjgADToWDM=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
-github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
-github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
-github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/autorest/to v0.4.1 h1:CxNHBqdzTr7rLtdrtb5CMjJcDut+WNGCVv7OmS5+lTc=
+github.com/Azure/go-autorest/autorest/to v0.4.1/go.mod h1:EtaofgU4zmtvn1zT2ARsjRFdq9vXx0YWtmElwL+GZ9M=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/logger v0.2.2 h1:hYqBsEBywrrOSW24kkOCXRcKfKhK76OzLTfF+MYDE2o=
+github.com/Azure/go-autorest/logger v0.2.2/go.mod h1:I5fg9K52o+iuydlWfa9T5K6WFos9XYr9dYTFzpqgibw=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/Azure/go-autorest/tracing v0.6.1 h1:YUMSrC/CeD1ZnnXcNYU4a/fzsO35u2Fsful9L/2nyR0=
+github.com/Azure/go-autorest/tracing v0.6.1/go.mod h1:/3EgjbsjraOqiicERAeu3m7/z0x1TzjQGAwDrJrXGkc=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
-github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
-github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
+github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
-github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
-github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
-github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
-github.com/anchore/bubbly v0.0.0-20230518153401-87b6af8ccf22 h1:5NFK6VGgqBUOAX2SYyzFYvNdOiYDxzim8jga386FlZY=
-github.com/anchore/bubbly v0.0.0-20230518153401-87b6af8ccf22/go.mod h1:Kv+Mm9CdtnV8iem48iEPIwy7/N4Wmk0hpxYNH5gTwKQ=
-github.com/anchore/go-logger v0.0.0-20230725134548-c21dafa1ec5a h1:nJ2G8zWKASyVClGVgG7sfM5mwoZlZ2zYpIzN2OhjWkw=
-github.com/anchore/go-logger v0.0.0-20230725134548-c21dafa1ec5a/go.mod h1:ubLFmlsv8/DFUQrZwY5syT5/8Er3ugSr4rDFwHsE3hg=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
+github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
+github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
+github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
+github.com/anchore/bubbly v0.0.0-20241107060245-f2a5536f366a h1:smr1CcMkgeMd6G75N+2OVNk/uHbX/WLR0bk+kMWEyr8=
+github.com/anchore/bubbly v0.0.0-20241107060245-f2a5536f366a/go.mod h1:P5IrP8AhuzApVKa5H7k2hHX5pZA1uhyi+Z1VjK1EtA4=
+github.com/anchore/go-logger v0.0.0-20241005132348-65b4486fbb28 h1:TKlTOayTJKpoLPJbeMykEwxCn0enACf06u0RSIdFG5w=
+github.com/anchore/go-logger v0.0.0-20241005132348-65b4486fbb28/go.mod h1:5iJIa34inbIEFRwoWxNBTnjzIcl4G3le1LppPDmpg/4=
github.com/anchore/go-macholibre v0.0.0-20220308212642-53e6d0aaf6fb h1:iDMnx6LIjtjZ46C0akqveX83WFzhpTD3eqOthawb5vU=
github.com/anchore/go-macholibre v0.0.0-20220308212642-53e6d0aaf6fb/go.mod h1:DmTY2Mfcv38hsHbG78xMiTDdxFtkHpgYNVDPsF2TgHk=
-github.com/anchore/quill v0.4.1 h1:mffDnvnER3ZgPjN5hexc3nr/4Y1dtKdDB6td5K8uInk=
-github.com/anchore/quill v0.4.1/go.mod h1:t6hOPYDohN8wn2SRWQdNkJBkhmK8s3gzuHzzgcEvzQU=
+github.com/anchore/quill v0.5.1 h1:+TAJroWuMC0AofI4gD9V9v65zR8EfKZg8u+ZD+dKZS4=
+github.com/anchore/quill v0.5.1/go.mod h1:tAzfFxVluL2P1cT+xEy+RgQX1hpNuliUC5dTYSsnCLQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/atc0005/go-teams-notify/v2 v2.10.0 h1:eQvRIkyESQgBvlUdQ/iPol/lj3QcRyrdEQM3+c/nXhM=
-github.com/atc0005/go-teams-notify/v2 v2.10.0/go.mod h1:SIeE1UfCcVRYMqP5b+r1ZteHyA/2UAjzWF5COnZ8q0w=
-github.com/aws/aws-sdk-go v1.53.0 h1:MMo1x1ggPPxDfHMXJnQudTbGXYlD4UigUAud1DJxPVo=
-github.com/aws/aws-sdk-go v1.53.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
-github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 h1:gTK2uhtAPtFcdRRJilZPx8uJLL2J85xK11nKtWL0wfU=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1/go.mod h1:sxpLb+nZk7tIfCWChfd+h4QwHNUR57d8hA1cleTkjJo=
-github.com/aws/aws-sdk-go-v2/config v1.27.13 h1:WbKW8hOzrWoOA/+35S5okqO/2Ap8hkkFUzoW8Hzq24A=
-github.com/aws/aws-sdk-go-v2/config v1.27.13/go.mod h1:XLiyiTMnguytjRER7u5RIkhIqS8Nyz41SwAWb4xEjxs=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.13 h1:XDCJDzk/u5cN7Aple7D/MiAhx1Rjo/0nueJ0La8mRuE=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.13/go.mod h1:FMNcjQrmuBYvOTZDtOLCIu0esmxjF7RuA/89iSXWzQI=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9 h1:vXY/Hq1XdxHBIYgBUmug/AbMyIe1AKulPYS2/VE1X70=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9/go.mod h1:GyJJTZoHVuENM4TeJEl5Ffs4W9m19u+4wKJcDi/GZ4A=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3 h1:mDnFOE2sVkyphMWtTH+stv0eW3k0OTx94K63xpxHty4=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3/go.mod h1:V8MuRVcCRt5h1S+Fwu8KbC7l/gBGo3yBAyUbJM2IJOk=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.28.0 h1:rdPrcOZmqT2F+yzmKEImrx5XUs7Hpf4V9Rp6E8mhsxQ=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.28.0/go.mod h1:if7ybzzjOmDB8pat9FE35AHTY6ZxlYSy3YviSmFZv8c=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.5 h1:452e/nFuqPvwPg+1OD2CG/v29R9MH8egJSJKh2Qduv8=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.5/go.mod h1:8pvvNAklmq+hKmqyvFoMRg0bwg9sdGOvdwximmKiKP0=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5 h1:mbWNpfRUTT6bnacmvOTKXZjR/HycibdWzNpfbrbLDIs=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5/go.mod h1:FCOPWGjsshkkICJIn9hq9xr6dLKtyaWpuUojiN3W1/8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3 h1:4t+QEX7BsXz98W8W1lNvMAG+NX8qHz2CjLBxQKku40g=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3/go.mod h1:oFcjjUq5Hm09N9rpxTdeMeLeQcxS7mIkBkL8qUKng+A=
-github.com/aws/aws-sdk-go-v2/service/kms v1.30.0 h1:yS0JkEdV6h9JOo8sy2JSpjX+i7vsKifU8SIeHrqiDhU=
-github.com/aws/aws-sdk-go-v2/service/kms v1.30.0/go.mod h1:+I8VUUSVD4p5ISQtzpgSva4I8cJ4SQ4b1dcBcof7O+g=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4 h1:lW5xUzOPGAMY7HPuNF4FdyBwRc3UJ/e8KsapbesVeNU=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4/go.mod h1:MGTaf3x/+z7ZGugCGvepnx2DS6+caCYYqKhzVoLNYPk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.6 h1:o5cTaeunSpfXiLTIBx5xo2enQmiChtu1IBbzXnfU9Hs=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.6/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0 h1:Qe0r0lVURDDeBQJ4yP+BOrJkvkiCo/3FH/t+wY11dmw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.7 h1:et3Ta53gotFR4ERLXXHIHl/Uuk1qYpP5uU7cvNql8ns=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.7/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
-github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
-github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240514230400-03fa26f5508f h1:Z0kS9pJDQgCg3u2lH6+CdYaFbyQtyukVTiUCG6re0E4=
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240514230400-03fa26f5508f/go.mod h1:rAE739ssmE5O5fLuQ2y8uHdmOJaelE5I0Es3SxV0y1A=
+github.com/atc0005/go-teams-notify/v2 v2.13.0 h1:nbDeHy89NjYlF/PEfLVF6lsserY9O5SnN1iOIw3AxXw=
+github.com/atc0005/go-teams-notify/v2 v2.13.0/go.mod h1:WSv9moolRsBcpZbwEf6gZxj7h0uJlJskJq5zkEWKO8Y=
+github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
+github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go-v2 v1.37.1 h1:SMUxeNz3Z6nqGsXv0JuJXc8w5YMtrQMuIBmDx//bBDY=
+github.com/aws/aws-sdk-go-v2 v1.37.1/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg=
+github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0=
+github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 h1:6VFPH/Zi9xYFMJKPQOX5URYkQoXRWeJ7V/7Y6ZDYoms=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69/go.mod h1:GJj8mmO6YT6EqgduWocwhMoxTLFitkhIrK+owzrYL2I=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 h1:ksZXBYv80EFTcgc8OJO48aQ8XDWXIQL7gGasPeCoTzI=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1/go.mod h1:HSksQyyJETVZS7uM54cir0IgxttTD+8aEoJMPGepHBI=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 h1:+dn/xF/05utS7tUhjIcndbuaPjfll2LhbH1cCDGLYUQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1/go.mod h1:hyAGz30LHdm5KBZDI58MXx5lDVZ5CUfvfTZvMu4HCZo=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1 h1:4HbnOGE9491a9zYJ9VpPh1ApgEq6ZlD4Kuv1PJenFpc=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1/go.mod h1:Z6QnHC6TmpJWUxAy8FI4JzA7rTwl6EIANkyK9OR5z5w=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 h1:Bwzh202Aq7/MYnAjXA9VawCf6u+hjwMdoYmZ4HYsdf8=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1/go.mod h1:xZzWl9AXYa6zsLLH41HBFW8KRKJRIzlGmvSM0mVMIX4=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 h1:XJ/AEFYj9VFPJdF+VFi4SUPEDfz1akHwxxm07JfZJcs=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2/go.mod h1:JUBHdhvKbbKmhaHjLsKJAWnQL80T6nURmhB/LEprV+4=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1 h1:ps3nrmBWdWwakZBydGX1CxeYFK80HsQ79JLMwm7Y4/c=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1/go.mod h1:bAdfrfxENre68Hh2swNaGEVuFYE74o0SaSCAlaG9E74=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1 h1:ky79ysLMxhwk5rxJtS+ILd3Mc8kC5fhsLBrP27r6h4I=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1/go.mod h1:+2MmkvFvPYM1vsozBWduoLJUi5maxFk5B7KJFECujhY=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1 h1:MdVYlN5pcQu1t1OYx4Ajo3fKl1IEhzgdPQbYFCRjYS8=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1/go.mod h1:iikmNLrvHm2p4a3/4BPeix2S9P+nW8yM1IZW73x8bFA=
+github.com/aws/aws-sdk-go-v2/service/kms v1.38.1 h1:tecq7+mAav5byF+Mr+iONJnCBf4B4gon8RSp4BrweSc=
+github.com/aws/aws-sdk-go-v2/service/kms v1.38.1/go.mod h1:cQn6tAF77Di6m4huxovNM7NVAozWTZLsDRp9t8Z/WYk=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1 h1:Hsqo8+dFxSdDvv9B2PgIx1AJAnDpqgS0znVI+R+MoGY=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1/go.mod h1:8Q0TAPXD68Z8YqlcIGHs/UNIDHsxErV9H4dl4vJEpgw=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E=
+github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w=
+github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
+github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1 h1:6lMw4/QGLFPvbKQ0eri/9Oh3YX5Nm6BPrUlZR8yuJHg=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1/go.mod h1:EVJOSYOVeoD3VFFZ/dWCAzWJp5wZr9lTOCjW8ejAmO0=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/blacktop/go-dwarf v1.0.9 h1:eT/L7gt0gllvvgnRXY0MFKjNB6+jtOY5DTm2ynVX2dY=
-github.com/blacktop/go-dwarf v1.0.9/go.mod h1:4W2FKgSFYcZLDwnR7k+apv5i3nrau4NGl9N6VQ9DSTo=
-github.com/blacktop/go-macho v1.1.162 h1:FjM3XAsJTAOGZ1eppRSX9ZBX3Bk11JMTC1amsZAOA5I=
-github.com/blacktop/go-macho v1.1.162/go.mod h1:f2X4noFBob4G5bWUrzvPBKDVcFWZgDCM7rIn7ygTID0=
+github.com/blacktop/go-dwarf v1.0.10 h1:i9zYgcIROETsNZ6V+zZn3uDH21FCG5BLLZ837GitxS0=
+github.com/blacktop/go-dwarf v1.0.10/go.mod h1:4W2FKgSFYcZLDwnR7k+apv5i3nrau4NGl9N6VQ9DSTo=
+github.com/blacktop/go-macho v1.1.238 h1:OFfT6NB/SWxkoky7L/ytuY8QekgFpa9pmz/GHUQLsmM=
+github.com/blacktop/go-macho v1.1.238/go.mod h1:dtlW2AJKQpFzImBVPWiUKZ6OxrQ2MLfWi/BPPe0EONE=
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4=
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
-github.com/bluesky-social/indigo v0.0.0-20240411170459-440932307e0d h1:xxPhzCOpmOntzVe8S6tqsMdFgaB8B4NXSV54lG4B1qk=
-github.com/bluesky-social/indigo v0.0.0-20240411170459-440932307e0d/go.mod h1:ysMQ0a4RYWjgyvKrl5ME352oHA6QgK900g5sB9XXgPE=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bluesky-social/indigo v0.0.0-20240813042137-4006c0eca043 h1:927VIkxPFKpfJKVDtCNgSQtlhksARaLvsLxppR2FukM=
+github.com/bluesky-social/indigo v0.0.0-20240813042137-4006c0eca043/go.mod h1:dXjdzg6bhg1JKnKuf6EBJTtcxtfHYBFEe9btxX5YeAE=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
-github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/caarlos0/ctrlc v1.2.0 h1:AtbThhmbeYx1WW3WXdWrd94EHKi+0NPRGS4/4pzrjwk=
-github.com/caarlos0/ctrlc v1.2.0/go.mod h1:n3gDlSjsXZ7rbD9/RprIR040b7oaLfNStikPd4gFago=
-github.com/caarlos0/env/v11 v11.0.1 h1:A8dDt9Ub9ybqRSUF3fQc/TA/gTam2bKT4Pit+cwrsPs=
-github.com/caarlos0/env/v11 v11.0.1/go.mod h1:2RC3HQu8BQqtEK3V4iHPxj0jOdWdbPpWJ6pOueeU1xM=
+github.com/caarlos0/env/v11 v11.3.1 h1:cArPWC15hWmEt+gWk7YBi7lEXTXCvpaSdCiZE2X5mCA=
+github.com/caarlos0/env/v11 v11.3.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U=
github.com/caarlos0/go-reddit/v3 v3.0.1 h1:w8ugvsrHhaE/m4ez0BO/sTBOBWI9WZTjG7VTecHnql4=
github.com/caarlos0/go-reddit/v3 v3.0.1/go.mod h1:QlwgmG5SAqxMeQvg/A2dD1x9cIZCO56BMnMdjXLoisI=
github.com/caarlos0/go-shellwords v1.0.12 h1:HWrUnu6lGbWfrDcFiHcZiwOLzHWjjrPVehULaTFgPp8=
github.com/caarlos0/go-shellwords v1.0.12/go.mod h1:bYeeX1GrTLPl5cAMYEzdm272qdsQAZiaHgeF0KTk1Gw=
-github.com/caarlos0/go-version v0.1.1 h1:1bikKHkGGVIIxqCmufhSSs3hpBScgHGacrvsi8FuIfc=
-github.com/caarlos0/go-version v0.1.1/go.mod h1:Ze5Qx4TsBBi5FyrSKVg1Ibc44KGV/llAaKGp86oTwZ0=
-github.com/caarlos0/log v0.4.4 h1:LnvgBz/ofsJ00AupP/cEfksJSZglb1L69g4Obk/sdAc=
-github.com/caarlos0/log v0.4.4/go.mod h1:+AmCI9Liv5LKXmzFmFI1htuHdTTj/0R3KuoP9DMY7Mo=
+github.com/caarlos0/go-version v0.2.1 h1:bJY5WRvs2RXErLKBELd1WR0U72whX8ELbKg0WtQ9/7A=
+github.com/caarlos0/go-version v0.2.1/go.mod h1:X+rI5VAtJDpcjCjeEIXpxGa5+rTcgur1FK66wS0/944=
+github.com/caarlos0/log v0.5.1 h1:uB1jhC/+HimtyyL7pxidkUWO4raKmidVuXifC4uqMf8=
+github.com/caarlos0/log v0.5.1/go.mod h1:37k7VCogxsMsgpIQaca5g9eXFFrLJ5LGgA4Ng/xN85o=
github.com/caarlos0/testfs v0.4.4/go.mod h1:bRN55zgG4XCUVVHZCeU+/Tz1Q6AxEJOEJTliBy+1DMk=
github.com/carlmjohnson/versioninfo v0.22.5 h1:O00sjOLUAFxYQjlN/bzYTuZiS0y6fWDQjMRvwtKgwwc=
github.com/carlmjohnson/versioninfo v0.22.5/go.mod h1:QT9mph3wcVfISUKd0i9sZfVrPviHuSF+cUtLjm2WSf8=
github.com/cavaliergopher/cpio v1.0.1 h1:KQFSeKmZhv0cr+kawA3a0xTQCU4QxXF1vhU7P7av2KM=
github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc=
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
-github.com/charmbracelet/bubbletea v0.22.1 h1:z66q0LWdJNOWEH9zadiAIXp2GN1AWrwNXU8obVY9X24=
-github.com/charmbracelet/bubbletea v0.22.1/go.mod h1:8/7hVvbPN6ZZPkczLiB8YpLkLJ0n7DMho5Wvfd2X1C0=
-github.com/charmbracelet/lipgloss v0.10.0 h1:KWeXFSexGcfahHX+54URiZGkBFazf70JNMtwg/AFW3s=
-github.com/charmbracelet/lipgloss v0.10.0/go.mod h1:Wig9DSfvANsxqkRsqj6x87irdy123SR4dOXlKa91ciE=
-github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d h1:+o+e/8hf7cG0SbAzEAm/usJ8qoZPgFXhudLjop+TM0g=
-github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d/go.mod h1:aoG4bThKYIOnyB55r202eHqo6TkN7ZXV+cu4Do3eoBQ=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charmbracelet/bubbletea v1.3.0 h1:fPMyirm0u3Fou+flch7hlJN9krlnVURrkUVDwqXjoAc=
+github.com/charmbracelet/bubbletea v1.3.0/go.mod h1:eTaHfqbIwvBhFQM/nlT1NsGc4kp8jhF8LfUK67XiTDM=
+github.com/charmbracelet/colorprofile v0.3.1 h1:k8dTHMd7fgw4bnFd7jXTLZrSU/CQrKnL3m+AxCzDz40=
+github.com/charmbracelet/colorprofile v0.3.1/go.mod h1:/GkGusxNs8VB/RSOh3fu0TJmQ4ICMMPApIIVn0KszZ0=
+github.com/charmbracelet/fang v0.3.0 h1:Be6TB+ExS8VWizTQRJgjqbJBudKrmVUet65xmFPGhaA=
+github.com/charmbracelet/fang v0.3.0/go.mod h1:b0ZfEXZeBds0I27/wnTfnv2UVigFDXHhrFNwQztfA0M=
+github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
+github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
+github.com/charmbracelet/lipgloss/v2 v2.0.0-beta1 h1:SOylT6+BQzPHEjn15TIzawBPVD0QmhKXbcb3jY0ZIKU=
+github.com/charmbracelet/lipgloss/v2 v2.0.0-beta1/go.mod h1:tRlx/Hu0lo/j9viunCN2H+Ze6JrmdjQlXUQvvArgaOc=
+github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE=
+github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
+github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
+github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
+github.com/charmbracelet/x/exp/charmtone v0.0.0-20250603201427-c31516f43444 h1:IJDiTgVE56gkAGfq0lBEloWgkXMk4hl/bmuPoicI4R0=
+github.com/charmbracelet/x/exp/charmtone v0.0.0-20250603201427-c31516f43444/go.mod h1:T9jr8CzFpjhFVHjNjKwbAD7KwBNyFnj2pntAO7F2zw0=
+github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
+github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4=
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
-github.com/cloudflare/circl v1.3.8 h1:j+V8jJt09PoeMFIu2uh5JUyEaIHTXVOHslFoLNAKqwI=
-github.com/cloudflare/circl v1.3.8/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU=
+github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
+github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
-github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
-github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
-github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
-github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
+github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k=
+github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
-github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
+github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
+github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0=
@@ -204,62 +244,75 @@ github.com/dghubble/oauth1 v0.7.3 h1:EkEM/zMDMp3zOsX2DC/ZQ2vnEX3ELK0/l9kb+vs4ptE
github.com/dghubble/oauth1 v0.7.3/go.mod h1:oxTe+az9NSMIucDPDCCtzJGsPhciJV33xocHfcR2sVY=
github.com/dghubble/sling v1.4.0 h1:/n8MRosVTthvMbwlNZgLx579OGVjUOy3GNEv5BIqAWY=
github.com/dghubble/sling v1.4.0/go.mod h1:0r40aNsU9EdDUVBNhfCstAtFgutjgJGYbO1oNzkMoM8=
+github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v25.0.4+incompatible h1:DatRkJ+nrFoYL2HZUzjM5Z5sAmcA5XGp+AW0oEw2+cA=
-github.com/docker/cli v25.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/cli v28.2.2+incompatible h1:qzx5BNUDFqlvyq4AHzdNB7gSyVTmU4cgsyN9SdInc1A=
+github.com/docker/cli v28.2.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
-github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
-github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
+github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
+github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/elliotchance/orderedmap/v2 v2.2.0 h1:7/2iwO98kYT4XkOjA9mBEIwvi4KpGB4cyHeOFOnj4Vk=
-github.com/elliotchance/orderedmap/v2 v2.2.0/go.mod h1:85lZyVbpGaGvHvnKa7Qhx7zncAdBIBq6u56Hb1PRU5Q=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
-github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
-github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
+github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/github/smimesign v0.2.0 h1:Hho4YcX5N1I9XNqhq0fNx0Sts8MhLonHd+HRXVGNjvk=
github.com/github/smimesign v0.2.0/go.mod h1:iZiiwNT4HbtGRVqCQu7uJPEZCuEE5sfSSttcnePkDl4=
+github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
+github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
-github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
-github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
-github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys=
-github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY=
+github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
+github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
+github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60=
+github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k=
+github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY=
+github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
-github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
-github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU=
+github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
@@ -270,15 +323,16 @@ github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9Z
github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
github.com/go-restruct/restruct v1.2.0-alpha h1:2Lp474S/9660+SJjpVxoKuWX09JsXHSrdV7Nv3/gkvc=
github.com/go-restruct/restruct v1.2.0-alpha/go.mod h1:KqrpKpn4M8OLznErihXTGLlsXFGeLxHUrLRRI/1YjGk=
github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 h1:wG8n/XJQ07TmjbITcGiUaOtXxdrINDz1b0J1w0SzqDc=
github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1/go.mod h1:A2S0CWkNylc2phvKXWBBdD3K0iGnDBGbzRpISP2zBl8=
-github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
+github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
@@ -286,14 +340,15 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -304,8 +359,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/certificate-transparency-go v1.3.1 h1:akbcTfQg0iZlANZLn0L9xOeWtyCIdeoYhKrqi5iH3Go=
+github.com/google/certificate-transparency-go v1.3.1/go.mod h1:gg+UQlx6caKEDQ9EElFOujyxEQEfOiQzAt6782Bvi8k=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -313,73 +368,69 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
-github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
-github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4=
-github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4=
+github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU=
+github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y=
+github.com/google/go-github/v74 v74.0.0 h1:yZcddTUn8DPbj11GxnMrNiAnXH14gNs559AsUpNpPgM=
+github.com/google/go-github/v74 v74.0.0/go.mod h1:ubn/YdyftV80VPSI26nSJvaEsTOnsjrxG3o9kJhcyak=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/ko v0.15.4 h1:0blRbIdPmSy6v4LvedGxbI/8krdJYQgbSih3v6Y8V1c=
-github.com/google/ko v0.15.4/go.mod h1:ZkcmfV91Xt6ZzOBHc/cXXGYnqWdNWDVy/gHoUU9sjag=
+github.com/google/ko v0.18.0 h1:jkF5Fkvm+SMtqTt/SMzsCJO+6hz7FSDE6GRldGn0VVI=
+github.com/google/ko v0.18.0/go.mod h1:iR0zT5aR4pINW9tk2Ujj99dBJ7cVy4to9ZirAkGKb9g=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/rpmpack v0.6.1-0.20240329070804-c2247cbb881a h1:JJBdjSfqSy3mnDT0940ASQFghwcZ4y4cb6ttjAoXqwE=
-github.com/google/rpmpack v0.6.1-0.20240329070804-c2247cbb881a/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
+github.com/google/rpmpack v0.7.0 h1:mA2Yd3/dOmao1ype0DJA8DFquEpslaleywOuglVCrUs=
+github.com/google/rpmpack v0.7.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
+github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962 h1:+9C/TgFfcCmZBV7Fjb3kQCGlkpFrhtvFDgbdQHB9RaA=
+github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962/go.mod h1:H3K1Iu/utuCfa10JO+GsmKUYSWi7ug57Rk6GaDRHaaQ=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
-github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
-github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
+github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
+github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
github.com/gookit/color v1.2.5/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/goreleaser/chglog v0.6.1 h1:NZKiX8l0FTQPRzBgKST7knvNZmZ04f7PEGkN2wInfhE=
-github.com/goreleaser/chglog v0.6.1/go.mod h1:Bnnfo07jMZkaAb0uRNASMZyOsX6ROW6X1qbXqN3guUo=
+github.com/goreleaser/chglog v0.7.0 h1:/KzXWAeg4DrEz4r3OI6K2Yb8RAsVGeInCUfLWFXL9C0=
+github.com/goreleaser/chglog v0.7.0/go.mod h1:2h/yyq9xvTUeM9tOoucBP+jri8Dj28splx+SjlYkklc=
github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+k+7I=
github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU=
-github.com/goreleaser/goreleaser v1.26.2 h1:1iY1HaXtRiMTrwy6KE1sNjkRjsjMi+9l0k6WUX8GpWw=
-github.com/goreleaser/goreleaser v1.26.2/go.mod h1:mHi6zr6fuuOh5eHdWWgyo/N8BWED5WEVtb/4GETc9jQ=
-github.com/goreleaser/nfpm/v2 v2.37.1 h1:RUmeEt8OlEVeSzKRrO5Vl5qVWCtUwx4j9uivGuRo5fw=
-github.com/goreleaser/nfpm/v2 v2.37.1/go.mod h1:q8+sZXFqn106/eGw+9V+I8+izFxZ/sJjrhwmEUxXhUg=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
-github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/goreleaser/goreleaser/v2 v2.11.2 h1:Od6dcPI5r8IWVPnJYz6wYe3rML1qf80fLzXB1Ix6ZnY=
+github.com/goreleaser/goreleaser/v2 v2.11.2/go.mod h1:NSsia+m49thkd/pX9Rz7Cq1KE8HDGrLJVoPLjFeAV/4=
+github.com/goreleaser/nfpm/v2 v2.43.0 h1:o5oureuZkhu55RK0M9WSN8JLW7hu6MymtMh7LypInlk=
+github.com/goreleaser/nfpm/v2 v2.43.0/go.mod h1:f//PE8PjNHjaPCbd7Jkok+aPKdLTrzM+fuIWg3PfVRg=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
-github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
-github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
-github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
-github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
-github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
+github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/in-toto/attestation v1.1.1 h1:QD3d+oATQ0dFsWoNh5oT0udQ3tUrOsZZ0Fc3tSgWbzI=
+github.com/in-toto/attestation v1.1.1/go.mod h1:Dcq1zVwA2V7Qin8I7rgOi+i837wEf/mOZwRm047Sjys=
+github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
+github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI=
-github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
+github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
+github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs=
@@ -410,9 +461,10 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOl
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7 h1:FWpSWRD8FbVkKQu8M1DM9jF5oXFLyE+XpisIYfdzbic=
+github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7/go.mod h1:BMxO138bOokdgt4UaxZiEfypcSHX0t6SIFimVP1oRfk=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@@ -421,8 +473,8 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
@@ -432,46 +484,40 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 h1:WGrKdjHtWC67RX96eTkYD2f53NDHhrq/7robWTAfk4s=
-github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491/go.mod h1:o158RFmdEbYyIZmXAbrvmJWesbyxlLKee6X64VPVuOc=
+github.com/letsencrypt/boulder v0.0.0-20250411005613-d800055fe666 h1:ndfLOJNaxu0fX358UKxtq2bU8IMASWi87Hn0Nv/TIoY=
+github.com/letsencrypt/boulder v0.0.0-20250411005613-d800055fe666/go.mod h1:WGXwLq/jKt0kng727wv6a0h0q7TVC+MwS2S75rcqL+4=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
-github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
+github.com/mark3labs/mcp-go v0.36.0 h1:rIZaijrRYPeSbJG8/qNDe0hWlGrCJ7FWHNMz2SQpTis=
+github.com/mark3labs/mcp-go v0.36.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g=
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
-github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
-github.com/mattn/go-mastodon v0.0.8 h1:UgKs4SmQ5JeawxMIPP7NQ9xncmOXA+5q6jYk4erR7vk=
-github.com/mattn/go-mastodon v0.0.8/go.mod h1:8YkqetHoAVEktRkK15qeiv/aaIMfJ/Gc89etisPZtHU=
-github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
-github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
-github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 h1:P8UmIzZMYDR+NGImiFvErt6VWfIRPuGM+vyjiEdkmIw=
+github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
+github.com/mattn/go-mastodon v0.0.10 h1:wz1d/aCkJOIkz46iv4eAqXHVreUMxydY1xBWrPBdDeE=
+github.com/mattn/go-mastodon v0.0.10/go.mod h1:YBofeqh7G6s787787NQR8erBYz6fKDu+KNMrn5RuD6Y=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
-github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho=
-github.com/muesli/ansi v0.0.0-20211031195517-c9f0611b6c70 h1:kMlmsLSbjkikxQJ1IPwaM+7LJ9ltFu/fi8CRzvSnQmA=
-github.com/muesli/ansi v0.0.0-20211031195517-c9f0611b6c70/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho=
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
github.com/muesli/mango v0.1.0 h1:DZQK45d2gGbql1arsYA4vfg4d7I9Hfx5rX/GCmzsAvI=
@@ -480,13 +526,10 @@ github.com/muesli/mango-cobra v1.2.0 h1:DQvjzAM0PMZr85Iv9LIMaYISpTOliMEg+uMFtNbY
github.com/muesli/mango-cobra v1.2.0/go.mod h1:vMJL54QytZAJhCT13LPVDfkvCUJ5/4jNUKF/8NC2UjA=
github.com/muesli/mango-pflag v0.1.0 h1:UADqbYgpUyRoBja3g6LUL+3LErjpsOwaC9ywvBWe7Sg=
github.com/muesli/mango-pflag v0.1.0/go.mod h1:YEQomTxaCUp8PrbhFh10UfbhbQrM/xJ4i2PB8VTLLW0=
-github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
-github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
github.com/muesli/roff v0.1.0 h1:YD0lalCotmYuF5HhZliKWlIx7IEhiXeSfq7hNjFqGF8=
github.com/muesli/roff v0.1.0/go.mod h1:pjAHQM9hdUUwm/krAfrLGgJkXJ+YuhtsfZ42kieB2Ig=
-github.com/muesli/termenv v0.11.1-0.20220212125758-44cd13922739/go.mod h1:Bd5NYQ7pd+SrtBSrSNoBBmXlcY8+Xj4BMJgh8qcZrvs=
-github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
-github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
+github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
+github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
@@ -501,27 +544,30 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
-github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/pborman/getopt v0.0.0-20180811024354-2b5b3bfb099b/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
+github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
+github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
-github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
-github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
-github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
+github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
+github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4=
+github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f h1:VXTQfuJj9vKR4TCkEuWIckKvdHFeJH/huIFJ9/cXOB0=
github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -529,113 +575,143 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
-github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
-github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
-github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
+github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
+github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=
+github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e h1:7q6NSFZDeGfvvtIRwBrU/aegEYJYmvev0cHAwo17zZQ=
github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e/go.mod h1:DkpGd78rljTxKAnTDPFqXSGxvETQnJyuSOQwsHycqfs=
-github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
-github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
+github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
+github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
-github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
-github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
+github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
+github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
+github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigstore/cosign/v2 v2.2.4 h1:iY4vtEacmu2hkNj1Fh+8EBqBwKs2DHM27/lbNWDFJro=
-github.com/sigstore/cosign/v2 v2.2.4/go.mod h1:JZlRD2uaEjVAvZ1XJ3QkkZJhTqSDVtLaet+C/TMR81Y=
-github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
-github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
-github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4=
-github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs=
+github.com/sigstore/cosign/v2 v2.5.0 h1:1aRfPgRQHHlODI3Mvs/JkPBS9dJT9bRLCuHZgnHxFt8=
+github.com/sigstore/cosign/v2 v2.5.0/go.mod h1:2V2hmo+jjFNnDb5Q5VL6PXvLU9Vujio7T5yldrpNTRw=
+github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc=
+github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
+github.com/sigstore/rekor v1.3.9 h1:sUjRpKVh/hhgqGMs0t+TubgYsksArZ6poLEC3MsGAzU=
+github.com/sigstore/rekor v1.3.9/go.mod h1:xThNUhm6eNEmkJ/SiU/FVU7pLY2f380fSDZFsdDWlcM=
+github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ=
+github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ=
+github.com/sigstore/sigstore-go v0.7.1 h1:lyzi3AjO6+BHc5zCf9fniycqPYOt3RaC08M/FRmQhVY=
+github.com/sigstore/sigstore-go v0.7.1/go.mod h1:AIRj4I3LC82qd07VFm3T2zXYiddxeBV1k/eoS8nTz0E=
+github.com/sigstore/timestamp-authority v1.2.5 h1:W22JmwRv1Salr/NFFuP7iJuhytcZszQjldoB8GiEdnw=
+github.com/sigstore/timestamp-authority v1.2.5/go.mod h1:gWPKWq4HMWgPCETre0AakgBzcr9DRqHrsgbrRqsigOs=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
-github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
-github.com/slack-go/slack v0.13.0 h1:7my/pR2ubZJ9912p9FtvALYpbt0cQPAqkRy2jaSI1PQ=
-github.com/slack-go/slack v0.13.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
+github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
+github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=
+github.com/slack-go/slack v0.17.3 h1:zV5qO3Q+WJAQ/XwbGfNFrRMaJ5T/naqaonyPV/1TP4g=
+github.com/slack-go/slack v0.17.3/go.mod h1:X+UqOufi3LYQHDnMG1vxf0J8asC6+WllXrVrhl8/Prk=
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
-github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
-github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
-github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
+github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
+github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
+github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
+github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI=
+github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug=
+github.com/theupdateframework/go-tuf/v2 v2.0.2 h1:PyNnjV9BJNzN1ZE6BcWK+5JbF+if370jjzO84SS+Ebo=
+github.com/theupdateframework/go-tuf/v2 v2.0.2/go.mod h1:baB22nBHeHBCeuGZcIlctNq4P61PcOdyARlplg5xmLA=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y=
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFyPdL66DjUD96XmzVL3ZntbzcflLnznH0fr99w5VqE=
+github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=
+github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
-github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
+github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 h1:jIVmlAFIqV3d+DOxazTR9v+zgj8+VYuQBzPgBZvWBHA=
github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651/go.mod h1:b26F2tHLqaoRQf8DywqzVaV1MQ9yvjb0OMcNl7Nxu20=
github.com/wagoodman/go-progress v0.0.0-20220614130704-4b1c25a33c7c h1:gFwUKtkv6QzQsFdIjvPqd0Qdw42DHUEbbUdiUTI1uco=
github.com/wagoodman/go-progress v0.0.0-20220614130704-4b1c25a33c7c/go.mod h1:jLXFoL31zFaHKAAyZUh+sxiTDFe1L1ZHrcK2T1itVKA=
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
-github.com/whyrusleeping/cbor-gen v0.1.1-0.20240311221002-68b9f235c302 h1:MhInbXe4SzcImAKktUvWBCWZgcw6MYf5NfumTj1BhAw=
-github.com/whyrusleeping/cbor-gen v0.1.1-0.20240311221002-68b9f235c302/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so=
+github.com/whyrusleeping/cbor-gen v0.1.3-0.20240731173018-74d74643234c h1:Jmc9fHbd0LKFmS5CkLgczNUyW36UbiyvbHCG9xCTyiw=
+github.com/whyrusleeping/cbor-gen v0.1.3-0.20240731173018-74d74643234c/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so=
github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
-github.com/xanzy/go-gitlab v0.105.0 h1:3nyLq0ESez0crcaM19o5S//SvezOQguuIHZ3wgX64hM=
-github.com/xanzy/go-gitlab v0.105.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
+github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
+github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
+github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
+github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
gitlab.com/digitalxero/go-conventional-commit v1.0.7 h1:8/dO6WWG+98PMhlZowt/YjuiKhqhGlOCwlIV8SqqGh8=
gitlab.com/digitalxero/go-conventional-commit v1.0.7/go.mod h1:05Xc2BFsSyC5tKhK0y+P3bs0AwUtNuTp+mTpbCU/DZ0=
-go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
-go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
+gitlab.com/gitlab-org/api/client-go v0.137.0 h1:H26yL44qnb38Czl20pEINCJrcj63W6/BX8iKPVUKQP0=
+gitlab.com/gitlab-org/api/client-go v0.137.0/go.mod h1:AcAYES3lfkIS4zhso04S/wyUaWQmDYve2Fd9AF7C6qc=
+go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
+go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
+go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
+go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
+go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
+go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
+go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
+go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
+go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
+go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
+go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
+go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
-go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
@@ -646,8 +722,8 @@ go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
-gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro=
-gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco=
+gocloud.dev v0.42.0 h1:qzG+9ItUL3RPB62/Amugws28n+4vGZXEoJEAMfjutzw=
+gocloud.dev v0.42.0/go.mod h1:zkaYAapZfQisXOA4bzhsbA4ckiStGQ3Psvs9/OQ5dPM=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -655,20 +731,16 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
-golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
-golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
-golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
-golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
-golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
+golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8=
-golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -681,8 +753,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
+golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -696,17 +768,15 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
-golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
+golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
-golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -717,8 +787,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -731,45 +801,40 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
+golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
-golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
+golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
-golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
+golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -787,34 +852,34 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
-golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
-golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
+golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
-google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk=
-google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis=
+golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
+golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+google.golang.org/api v0.235.0 h1:C3MkpQSRxS1Jy6AkzTGKKrpSCOd2WOGrezZ+icKSkKo=
+google.golang.org/api v0.235.0/go.mod h1:QpeJkemzkFKe5VCE/PMv7GsUfn9ZF+u+q1Q7w6ckxTg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s=
-google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
+google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s=
+google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
+google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -824,18 +889,14 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs=
-gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
-gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
-gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk=
gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
@@ -843,7 +904,6 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -852,11 +912,13 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
-sigs.k8s.io/kind v0.23.0 h1:8fyDGWbWTeCcCTwA04v4Nfr45KKxbSPH1WO9K+jVrBg=
-sigs.k8s.io/kind v0.23.0/go.mod h1:ZQ1iZuJLh3T+O8fzhdi3VWcFTzsdXtNv2ppsHc8JQ7s=
+sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA=
+sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
-software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
-software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
+software.sslmate.com/src/go-pkcs12 v0.5.0 h1:EC6R394xgENTpZ4RltKydeDUjtlM5drOYIG9c6TVj2M=
+software.sslmate.com/src/go-pkcs12 v0.5.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
diff --git a/.bingo/kube-score.mod b/.bingo/kube-score.mod
new file mode 100644
index 000000000..873a8ecb7
--- /dev/null
+++ b/.bingo/kube-score.mod
@@ -0,0 +1,5 @@
+module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
+
+go 1.24.6
+
+require github.com/zegl/kube-score v1.20.0 // cmd/kube-score
diff --git a/.bingo/kube-score.sum b/.bingo/kube-score.sum
new file mode 100644
index 000000000..9a4cabc8e
--- /dev/null
+++ b/.bingo/kube-score.sum
@@ -0,0 +1,98 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eidolon/wordwrap v0.0.0-20161011182207-e0f54129b8bb h1:ioQwBmKdOCpMVS/bDaESqNWXIE/aw4+gsVtysCGMWZ4=
+github.com/eidolon/wordwrap v0.0.0-20161011182207-e0f54129b8bb/go.mod h1:ZAPs+OyRzeVJFGvXVDVffgCzQfjg3qU9Ig8G/MU3zZ4=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report/v2 v2.1.0 h1:X3+hPYlSczH9IMIpSC9CQSZA0L+BipYafciZUWHEmsc=
+github.com/jstemmer/go-junit-report/v2 v2.1.0/go.mod h1:mgHVr7VUo5Tn8OLVr1cKnLuEy0M92wdRntM99h7RkgQ=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/zegl/kube-score v1.20.0 h1:J1VqK86SunV4Gg8emPTmwUVxe0rmXnAs5K9ZUbGMKR8=
+github.com/zegl/kube-score v1.20.0/go.mod h1:mBOw3S3g7TBG/GziT8xNG15dCFn54/jUeEHndxLinE8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
+golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
+golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
+k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
+k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
+k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/.bingo/variables.env b/.bingo/variables.env
index fc3a980e0..52133be6a 100644
--- a/.bingo/variables.env
+++ b/.bingo/variables.env
@@ -10,6 +10,8 @@ fi
BINGO="${GOBIN}/bingo-v0.9.0"
+CONFTEST="${GOBIN}/conftest-v0.62.0"
+
CONTROLLER_GEN="${GOBIN}/controller-gen-v0.19.0"
CRD_DIFF="${GOBIN}/crd-diff-v0.5.0"
@@ -18,14 +20,16 @@ CRD_REF_DOCS="${GOBIN}/crd-ref-docs-v0.2.0"
GOJQ="${GOBIN}/gojq-v0.12.17"
-GOLANGCI_LINT="${GOBIN}/golangci-lint-v2.6.2"
+GOLANGCI_LINT="${GOBIN}/golangci-lint-v2.7.2"
-GORELEASER="${GOBIN}/goreleaser-v1.26.2"
+GORELEASER="${GOBIN}/goreleaser-v2.11.2"
HELM="${GOBIN}/helm-v3.18.4"
KIND="${GOBIN}/kind-v0.30.0"
+KUBE_SCORE="${GOBIN}/kube-score-v1.20.0"
+
KUSTOMIZE="${GOBIN}/kustomize-v5.7.1"
OPERATOR_SDK="${GOBIN}/operator-sdk-v1.41.1"
diff --git a/.goreleaser.yml b/.goreleaser.yml
index 720014214..e2807ca41 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -1,3 +1,4 @@
+version: 2
before:
hooks:
- go mod tidy
@@ -116,7 +117,7 @@ docker_manifests:
checksum:
name_template: 'checksums.txt'
snapshot:
- name_template: "{{ incpatch .Version }}-next"
+ version_template: "{{ incpatch .Version }}-next"
changelog:
use: github-native
disable: '{{ ne .Env.ENABLE_RELEASE_PIPELINE "true" }}'
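For context, a minimal sketch of the GoReleaser v2 config shape this change adopts. Only the `version` header and the renamed `snapshot.version_template` key are taken from the diff above; the surrounding structure is illustrative:

```yaml
# GoReleaser v2 requires an explicit schema version at the top of .goreleaser.yml.
version: 2

snapshot:
  # v2 renames snapshot.name_template to snapshot.version_template.
  version_template: "{{ incpatch .Version }}-next"
```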
diff --git a/.tilt-support b/.tilt-support
index 9cb01b152..dcd827d04 100644
--- a/.tilt-support
+++ b/.tilt-support
@@ -14,7 +14,7 @@ def deploy_cert_manager_if_needed():
docker_build(
ref='helper',
context='.',
- build_args={'GO_VERSION': '1.24'},
+ build_args={'GO_VERSION': '1.25'},
dockerfile_contents='''
ARG GO_VERSION
FROM golang:${GO_VERSION}
diff --git a/Makefile b/Makefile
index 17025fd9f..7b422471a 100644
--- a/Makefile
+++ b/Makefile
@@ -62,6 +62,9 @@ ifeq ($(origin KIND_CLUSTER_NAME), undefined)
KIND_CLUSTER_NAME := operator-controller
endif
+ifeq ($(origin KIND_CONFIG), undefined)
+KIND_CONFIG := ./kind-config/kind-config.yaml
+endif
ifneq (, $(shell command -v docker 2>/dev/null))
CONTAINER_RUNTIME := docker
@@ -118,9 +121,25 @@ help-extended: #HELP Display extended help.
lint: lint-custom $(GOLANGCI_LINT) #HELP Run golangci linter.
$(GOLANGCI_LINT) run --build-tags $(GO_BUILD_TAGS) $(GOLANGCI_LINT_ARGS)
-lint-helm: $(HELM) #HELP Run helm linter
+.PHONY: lint-helm
+lint-helm: $(HELM) $(CONFTEST) #HELP Run helm linter
helm lint helm/olmv1
helm lint helm/prometheus
+ (helm template olmv1 helm/olmv1; helm template prometheus helm/prometheus) | $(CONFTEST) test --policy hack/conftest/policy/ --combine -n main -n prometheus -
+
+.PHONY: lint-deployed-resources
+lint-deployed-resources: $(KUBE_SCORE) #HELP Lint deployed resources.
+ (for ns in $$(printf "olmv1-system\n%s\n" "$(CATD_NAMESPACE)" | uniq); do \
+ for resource in $$(kubectl api-resources --verbs=list --namespaced -o name); do \
+ kubectl get $$resource -n $$ns -o yaml ; \
+ echo "---" ; \
+ done \
+ done) | $(KUBE_SCORE) score - \
+ `# TODO: currently these checks are failing, decide if resources should be fixed for them to pass (https://github.com/operator-framework/operator-controller/issues/2398)` \
+ --ignore-test container-resources \
+ --ignore-test container-image-pull-policy \
+ --ignore-test container-ephemeral-storage-request-and-limit \
+ --ignore-test container-security-context-user-group-id
.PHONY: custom-linter-build
custom-linter-build: #EXHELP Build custom linter
@@ -161,9 +180,10 @@ $(EXPERIMENTAL_MANIFEST) ?= helm/cert-manager.yaml helm/experimental.yaml
$(EXPERIMENTAL_E2E_MANIFEST) ?= helm/cert-manager.yaml helm/experimental.yaml helm/e2e.yaml
HELM_SETTINGS ?=
.PHONY: $(MANIFESTS)
-$(MANIFESTS): $(HELM)
+$(MANIFESTS): $(HELM) $(CONFTEST)
@mkdir -p $(MANIFEST_HOME)
$(HELM) template olmv1 helm/olmv1 $(addprefix --values ,$($@)) $(addprefix --set ,$(HELM_SETTINGS)) > $@
+ $(CONFTEST) test --policy hack/conftest/policy/ -n main --combine $@
# Generate manifests stored in source-control
.PHONY: manifests
@@ -215,7 +235,7 @@ test: manifests generate fmt lint test-unit test-e2e test-regression #HELP Run a
.PHONY: e2e
e2e: #EXHELP Run the e2e tests.
- go test -count=1 -v ./test/e2e/...
+ go test -count=1 -v ./test/e2e/features_test.go
E2E_REGISTRY_NAME := docker-registry
E2E_REGISTRY_NAMESPACE := operator-controller-e2e
@@ -266,7 +286,7 @@ image-registry: ## Build the testdata catalog used for e2e tests and push it to
# or inject unintended characters into the binary (e.g., version metadata).
go build $(GO_BUILD_FLAGS) $(GO_BUILD_EXTRA_FLAGS) -tags '$(GO_BUILD_TAGS)' -ldflags "$(GO_BUILD_LDFLAGS)" -gcflags '$(GO_BUILD_GCFLAGS)' -asmflags '$(GO_BUILD_ASMFLAGS)' -o ./testdata/push/bin/push ./testdata/push/push.go
$(CONTAINER_RUNTIME) build -f ./testdata/Dockerfile -t $(E2E_REGISTRY_IMAGE) ./testdata
- $(CONTAINER_RUNTIME) save $(E2E_REGISTRY_IMAGE) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME)
+ $(KIND) load docker-image $(E2E_REGISTRY_IMAGE) --name $(KIND_CLUSTER_NAME)
./testdata/build-test-registry.sh $(E2E_REGISTRY_NAMESPACE) $(E2E_REGISTRY_NAME) $(E2E_REGISTRY_IMAGE)
# When running the e2e suite, you can set the ARTIFACT_PATH variable to the absolute path
@@ -285,6 +305,7 @@ test-e2e: run-internal image-registry prometheus e2e e2e-coverage kind-clean #HE
.PHONY: test-experimental-e2e
test-experimental-e2e: SOURCE_MANIFEST := $(EXPERIMENTAL_E2E_MANIFEST)
test-experimental-e2e: KIND_CLUSTER_NAME := operator-controller-e2e
+test-experimental-e2e: KIND_CONFIG := ./kind-config/kind-config-2node.yaml
test-experimental-e2e: GO_BUILD_EXTRA_FLAGS := -cover
test-experimental-e2e: COVERAGE_NAME := experimental-e2e
test-experimental-e2e: export MANIFEST := $(EXPERIMENTAL_RELEASE_MANIFEST)
@@ -385,8 +406,8 @@ stop-profiling: build-test-profiler #EXHELP Stop profiling and generate analysis
.PHONY: kind-load
kind-load: $(KIND) #EXHELP Loads the currently constructed images into the KIND cluster.
- $(CONTAINER_RUNTIME) save $(OPCON_IMG) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME)
- $(CONTAINER_RUNTIME) save $(CATD_IMG) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME)
+ $(KIND) load docker-image $(OPCON_IMG) --name $(KIND_CLUSTER_NAME)
+ $(KIND) load docker-image $(CATD_IMG) --name $(KIND_CLUSTER_NAME)
.PHONY: kind-deploy
kind-deploy: export DEFAULT_CATALOG := $(RELEASE_CATALOGS)
@@ -411,8 +432,9 @@ kind-deploy-experimental: manifests
.PHONY: kind-cluster
kind-cluster: $(KIND) kind-verify-versions #EXHELP Standup a kind cluster.
-$(KIND) delete cluster --name $(KIND_CLUSTER_NAME)
- $(KIND) create cluster --name $(KIND_CLUSTER_NAME) --config ./kind-config.yaml
+ $(KIND) create cluster --name $(KIND_CLUSTER_NAME) --config $(KIND_CONFIG)
$(KIND) export kubeconfig --name $(KIND_CLUSTER_NAME)
+ kubectl wait --for=condition=Ready nodes --all --timeout=2m
.PHONY: kind-clean
kind-clean: $(KIND) #EXHELP Delete the kind cluster.
@@ -475,7 +497,7 @@ go-build-linux: export GOARCH=amd64
go-build-linux: $(BINARIES)
.PHONY: run-internal
-run-internal: docker-build kind-cluster kind-load kind-deploy wait
+run-internal: docker-build kind-cluster kind-load kind-deploy lint-deployed-resources wait
.PHONY: run
run: SOURCE_MANIFEST := $(STANDARD_MANIFEST)
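The new KIND_CONFIG variable points at files that are not part of this diff. Below is a plausible sketch of the two-node config referenced by test-experimental-e2e (./kind-config/kind-config-2node.yaml), assuming the standard kind v1alpha4 schema; the actual file contents may differ:

```yaml
# Hypothetical contents of ./kind-config/kind-config-2node.yaml:
# one control-plane node plus one worker, so the experimental e2e suite
# runs against a multi-node topology.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
  - role: worker
```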
diff --git a/api/v1/clustercatalog_types.go b/api/v1/clustercatalog_types.go
index c18fa3c7e..8df90a806 100644
--- a/api/v1/clustercatalog_types.go
+++ b/api/v1/clustercatalog_types.go
@@ -51,7 +51,7 @@ const (
//+kubebuilder:printcolumn:name="Serving",type=string,JSONPath=`.status.conditions[?(@.type=="Serving")].status`
//+kubebuilder:printcolumn:name=Age,type=date,JSONPath=`.metadata.creationTimestamp`
-// ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+// ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster.
// For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
type ClusterCatalog struct {
metav1.TypeMeta `json:",inline"`
@@ -60,16 +60,14 @@ type ClusterCatalog struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata"`
- // spec is the desired state of the ClusterCatalog.
- // spec is required.
- // The controller will work to ensure that the desired
- // catalog is unpacked and served over the catalog content HTTP server.
+ // spec is a required field that defines the desired state of the ClusterCatalog.
+ // The controller ensures that the catalog is unpacked and served over the catalog content HTTP server.
// +kubebuilder:validation:Required
Spec ClusterCatalogSpec `json:"spec"`
- // status contains information about the state of the ClusterCatalog such as:
- // - Whether or not the catalog contents are being served via the catalog content HTTP server
- // - Whether or not the ClusterCatalog is progressing to a new state
+ // status contains the following information about the state of the ClusterCatalog:
+ // - Whether the catalog contents are being served via the catalog content HTTP server
+ // - Whether the ClusterCatalog is progressing to a new state
// - A reference to the source from which the catalog contents were retrieved
// +optional
Status ClusterCatalogStatus `json:"status,omitempty"`
@@ -93,15 +91,12 @@ type ClusterCatalogList struct {
// ClusterCatalogSpec defines the desired state of ClusterCatalog
type ClusterCatalogSpec struct {
- // source allows a user to define the source of a catalog.
- // A "catalog" contains information on content that can be installed on a cluster.
- // Providing a catalog source makes the contents of the catalog discoverable and usable by
- // other on-cluster components.
- // These on-cluster components may do a variety of things with this information, such as
- // presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ // source is a required field that defines the source of a catalog.
+ // A catalog contains information on content that can be installed on a cluster.
+ // The catalog source makes catalog contents discoverable and usable by other on-cluster components.
+ // These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
// The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
// For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
- // source is a required field.
//
// Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
//
@@ -113,19 +108,18 @@ type ClusterCatalogSpec struct {
// +kubebuilder:validation:Required
Source CatalogSource `json:"source"`
- // priority allows the user to define a priority for a ClusterCatalog.
- // priority is optional.
+ // priority is an optional field that defines a priority for this ClusterCatalog.
//
- // A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
- // A higher number means higher priority.
+ // Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
+ // Higher numbers mean higher priority.
//
- // It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
- // When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+ // Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ // Clients should prompt users for additional input to break the tie.
//
- // When omitted, the default priority is 0 because that is the zero value of integers.
+ // When omitted, the default priority is 0.
//
- // Negative numbers can be used to specify a priority lower than the default.
- // Positive numbers can be used to specify a priority higher than the default.
+ // Use negative numbers to specify a priority lower than the default.
+ // Use positive numbers to specify a priority higher than the default.
//
// The lowest possible value is -2147483648.
// The highest possible value is 2147483647.
@@ -136,21 +130,18 @@ type ClusterCatalogSpec struct {
// +optional
Priority int32 `json:"priority"`
- // availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
- // availabilityMode is optional.
+ // availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
//
- // Allowed values are "Available" and "Unavailable" and omitted.
+ // Allowed values are "Available", "Unavailable", or omitted.
//
// When omitted, the default value is "Available".
//
- // When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
- // Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
- // and its contents as usable.
+ // When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
+ // Clients should consider this ClusterCatalog and its contents as usable.
//
- // When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
- // When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
- // Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
- // to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist.
+ // When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
+ // Treat this the same as if the ClusterCatalog does not exist.
+ // Use "Unavailable" when you want to keep the ClusterCatalog rather than delete it, but still have it treated as nonexistent.
//
// +kubebuilder:validation:Enum:="Unavailable";"Available"
// +kubebuilder:default:="Available"
@@ -160,24 +151,23 @@ type ClusterCatalogSpec struct {
// ClusterCatalogStatus defines the observed state of ClusterCatalog
type ClusterCatalogStatus struct {
- // conditions is a representation of the current state for this ClusterCatalog.
+ // conditions represents the current state of this ClusterCatalog.
//
// The current condition types are Serving and Progressing.
//
- // The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
- // When it has a status of True and a reason of Available, the contents of the catalog are being served.
- // When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
- // When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+ // The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
+ // - When status is True and reason is Available, the catalog contents are being served.
+ // - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
+ // - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
//
- // The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
- // When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
- // When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- // When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+ // The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
+ // - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
+ // - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ // - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
//
- // In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
- // catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
- // contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
- // to the contents we identify that there are updates to the contents.
+ // If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously:
+ // - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
+ // - The Progressing condition is True with reason Retrying because the system is working to serve the new version.
//
// +listType=map
// +listMapKey=type
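To make the condition semantics above concrete, here is an abbreviated status sketch for the refresh scenario the comment describes, where previously fetched contents are still served while new contents are being fetched. Reasons are taken from the documented condition reasons; lastTransitionTime, message, and observedGeneration are omitted:

```yaml
status:
  conditions:
    # Previously fetched contents are still being served.
    - type: Serving
      status: "True"
      reason: Available
    # Polling found updated contents; the controller is working to serve them.
    - type: Progressing
      status: "True"
      reason: Retrying
```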
@@ -189,30 +179,25 @@ type ClusterCatalogStatus struct {
// urls contains the URLs that can be used to access the catalog.
// +optional
URLs *ClusterCatalogURLs `json:"urls,omitempty"`
- // lastUnpacked represents the last time the contents of the
- // catalog were extracted from their source format. As an example,
- // when using an Image source, the OCI image will be pulled and the
- // image layers written to a file-system backed cache. We refer to the
- // act of this extraction from the source format as "unpacking".
+ // lastUnpacked represents the last time the catalog contents were extracted from their source format.
+ // For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
+ // This extraction from the source format is called "unpacking".
// +optional
LastUnpacked *metav1.Time `json:"lastUnpacked,omitempty"`
}
// ClusterCatalogURLs contains the URLs that can be used to access the catalog.
type ClusterCatalogURLs struct {
- // base is a cluster-internal URL that provides endpoints for
- // accessing the content of the catalog.
+ // base is a cluster-internal URL that provides endpoints for accessing the catalog content.
//
- // It is expected that clients append the path for the endpoint they wish
- // to access.
+ // Clients should append the path for the endpoint they want to access.
//
- // Currently, only a single endpoint is served and is accessible at the path
- // /api/v1.
+ // Currently, only a single endpoint is served and is accessible at the path /api/v1.
//
// The endpoints served for the v1 API are:
- // - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+ // - /all - this endpoint returns the entire catalog contents in the FBC format
//
- // As the needs of users and clients of the evolve, new endpoints may be added.
+ // New endpoints may be added as needs evolve.
//
// +kubebuilder:validation:Required
// +kubebuilder:validation:MaxLength:=525
@@ -226,20 +211,19 @@ type ClusterCatalogURLs struct {
// +union
// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Image' ? has(self.image) : !has(self.image)",message="image is required when source type is Image, and forbidden otherwise"
type CatalogSource struct {
- // type is a reference to the type of source the catalog is sourced from.
- // type is required.
+ // type is a required field that specifies the type of source for the catalog.
//
// The only allowed value is "Image".
//
- // When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ // When set to "Image", the ClusterCatalog content is sourced from an OCI image.
// When using an image source, the image field must be set and must be the only field defined for this type.
//
// +unionDiscriminator
// +kubebuilder:validation:Enum:="Image"
// +kubebuilder:validation:Required
Type SourceType `json:"type"`
- // image is used to configure how catalog contents are sourced from an OCI image.
- // This field is required when type is Image, and forbidden otherwise.
+ // image configures how catalog contents are sourced from an OCI image.
+ // It is required when type is Image, and forbidden otherwise.
// +optional
Image *ImageSource `json:"image,omitempty"`
}
@@ -249,27 +233,26 @@ type CatalogSource struct {
// +union
// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Image' ? has(self.image) : !has(self.image)",message="image is required when source type is Image, and forbidden otherwise"
type ResolvedCatalogSource struct {
- // type is a reference to the type of source the catalog is sourced from.
- // type is required.
+ // type is a required field that specifies the type of source for the catalog.
//
// The only allowed value is "Image".
//
- // When set to "Image", information about the resolved image source will be set in the 'image' field.
+ // When set to "Image", information about the resolved image source is set in the image field.
//
// +unionDiscriminator
// +kubebuilder:validation:Enum:="Image"
// +kubebuilder:validation:Required
Type SourceType `json:"type"`
- // image is a field containing resolution information for a catalog sourced from an image.
- // This field must be set when type is Image, and forbidden otherwise.
+ // image contains resolution information for a catalog sourced from an image.
+ // It must be set when type is Image, and forbidden otherwise.
Image *ResolvedImageSource `json:"image"`
}
// ResolvedImageSource provides information about the resolved source of a Catalog sourced from an image.
type ResolvedImageSource struct {
// ref contains the resolved image digest-based reference.
- // The digest format is used so users can use other tooling to fetch the exact
- // OCI manifests that were used to extract the catalog contents.
+ // The digest format allows you to use other tooling to fetch the exact OCI manifests
+ // that were used to extract the catalog contents.
// +kubebuilder:validation:Required
// +kubebuilder:validation:MaxLength:=1000
// +kubebuilder:validation:XValidation:rule="self.matches('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])((\\\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(:[0-9]+)?\\\\b')",message="must start with a valid domain. valid domains must be alphanumeric characters (lowercase and uppercase) separated by the \".\" character."
@@ -287,11 +270,10 @@ type ResolvedImageSource struct {
// reject the resource since there is no use in polling a digest-based image reference.
// +kubebuilder:validation:XValidation:rule="self.ref.find('(@.*:)') != \"\" ? !has(self.pollIntervalMinutes) : true",message="cannot specify pollIntervalMinutes while using digest-based image"
type ImageSource struct {
- // ref allows users to define the reference to a container image containing Catalog contents.
- // ref is required.
- // ref can not be more than 1000 characters.
+ // ref is a required field that defines the reference to a container image containing catalog contents.
+ // It cannot be more than 1000 characters.
//
- // A reference can be broken down into 3 parts - the domain, name, and identifier.
+ // A reference has 3 parts: the domain, name, and identifier.
//
// The domain is typically the registry where an image is located.
// It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
@@ -337,11 +319,10 @@ type ImageSource struct {
// +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\" ? self.find(':.*$').matches(':[0-9A-Fa-f]*$') : true",message="digest is not valid. the encoded string must only contain hex characters (A-F, a-f, 0-9)"
Ref string `json:"ref"`
- // pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
- // pollIntervalMinutes is optional.
- // pollIntervalMinutes can not be specified when ref is a digest-based reference.
+ // pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
+ // You cannot specify pollIntervalMinutes when ref is a digest-based reference.
//
- // When omitted, the image will not be polled for new content.
+ // When omitted, the image is not polled for new content.
// +kubebuilder:validation:Minimum:=1
// +optional
PollIntervalMinutes *int `json:"pollIntervalMinutes,omitempty"`
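Pulling the rewritten ClusterCatalog field docs together, a minimal manifest sketch exercising source, priority, availabilityMode, and pollIntervalMinutes, assuming the olm.operatorframework.io/v1 group/version used by this project; the image reference is a placeholder:

```yaml
apiVersion: olm.operatorframework.io/v1
kind: ClusterCatalog
metadata:
  name: example-catalog
spec:
  source:
    type: Image
    image:
      # Placeholder tag-based reference. pollIntervalMinutes is forbidden
      # when ref is a digest-based reference.
      ref: quay.io/example/example-catalog:latest
      pollIntervalMinutes: 60
  # Tie-breaker between catalogs that satisfy a client's requirements;
  # higher numbers win, and the default is 0 when omitted.
  priority: 100
  # "Available" (the default) unpacks and serves the catalog contents.
  availabilityMode: Available
```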
diff --git a/api/v1/clusterextension_types.go b/api/v1/clusterextension_types.go
index fb82b7a23..f098d2220 100644
--- a/api/v1/clusterextension_types.go
+++ b/api/v1/clusterextension_types.go
@@ -48,16 +48,15 @@ const (
// ClusterExtensionSpec defines the desired state of ClusterExtension
type ClusterExtensionSpec struct {
- // namespace is a reference to a Kubernetes namespace.
- // This is the namespace in which the provided ServiceAccount must exist.
- // It also designates the default namespace where namespace-scoped resources
- // for the extension are applied to the cluster.
+ // namespace specifies a Kubernetes namespace.
+ // This is the namespace where the provided ServiceAccount must exist.
+ // It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
// Some extensions may contain namespace-scoped resources to be applied in other namespaces.
// This namespace must exist.
//
- // namespace is required, immutable, and follows the DNS label standard
- // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
- // start and end with an alphanumeric character, and be no longer than 63 characters
+ // The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
+ // It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
+ // and be no longer than 63 characters.
//
// [RFC 1123]: https://tools.ietf.org/html/rfc1123
//
@@ -67,20 +66,20 @@ type ClusterExtensionSpec struct {
// +kubebuilder:validation:Required
Namespace string `json:"namespace"`
- // serviceAccount is a reference to a ServiceAccount used to perform all interactions
- // with the cluster that are required to manage the extension.
+ // serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
+ // that are required to manage the extension.
// The ServiceAccount must be configured with the necessary permissions to perform these interactions.
// The ServiceAccount must exist in the namespace referenced in the spec.
- // serviceAccount is required.
+ // The serviceAccount field is required.
//
// +kubebuilder:validation:Required
ServiceAccount ServiceAccountReference `json:"serviceAccount"`
- // source is a required field which selects the installation source of content
- // for this ClusterExtension. Selection is performed by setting the sourceType.
+ // source is required and selects the installation source of content for this ClusterExtension.
+ // Set the sourceType field to perform the selection.
//
- // Catalog is currently the only implemented sourceType, and setting the
- // sourcetype to "Catalog" requires the catalog field to also be defined.
+ // Catalog is currently the only implemented sourceType.
+ // Setting sourceType to "Catalog" requires the catalog field to also be defined.
//
// Below is a minimal example of a source definition (in yaml):
//
@@ -92,15 +91,15 @@ type ClusterExtensionSpec struct {
// +kubebuilder:validation:Required
Source SourceConfig `json:"source"`
- // install is an optional field used to configure the installation options
- // for the ClusterExtension such as the pre-flight check configuration.
+ // install is optional and configures installation options for the ClusterExtension,
+ // such as the pre-flight check configuration.
//
// +optional
Install *ClusterExtensionInstallConfig `json:"install,omitempty"`
- // config is an optional field used to specify bundle specific configuration
- // used to configure the bundle. Configuration is bundle specific and a bundle may provide
- // a configuration schema. When not specified, the default configuration of the resolved bundle will be used.
+ // config is optional and specifies bundle-specific configuration.
+ // Configuration is bundle-specific and a bundle may provide a configuration schema.
+ // When not specified, the default configuration of the resolved bundle is used.
//
// config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
// a configuration schema the bundle is deemed to not be configurable. More information on how
@@ -118,13 +117,12 @@ const SourceTypeCatalog = "Catalog"
// +union
// +kubebuilder:validation:XValidation:rule="has(self.sourceType) && self.sourceType == 'Catalog' ? has(self.catalog) : !has(self.catalog)",message="catalog is required when sourceType is Catalog, and forbidden otherwise"
type SourceConfig struct {
- // sourceType is a required reference to the type of install source.
+ // sourceType is required and specifies the type of install source.
//
- // Allowed values are "Catalog"
+ // The only allowed value is "Catalog".
//
- // When this field is set to "Catalog", information for determining the
- // appropriate bundle of content to install will be fetched from
- // ClusterCatalog resources existing on the cluster.
+ // When set to "Catalog", information for determining the appropriate bundle of content to install
+ // is fetched from ClusterCatalog resources on the cluster.
// When using the Catalog sourceType, the catalog field must also be set.
//
// +unionDiscriminator
@@ -132,8 +130,8 @@ type SourceConfig struct {
// +kubebuilder:validation:Required
SourceType string `json:"sourceType"`
- // catalog is used to configure how information is sourced from a catalog.
- // This field is required when sourceType is "Catalog", and forbidden otherwise.
+ // catalog configures how information is sourced from a catalog.
+ // It is required when sourceType is "Catalog", and forbidden otherwise.
//
// +optional
Catalog *CatalogFilter `json:"catalog,omitempty"`
@@ -145,11 +143,11 @@ type SourceConfig struct {
// +kubebuilder:validation:XValidation:rule="has(self.preflight)",message="at least one of [preflight] are required when install is specified"
// +union
type ClusterExtensionInstallConfig struct {
- // preflight is an optional field that can be used to configure the checks that are
- // run before installation or upgrade of the content for the package specified in the packageName field.
+ // preflight is optional and configures the checks that run before installation or upgrade
+ // of the content for the package specified in the packageName field.
//
// When specified, it replaces the default preflight configuration for install/upgrade actions.
- // When not specified, the default configuration will be used.
+ // When not specified, the default configuration is used.
//
// +optional
Preflight *PreflightConfig `json:"preflight,omitempty"`
@@ -161,22 +159,20 @@ type ClusterExtensionInstallConfig struct {
// +kubebuilder:validation:XValidation:rule="has(self.configType) && self.configType == 'Inline' ?has(self.inline) : !has(self.inline)",message="inline is required when configType is Inline, and forbidden otherwise"
// +union
type ClusterExtensionConfig struct {
- // configType is a required reference to the type of configuration source.
+ // configType is required and specifies the type of configuration source.
//
- // Allowed values are "Inline"
+ // The only allowed value is "Inline".
//
- // When this field is set to "Inline", the cluster extension configuration is defined inline within the
- // ClusterExtension resource.
+ // When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource.
//
// +unionDiscriminator
// +kubebuilder:validation:Enum:="Inline"
// +kubebuilder:validation:Required
ConfigType ClusterExtensionConfigType `json:"configType"`
- // inline contains JSON or YAML values specified directly in the
- // ClusterExtension.
+ // inline contains JSON or YAML values specified directly in the ClusterExtension.
//
- // inline is used to specify arbitrary configuration values for the ClusterExtension.
+ // It is used to specify arbitrary configuration values for the ClusterExtension.
// It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
// The configuration values are validated at runtime against a JSON schema provided by the bundle.
//
@@ -189,13 +185,12 @@ type ClusterExtensionConfig struct {
// CatalogFilter defines the attributes used to identify and filter content from a catalog.
type CatalogFilter struct {
- // packageName is a reference to the name of the package to be installed
- // and is used to filter the content from catalogs.
+ // packageName specifies the name of the package to be installed and is used to filter
+ // the content from catalogs.
//
- // packageName is required, immutable, and follows the DNS subdomain standard
- // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- // hyphens (-) or periods (.), start and end with an alphanumeric character,
- // and be no longer than 253 characters.
+ // It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
+ // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ // start and end with an alphanumeric character, and be no longer than 253 characters.
//
// Some examples of valid values are:
// - some-package
@@ -218,12 +213,13 @@ type CatalogFilter struct {
// +kubebuilder:validation:Required
PackageName string `json:"packageName"`
- // version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
+ // version is an optional semver constraint (a specific version or range of versions).
+ // When unspecified, the latest version available is installed.
//
// Acceptable version ranges are no longer than 64 characters.
- // Version ranges are composed of comma- or space-delimited values and one or
- // more comparison operators, known as comparison strings. Additional
- // comparison strings can be added using the OR operator (||).
+ // Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
+ // known as comparison strings.
+ // You can add additional comparison strings using the OR operator (||).
//
// # Range Comparisons
//
@@ -297,25 +293,24 @@ type CatalogFilter struct {
// +optional
Version string `json:"version,omitempty"`
- // channels is an optional reference to a set of channels belonging to
- // the package specified in the packageName field.
+ // channels is optional and specifies a set of channels belonging to the package
+ // specified in the packageName field.
//
- // A "channel" is a package-author-defined stream of updates for an extension.
+ // A channel is a package-author-defined stream of updates for an extension.
//
- // Each channel in the list must follow the DNS subdomain standard
- // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- // hyphens (-) or periods (.), start and end with an alphanumeric character,
- // and be no longer than 253 characters. No more than 256 channels can be specified.
+ // Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
+ // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ // start and end with an alphanumeric character, and be no longer than 253 characters.
+ // You can specify no more than 256 channels.
//
- // When specified, it is used to constrain the set of installable bundles and
- // the automated upgrade path. This constraint is an AND operation with the
- // version field. For example:
+ // When specified, it constrains the set of installable bundles and the automated upgrade path.
+ // This constraint is an AND operation with the version field. For example:
// - Given channel is set to "foo"
// - Given version is set to ">=1.0.0, <1.5.0"
- // - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- // - Automatic upgrades will be constrained to upgrade edges defined by the selected channel
+ // - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
+ // - Automatic upgrades are constrained to upgrade edges defined by the selected channel
//
- // When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
+ // When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
//
// Some examples of valid values are:
// - 1.1.x
@@ -342,33 +337,28 @@ type CatalogFilter struct {
// +optional
Channels []string `json:"channels,omitempty"`
- // selector is an optional field that can be used
- // to filter the set of ClusterCatalogs used in the bundle
- // selection process.
+ // selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
//
- // When unspecified, all ClusterCatalogs will be used in
- // the bundle selection process.
+ // When unspecified, all ClusterCatalogs are used in the bundle selection process.
//
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty"`
- // upgradeConstraintPolicy is an optional field that controls whether
- // the upgrade path(s) defined in the catalog are enforced for the package
- // referenced in the packageName field.
+ // upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
+ // are enforced for the package referenced in the packageName field.
//
- // Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
+ // Allowed values are "CatalogProvided", "SelfCertified", or omitted.
//
- // When this field is set to "CatalogProvided", automatic upgrades will only occur
- // when upgrade constraints specified by the package author are met.
+ // When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
+ // author are met.
//
- // When this field is set to "SelfCertified", the upgrade constraints specified by
- // the package author are ignored. This allows for upgrades and downgrades to
- // any version of the package. This is considered a dangerous operation as it
- // can lead to unknown and potentially disastrous outcomes, such as data
- // loss. It is assumed that users have independently verified changes when
- // using this option.
+ // When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
+ // This allows upgrades and downgrades to any version of the package.
+ // This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
+ // such as data loss.
+ // Use this option only if you have independently verified the changes.
//
- // When this field is omitted, the default value is "CatalogProvided".
+ // When omitted, the default value is "CatalogProvided".
//
// +kubebuilder:validation:Enum:=CatalogProvided;SelfCertified
// +kubebuilder:default:=CatalogProvided
@@ -378,16 +368,14 @@ type CatalogFilter struct {
// ServiceAccountReference identifies the serviceAccount used to install a ClusterExtension.
type ServiceAccountReference struct {
- // name is a required, immutable reference to the name of the ServiceAccount
- // to be used for installation and management of the content for the package
- // specified in the packageName field.
+ // name is a required, immutable reference to the name of the ServiceAccount used for installation
+ // and management of the content for the package specified in the packageName field.
//
// This ServiceAccount must exist in the installNamespace.
//
- // name follows the DNS subdomain standard as defined in [RFC 1123].
- // It must contain only lowercase alphanumeric characters,
- // hyphens (-) or periods (.), start and end with an alphanumeric character,
- // and be no longer than 253 characters.
+ // The name field follows the DNS subdomain standard as defined in [RFC 1123].
+ // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ // start and end with an alphanumeric character, and be no longer than 253 characters.
//
// Some examples of valid values are:
// - some-serviceaccount
@@ -413,26 +401,24 @@ type ServiceAccountReference struct {
//
// +kubebuilder:validation:XValidation:rule="has(self.crdUpgradeSafety)",message="at least one of [crdUpgradeSafety] are required when preflight is specified"
type PreflightConfig struct {
- // crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
- // checks that run prior to upgrades of installed content.
+ // crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
+ // before upgrades of installed content.
//
- // The CRD Upgrade Safety pre-flight check safeguards from unintended
- // consequences of upgrading a CRD, such as data loss.
+ // The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
+ // such as data loss.
CRDUpgradeSafety *CRDUpgradeSafetyPreflightConfig `json:"crdUpgradeSafety"`
}
// CRDUpgradeSafetyPreflightConfig is the configuration for CRD upgrade safety preflight check.
type CRDUpgradeSafetyPreflightConfig struct {
- // enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
+ // enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
//
// Allowed values are "None" or "Strict". The default value is "Strict".
//
- // When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
- // when performing an upgrade operation. This should be used with caution as
- // unintended consequences such as data loss can occur.
+ // When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
+ // Use this option with caution as unintended consequences such as data loss can occur.
//
- // When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
- // performing an upgrade operation.
+ // When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation.
//
// +kubebuilder:validation:Enum:="None";"Strict"
// +kubebuilder:validation:Required
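As an illustration of the enforcement values documented above, a sketch of a ClusterExtension spec.install stanza that skips the CRD Upgrade Safety check (use with caution, per the comment):

```yaml
install:
  preflight:
    crdUpgradeSafety:
      # "Strict" is the default. "None" skips the pre-flight check during
      # upgrades and can lead to unintended consequences such as data loss.
      enforcement: None
```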
@@ -455,17 +441,16 @@ const (
// BundleMetadata is a representation of the identifying attributes of a bundle.
type BundleMetadata struct {
- // name is required and follows the DNS subdomain standard
- // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- // hyphens (-) or periods (.), start and end with an alphanumeric character,
- // and be no longer than 253 characters.
+ // name is required and follows the DNS subdomain standard as defined in [RFC 1123].
+ // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ // start and end with an alphanumeric character, and be no longer than 253 characters.
//
// +kubebuilder:validation:Required
// +kubebuilder:validation:XValidation:rule="self.matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="packageName must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters"
Name string `json:"name"`
- // version is a required field and is a reference to the version that this bundle represents
- // version follows the semantic versioning standard as defined in https://semver.org/.
+ // version is required and references the version that this bundle represents.
+ // It follows the semantic versioning standard as defined in https://semver.org/.
//
// +kubebuilder:validation:Required
// +kubebuilder:validation:XValidation:rule="self.matches(\"^([0-9]+)(\\\\.[0-9]+)?(\\\\.[0-9]+)?(-([-0-9A-Za-z]+(\\\\.[-0-9A-Za-z]+)*))?(\\\\+([-0-9A-Za-z]+(-\\\\.[-0-9A-Za-z]+)*))?\")",message="version must be well-formed semver"
@@ -491,9 +476,9 @@ type RevisionStatus struct {
type ClusterExtensionStatus struct {
// The set of condition types which apply to all spec.source variations are Installed and Progressing.
//
- // The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
- // When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- // When Installed is False and the Reason is Failed, the bundle has failed to install.
+ // The Installed condition represents whether the bundle has been installed for this ClusterExtension:
+ // - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ // - When Installed is False and the Reason is Failed, the bundle has failed to install.
//
// The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
// When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
@@ -503,12 +488,12 @@ type ClusterExtensionStatus struct {
// When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.
//
//
- // When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
- // These are indications from a package owner to guide users away from a particular package, channel, or bundle.
- // BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- // ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- // PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- // Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
+ // When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ // These are indications from a package owner to guide users away from a particular package, channel, or bundle:
+ // - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ // - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ // - PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ // - Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
//
// +listType=map
// +listMapKey=type
@@ -531,10 +516,10 @@ type ClusterExtensionStatus struct {
// ClusterExtensionInstallStatus is a representation of the status of the identified bundle.
type ClusterExtensionInstallStatus struct {
- // bundle is a required field which represents the identifying attributes of a bundle.
+ // bundle is required and represents the identifying attributes of a bundle.
//
- // A "bundle" is a versioned set of content that represents the resources that
- // need to be applied to a cluster to install a package.
+ // A "bundle" is a versioned set of content that represents the resources that need to be applied
+ // to a cluster to install a package.
//
// +kubebuilder:validation:Required
Bundle BundleMetadata `json:"bundle"`
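Combining the rewritten ClusterExtension field docs, a minimal manifest sketch, again assuming olm.operatorframework.io/v1; the package name, namespace, and service account are placeholders:

```yaml
apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
  name: example-extension
spec:
  # Namespace where the ServiceAccount must exist and where namespace-scoped
  # resources are applied by default.
  namespace: example-ns
  serviceAccount:
    name: example-installer
  source:
    sourceType: Catalog
    catalog:
      packageName: example-package
      # AND-ed constraints: only bundles in channel "stable" that satisfy
      # the version range are considered installable.
      version: ">=1.0.0, <1.5.0"
      channels:
        - stable
      # Default; upgrades follow the catalog-provided upgrade edges.
      upgradeConstraintPolicy: CatalogProvided
```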
diff --git a/commitchecker.yaml b/commitchecker.yaml
index 883a4f9e0..99c3f3028 100644
--- a/commitchecker.yaml
+++ b/commitchecker.yaml
@@ -1,4 +1,4 @@
-expectedMergeBase: 39718ba9a077b3e95ff7e69cc8e6bef5a8815541
+expectedMergeBase: 08436e031b685709ea24072c18f8fc0d31c3b07a
upstreamBranch: main
upstreamOrg: operator-framework
upstreamRepo: operator-controller
diff --git a/docs/api-reference/olmv1-api-reference.md b/docs/api-reference/olmv1-api-reference.md
index 4fc8cb3c6..6aeb4c8f4 100644
--- a/docs/api-reference/olmv1-api-reference.md
+++ b/docs/api-reference/olmv1-api-reference.md
@@ -46,8 +46,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `name` _string_ | name is required and follows the DNS subdomain standard as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters. | | Required: \{\} |
-| `version` _string_ | version is a required field and is a reference to the version that this bundle represents version follows the semantic versioning standard as defined in https://semver.org/. | | Required: \{\} |
+| `name` _string_ | name is required and follows the DNS subdomain standard as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters. | | Required: \{\} |
+| `version` _string_ | version is required and references the version that this bundle represents. It follows the semantic versioning standard as defined in https://semver.org/. | | Required: \{\} |
#### CRDUpgradeSafetyEnforcement
@@ -80,7 +80,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `enforcement` _[CRDUpgradeSafetyEnforcement](#crdupgradesafetyenforcement)_ | enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". When set to "None", the CRD Upgrade Safety pre-flight check will be skipped when performing an upgrade operation. This should be used with caution as unintended consequences such as data loss can occur. When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when performing an upgrade operation. | | Enum: [None Strict] Required: \{\} |
+| `enforcement` _[CRDUpgradeSafetyEnforcement](#crdupgradesafetyenforcement)_ | enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. Use this option with caution as unintended consequences such as data loss can occur. When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. | | Enum: [None Strict] Required: \{\} |
#### CatalogFilter
@@ -96,11 +96,11 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `packageName` _string_ | packageName is a reference to the name of the package to be installed and is used to filter the content from catalogs. packageName is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package - 123-package - 1-package-2 - somepackage Some examples of invalid values are: - -some-package - some-package- - thisisareallylongpackagenamethatisgreaterthanthemaximumlength - some.package [RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253 Required: \{\} |
-| `version` _string_ | version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. Acceptable version ranges are no longer than 64 characters. Version ranges are composed of comma- or space-delimited values and one or more comparison operators, known as comparison strings. Additional comparison strings can be added using the OR operator (\|\|). # Range Comparisons To specify a version range, you can use a comparison string like ">=3.0, <3.6". When specifying a range, automatic updates will occur within that range. The example comparison string means "install any version greater than or equal to 3.0.0 but less than 3.6.0.". It also states intent that if any upgrades are available within the version range after initial installation, those upgrades should be automatically performed. # Pinned Versions To specify an exact version to install you can use a version range that "pins" to a specific version. When pinning to a specific version, no automatic updates will occur. An example of a pinned version range is "0.6.0", which means "only install version 0.6.0 and never upgrade from this version". # Basic Comparison Operators The basic comparison operators and their meanings are: - "=", equal (not aliased to an operator) - "!=", not equal - "<", less than - ">", greater than - ">=", greater than OR equal to - "<=", less than OR equal to # Wildcard Comparisons You can use the "x", "X", and "*" characters as wildcard characters in all comparison operations. Some examples of using the wildcard characters: - "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0" - ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0" - "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3" - "x", "X", and "*" is equivalent to ">= 0.0.0" # Patch Release Comparisons When you want to specify a minor version up to the next major version you can use the "~" character to perform patch comparisons. Some examples: - "~1.2.3" is equivalent to ">=1.2.3, <1.3.0" - "~1" and "~1.x" is equivalent to ">=1, <2" - "~2.3" is equivalent to ">=2.3, <2.4" - "~1.2.x" is equivalent to ">=1.2.0, <1.3.0" # Major Release Comparisons You can use the "^" character to make major release comparisons after a stable 1.0.0 version is published. If there is no stable version published, // minor versions define the stability level. Some examples: - "^1.2.3" is equivalent to ">=1.2.3, <2.0.0" - "^1.2.x" is equivalent to ">=1.2.0, <2.0.0" - "^2.3" is equivalent to ">=2.3, <3" - "^2.x" is equivalent to ">=2.0.0, <3" - "^0.2.3" is equivalent to ">=0.2.3, <0.3.0" - "^0.2" is equivalent to ">=0.2.0, <0.3.0" - "^0.0.3" is equvalent to ">=0.0.3, <0.0.4" - "^0.0" is equivalent to ">=0.0.0, <0.1.0" - "^0" is equivalent to ">=0.0.0, <1.0.0" # OR Comparisons You can use the "\|\|" character to represent an OR operation in the version range. Some examples: - ">=1.2.3, <2.0.0 \|\| >3.0.0" - "^0 \|\| ^3 \|\| ^5" For more information on semver, please see https://semver.org/ | | MaxLength: 64 |
-| `channels` _string array_ | channels is an optional reference to a set of channels belonging to the package specified in the packageName field. A "channel" is a package-author-defined stream of updates for an extension. Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters. No more than 256 channels can be specified. When specified, it is used to constrain the set of installable bundles and the automated upgrade path. This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - Automatic upgrades will be constrained to upgrade edges defined by the selected channel When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x - alpha - stable - stable-v1 - v1-stable - dev-preview - preview - community Some examples of invalid values are: - -some-channel - some-channel- - thisisareallylongchannelnamethatisgreaterthanthemaximumlength - original_40 - --default-channel [RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxItems: 256 items:MaxLength: 253 items:XValidation: \{self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") channels entries must be valid DNS1123 subdomains \} |
-| `selector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)_ | selector is an optional field that can be used to filter the set of ClusterCatalogs used in the bundle selection process. When unspecified, all ClusterCatalogs will be used in the bundle selection process. | | |
-| `upgradeConstraintPolicy` _[UpgradeConstraintPolicy](#upgradeconstraintpolicy)_ | upgradeConstraintPolicy is an optional field that controls whether the upgrade path(s) defined in the catalog are enforced for the package referenced in the packageName field. Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. When this field is set to "CatalogProvided", automatic upgrades will only occur when upgrade constraints specified by the package author are met. When this field is set to "SelfCertified", the upgrade constraints specified by the package author are ignored. This allows for upgrades and downgrades to any version of the package. This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, such as data loss. It is assumed that users have independently verified changes when using this option. When this field is omitted, the default value is "CatalogProvided". | CatalogProvided | Enum: [CatalogProvided SelfCertified] |
+| `packageName` _string_ | packageName specifies the name of the package to be installed and is used to filter the content from catalogs. It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package - 123-package - 1-package-2 - somepackage Some examples of invalid values are: - -some-package - some-package- - thisisareallylongpackagenamethatisgreaterthanthemaximumlength - some.package [RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253 Required: \{\} |
+| `version` _string_ | version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. Version ranges are composed of comma- or space-delimited values and one or more comparison operators, known as comparison strings. You can combine comparison strings using the OR operator (\|\|). # Range Comparisons To specify a version range, you can use a comparison string like ">=3.0, <3.6". When specifying a range, automatic updates will occur within that range. The example comparison string means "install any version greater than or equal to 3.0.0 but less than 3.6.0". It also states intent that if any upgrades are available within the version range after initial installation, those upgrades should be automatically performed. # Pinned Versions To specify an exact version to install you can use a version range that "pins" to a specific version. When pinning to a specific version, no automatic updates will occur. An example of a pinned version range is "0.6.0", which means "only install version 0.6.0 and never upgrade from this version". # Basic Comparison Operators The basic comparison operators and their meanings are: - "=", equal (not aliased to an operator) - "!=", not equal - "<", less than - ">", greater than - ">=", greater than OR equal to - "<=", less than OR equal to # Wildcard Comparisons You can use the "x", "X", and "*" characters as wildcard characters in all comparison operations. Some examples of using the wildcard characters: - "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0" - ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0" - "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3" - "x", "X", and "*" is equivalent to ">= 0.0.0" # Patch Release Comparisons When you want to specify a minor version up to the next major version you can use the "~" character to perform patch comparisons. Some examples: - "~1.2.3" is equivalent to ">=1.2.3, <1.3.0" - "~1" and "~1.x" is equivalent to ">=1, <2" - "~2.3" is equivalent to ">=2.3, <2.4" - "~1.2.x" is equivalent to ">=1.2.0, <1.3.0" # Major Release Comparisons You can use the "^" character to make major release comparisons after a stable 1.0.0 version is published. If there is no stable version published, minor versions define the stability level. Some examples: - "^1.2.3" is equivalent to ">=1.2.3, <2.0.0" - "^1.2.x" is equivalent to ">=1.2.0, <2.0.0" - "^2.3" is equivalent to ">=2.3, <3" - "^2.x" is equivalent to ">=2.0.0, <3" - "^0.2.3" is equivalent to ">=0.2.3, <0.3.0" - "^0.2" is equivalent to ">=0.2.0, <0.3.0" - "^0.0.3" is equivalent to ">=0.0.3, <0.0.4" - "^0.0" is equivalent to ">=0.0.0, <0.1.0" - "^0" is equivalent to ">=0.0.0, <1.0.0" # OR Comparisons You can use the "\|\|" character to represent an OR operation in the version range. Some examples: - ">=1.2.3, <2.0.0 \|\| >3.0.0" - "^0 \|\| ^3 \|\| ^5" For more information on semver, please see https://semver.org/ | | MaxLength: 64 |
+| `channels` _string array_ | channels is optional and specifies a set of channels belonging to the package specified in the packageName field. A channel is a package-author-defined stream of updates for an extension. Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters. You can specify no more than 256 channels. When specified, it constrains the set of installable bundles and the automated upgrade path. This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable - Automatic upgrades are constrained to upgrade edges defined by the selected channel When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x - alpha - stable - stable-v1 - v1-stable - dev-preview - preview - community Some examples of invalid values are: - -some-channel - some-channel- - thisisareallylongchannelnamethatisgreaterthanthemaximumlength - original_40 - --default-channel [RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxItems: 256 items:MaxLength: 253 items:XValidation: \{self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") channels entries must be valid DNS1123 subdomains \} |
+| `selector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)_ | selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. When unspecified, all ClusterCatalogs are used in the bundle selection process. | | |
+| `upgradeConstraintPolicy` _[UpgradeConstraintPolicy](#upgradeconstraintpolicy)_ | upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog are enforced for the package referenced in the packageName field. Allowed values are "CatalogProvided", "SelfCertified", or omitted. When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package author are met. When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. This allows upgrades and downgrades to any version of the package. This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, such as data loss. Use this option only if you have independently verified the changes. When omitted, the default value is "CatalogProvided". | CatalogProvided | Enum: [CatalogProvided SelfCertified] |
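+
+A minimal sketch of how these fields compose, assuming a hypothetical package named example-package; in a ClusterExtension, this object sits under `spec.source.catalog`:
+
+```yaml
+catalog:
+  packageName: example-package              # required and immutable
+  version: ">=1.2.0, <2.0.0"                # optional semver range; omitted means "latest available"
+  channels:
+    - stable                                # optional; ANDed with the version constraint
+  upgradeConstraintPolicy: CatalogProvided  # the default when omitted
+```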
#### CatalogSource
@@ -117,15 +117,15 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `type` _[SourceType](#sourcetype)_ | type is a reference to the type of source the catalog is sourced from. type is required. The only allowed value is "Image". When set to "Image", the ClusterCatalog content will be sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. | | Enum: [Image] Required: \{\} |
-| `image` _[ImageSource](#imagesource)_ | image is used to configure how catalog contents are sourced from an OCI image. This field is required when type is Image, and forbidden otherwise. | | |
+| `type` _[SourceType](#sourcetype)_ | type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. | | Enum: [Image] Required: \{\} |
+| `image` _[ImageSource](#imagesource)_ | image configures how catalog contents are sourced from an OCI image. It is required when type is Image, and forbidden otherwise. | | |
#### ClusterCatalog
-ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
@@ -140,8 +140,8 @@ _Appears in:_
| `kind` _string_ | Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
-| `spec` _[ClusterCatalogSpec](#clustercatalogspec)_ | spec is the desired state of the ClusterCatalog. spec is required. The controller will work to ensure that the desired catalog is unpacked and served over the catalog content HTTP server. | | Required: \{\} |
-| `status` _[ClusterCatalogStatus](#clustercatalogstatus)_ | status contains information about the state of the ClusterCatalog such as: - Whether or not the catalog contents are being served via the catalog content HTTP server - Whether or not the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved | | |
+| `spec` _[ClusterCatalogSpec](#clustercatalogspec)_ | spec is a required field that defines the desired state of the ClusterCatalog. The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. | | Required: \{\} |
+| `status` _[ClusterCatalogStatus](#clustercatalogstatus)_ | status contains the following information about the state of the ClusterCatalog: - Whether the catalog contents are being served via the catalog content HTTP server - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved | | |
#### ClusterCatalogList
@@ -177,9 +177,9 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `source` _[CatalogSource](#catalogsource)_ | source allows a user to define the source of a catalog. A "catalog" contains information on content that can be installed on a cluster. Providing a catalog source makes the contents of the catalog discoverable and usable by other on-cluster components. These on-cluster components may do a variety of things with this information, such as presenting the content in a GUI dashboard or installing content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: source: type: Image image: ref: quay.io/operatorhubio/catalog:latest | | Required: \{\} |
-| `priority` _integer_ | priority allows the user to define a priority for a ClusterCatalog. priority is optional. A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. A higher number means higher priority. It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. When omitted, the default priority is 0 because that is the zero value of integers. Negative numbers can be used to specify a priority lower than the default. Positive numbers can be used to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. | 0 | |
-| `availabilityMode` _[AvailabilityMode](#availabilitymode)_ | availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. availabilityMode is optional. Allowed values are "Available" and "Unavailable" and omitted. When omitted, the default value is "Available". When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog and its contents as usable. When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. | Available | Enum: [Unavailable Available] |
+| `source` _[CatalogSource](#catalogsource)_ | source is a required field that defines the source of a catalog. A catalog contains information on content that can be installed on a cluster. The catalog source makes catalog contents discoverable and usable by other on-cluster components. These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: source: type: Image image: ref: quay.io/operatorhubio/catalog:latest | | Required: \{\} |
+| `priority` _integer_ | priority is an optional field that defines a priority for this ClusterCatalog. Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. Higher numbers mean higher priority. Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. Clients should prompt users for additional input to break the tie. When omitted, the default priority is 0. Use negative numbers to specify a priority lower than the default. Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. | 0 | |
+| `availabilityMode` _[AvailabilityMode](#availabilitymode)_ | availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. Clients should consider this ClusterCatalog and its contents as usable. When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. Treat this the same as if the ClusterCatalog does not exist. Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. | Available | Enum: [Unavailable Available] |
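+
+A minimal sketch of a ClusterCatalogSpec combining the fields above; the image reference repeats the example from the source field description, and priority and availabilityMode are shown at their defaults:
+
+```yaml
+spec:
+  source:
+    type: Image
+    image:
+      ref: quay.io/operatorhubio/catalog:latest
+  priority: 0                  # optional; higher numbers win tie-breaks between catalogs
+  availabilityMode: Available  # optional; "Unavailable" hides the catalog without deleting it
+```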
#### ClusterCatalogStatus
@@ -195,10 +195,10 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions is a representation of the current state for this ClusterCatalog. The current condition types are Serving and Progressing. The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. When it has a status of True and a reason of Available, the contents of the catalog are being served. When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes to the contents we identify that there are updates to the contents. | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: - When status is True and reason is Available, the catalog contents are being served. - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. - The Progressing condition is True with reason Retrying because the system is working to serve the new version. | | |
| `resolvedSource` _[ResolvedCatalogSource](#resolvedcatalogsource)_ | resolvedSource contains information about the resolved source based on the source type. | | |
| `urls` _[ClusterCatalogURLs](#clustercatalogurls)_ | urls contains the URLs that can be used to access the catalog. | | |
-| `lastUnpacked` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastUnpacked represents the last time the contents of the catalog were extracted from their source format. As an example, when using an Image source, the OCI image will be pulled and the image layers written to a file-system backed cache. We refer to the act of this extraction from the source format as "unpacking". | | |
+| `lastUnpacked` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastUnpacked represents the last time the catalog contents were extracted from their source format. For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. This extraction from the source format is called "unpacking". | | |
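+
+A sketch of the conditions a healthy, up-to-date ClusterCatalog might report, based on the condition types and reasons described above (timestamps and messages omitted):
+
+```yaml
+status:
+  conditions:
+    - type: Serving
+      status: "True"
+      reason: Available     # catalog contents are being served
+    - type: Progressing
+      status: "True"
+      reason: Succeeded     # ready to progress towards a new state
+```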
#### ClusterCatalogURLs
@@ -214,7 +214,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `base` _string_ | base is a cluster-internal URL that provides endpoints for accessing the content of the catalog. It is expected that clients append the path for the endpoint they wish to access. Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - /all - this endpoint returns the entirety of the catalog contents in the FBC format As the needs of users and clients of the evolve, new endpoints may be added. | | MaxLength: 525 Required: \{\} |
+| `base` _string_ | base is a cluster-internal URL that provides endpoints for accessing the catalog content. Clients should append the path for the endpoint they want to access. Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - /all - this endpoint returns the entire catalog contents in the FBC format New endpoints may be added as needs evolve. | | MaxLength: 525 Required: \{\} |
#### ClusterExtension
@@ -253,8 +253,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `configType` _[ClusterExtensionConfigType](#clusterextensionconfigtype)_ | configType is a required reference to the type of configuration source. Allowed values are "Inline" When this field is set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. | | Enum: [Inline] Required: \{\} |
-| `inline` _[JSON](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#json-v1-apiextensions-k8s-io)_ | inline contains JSON or YAML values specified directly in the ClusterExtension. inline is used to specify arbitrary configuration values for the ClusterExtension. It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. The configuration values are validated at runtime against a JSON schema provided by the bundle. | | MinProperties: 1 Type: object |
+| `configType` _[ClusterExtensionConfigType](#clusterextensionconfigtype)_ | configType is required and specifies the type of configuration source. The only allowed value is "Inline". When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. | | Enum: [Inline] Required: \{\} |
+| `inline` _[JSON](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#json-v1-apiextensions-k8s-io)_ | inline contains JSON or YAML values specified directly in the ClusterExtension. It is used to specify arbitrary configuration values for the ClusterExtension. It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. The configuration values are validated at runtime against a JSON schema provided by the bundle. | | MinProperties: 1 Type: object |
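+
+A minimal sketch of an inline configuration; the property names under `inline` are hypothetical and would be validated against whatever JSON schema the resolved bundle provides:
+
+```yaml
+config:
+  configType: Inline
+  inline:
+    replicaCount: 2          # hypothetical property defined by the bundle's schema
+    logLevel: debug          # hypothetical property defined by the bundle's schema
+```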
#### ClusterExtensionConfigType
@@ -287,7 +287,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `preflight` _[PreflightConfig](#preflightconfig)_ | preflight is an optional field that can be used to configure the checks that are run before installation or upgrade of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. When not specified, the default configuration will be used. | | |
+| `preflight` _[PreflightConfig](#preflightconfig)_ | preflight is optional and configures the checks that run before installation or upgrade of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. When not specified, the default configuration is used. | | |
#### ClusterExtensionInstallStatus
@@ -303,7 +303,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `bundle` _[BundleMetadata](#bundlemetadata)_ | bundle is a required field which represents the identifying attributes of a bundle. A "bundle" is a versioned set of content that represents the resources that need to be applied to a cluster to install a package. | | Required: \{\} |
+| `bundle` _[BundleMetadata](#bundlemetadata)_ | bundle is required and represents the identifying attributes of a bundle. A "bundle" is a versioned set of content that represents the resources that need to be applied to a cluster to install a package. | | Required: \{\} |
#### ClusterExtensionList
@@ -339,11 +339,11 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `namespace` _string_ | namespace is a reference to a Kubernetes namespace. This is the namespace in which the provided ServiceAccount must exist. It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. namespace is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, and be no longer than 63 characters [RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 63 Required: \{\} |
-| `serviceAccount` _[ServiceAccountReference](#serviceaccountreference)_ | serviceAccount is a reference to a ServiceAccount used to perform all interactions with the cluster that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. serviceAccount is required. | | Required: \{\} |
-| `source` _[SourceConfig](#sourceconfig)_ | source is a required field which selects the installation source of content for this ClusterExtension. Selection is performed by setting the sourceType. Catalog is currently the only implemented sourceType, and setting the sourcetype to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): source: sourceType: Catalog catalog: packageName: example-package | | Required: \{\} |
-| `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is an optional field used to configure the installation options for the ClusterExtension such as the pre-flight check configuration. | | |
-| `config` _[ClusterExtensionConfig](#clusterextensionconfig)_ | config is an optional field used to specify bundle specific configuration used to configure the bundle. Configuration is bundle specific and a bundle may provide a configuration schema. When not specified, the default configuration of the resolved bundle will be used. config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide a configuration schema the bundle is deemed to not be configurable. More information on how to configure bundles can be found in the OLM documentation associated with your current OLM version. | | |
+| `namespace` _string_ | namespace specifies a Kubernetes namespace. This is the namespace where the provided ServiceAccount must exist. It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 63 Required: \{\} |
+| `serviceAccount` _[ServiceAccountReference](#serviceaccountreference)_ | serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. The serviceAccount field is required. | | Required: \{\} |
+| `source` _[SourceConfig](#sourceconfig)_ | source is required and selects the installation source of content for this ClusterExtension. Set the sourceType field to perform the selection. Catalog is currently the only implemented sourceType. Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): source: sourceType: Catalog catalog: packageName: example-package | | Required: \{\} |
+| `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is optional and configures installation options for the ClusterExtension, such as the pre-flight check configuration. | | |
+| `config` _[ClusterExtensionConfig](#clusterextensionconfig)_ | config is optional and specifies bundle-specific configuration. A bundle may provide a configuration schema. When not specified, the default configuration of the resolved bundle is used. config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide a configuration schema, the bundle is deemed not to be configurable. More information on how to configure bundles can be found in the OLM documentation associated with your current OLM version. | | |
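+
+A minimal sketch of a ClusterExtensionSpec assembled from the fields above; the namespace, ServiceAccount, and package names are hypothetical, and the source fragment mirrors the minimal example in the source field description:
+
+```yaml
+spec:
+  namespace: example-namespace      # must already exist
+  serviceAccount:
+    name: example-installer         # must exist in spec.namespace with sufficient permissions
+  source:
+    sourceType: Catalog
+    catalog:
+      packageName: example-package
+```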
#### ClusterExtensionStatus
@@ -359,7 +359,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | The set of condition types which apply to all spec.source variations are Installed and Progressing. The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.
When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. These are indications from a package owner to guide users away from a particular package, channel, or bundle. BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. PackageDeprecated is set if the requested package is marked deprecated in the catalog. Deprecated is a rollup condition that is present when any of the deprecated conditions are present. | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | The set of condition types which apply to all spec.source variations are Installed and Progressing. The Installed condition represents whether the bundle has been installed for this ClusterExtension: - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether the ClusterExtension is advancing towards a new state: - When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. - When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. - When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.
When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. These are indications from a package owner to guide users away from a particular package, channel, or bundle: - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. | | |
| `install` _[ClusterExtensionInstallStatus](#clusterextensioninstallstatus)_ | install is a representation of the current installation status for this ClusterExtension. | | |
| `activeRevisions` _[RevisionStatus](#revisionstatus) array_ | activeRevisions holds a list of currently active (non-archived) ClusterExtensionRevisions, including both installed and rolling out revisions. | | |
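+
+A sketch of the conditions a successfully installed ClusterExtension might report, using the condition types and reasons described above (timestamps and messages omitted):
+
+```yaml
+status:
+  conditions:
+    - type: Installed
+      status: "True"
+      reason: Succeeded     # the bundle has been successfully installed
+    - type: Progressing
+      status: "True"
+      reason: Succeeded     # making progress towards, or ready for, a new state
+```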
@@ -382,8 +382,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `ref` _string_ | ref allows users to define the reference to a container image containing Catalog contents. ref is required. ref can not be more than 1000 characters. A reference can be broken down into 3 parts - the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. Hyphenation is allowed, but the domain must start and end with alphanumeric characters. Specifying a port to use is also allowed by adding the ":" character followed by numeric values. The port must be the last value in the domain. Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080". The name is typically the repository in the registry where an image is located. It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters. Multiple names can be concatenated with the "/" character. The domain and name are combined using the "/" character. Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod". An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog". The identifier is typically the tag or digest for an image reference and is present at the end of the reference. It starts with a separator character used to distinguish the end of the name and beginning of the identifier. For a digest-based reference, the "@" character is the separator. For a tag-based reference, the ":" character is the separator. An identifier is required in the reference. Digest-based references must contain an algorithm reference immediately after the "@" separator. The algorithm reference must be followed by the ":" character and an encoded string. The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters. Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58". The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters. Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters or ".", and "-" characters. The tag must not be longer than 127 characters. An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05" An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest" | | MaxLength: 1000 Required: \{\} |
-| `pollIntervalMinutes` _integer_ | pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. pollIntervalMinutes is optional. pollIntervalMinutes can not be specified when ref is a digest-based reference. When omitted, the image will not be polled for new content. | | Minimum: 1 |
+| `ref` _string_ | ref is a required field that defines the reference to a container image containing catalog contents. It cannot be more than 1000 characters. A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. Hyphenation is allowed, but the domain must start and end with alphanumeric characters. Specifying a port to use is also allowed by adding the ":" character followed by numeric values. The port must be the last value in the domain. Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080". The name is typically the repository in the registry where an image is located. It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters. Multiple names can be concatenated with the "/" character. The domain and name are combined using the "/" character. Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod". An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog". The identifier is typically the tag or digest for an image reference and is present at the end of the reference. It starts with a separator character used to distinguish the end of the name and beginning of the identifier. For a digest-based reference, the "@" character is the separator. For a tag-based reference, the ":" character is the separator. An identifier is required in the reference. Digest-based references must contain an algorithm reference immediately after the "@" separator. The algorithm reference must be followed by the ":" character and an encoded string. The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters. Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58". The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters. Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters or ".", and "-" characters. The tag must not be longer than 127 characters. An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05" An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest" | | MaxLength: 1000 Required: \{\} |
+| `pollIntervalMinutes` _integer_ | pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. You cannot specify pollIntervalMinutes when ref is a digest-based reference. When omitted, the image is not polled for new content. | | Minimum: 1 |
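+
+A minimal sketch of an ImageSource; the reference is the tag-based example from the ref description, and the polling interval is an arbitrary illustrative value (polling is only allowed for tag-based references):
+
+```yaml
+image:
+  ref: quay.io/operatorhubio/catalog:latest  # tag-based reference
+  pollIntervalMinutes: 10                    # forbidden when ref is digest-based
+```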
#### PreflightConfig
@@ -399,7 +399,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `crdUpgradeSafety` _[CRDUpgradeSafetyPreflightConfig](#crdupgradesafetypreflightconfig)_ | crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight checks that run prior to upgrades of installed content. The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, such as data loss. | | |
+| `crdUpgradeSafety` _[CRDUpgradeSafetyPreflightConfig](#crdupgradesafetypreflightconfig)_ | crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run before upgrades of installed content. The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, such as data loss. | | |
#### ResolvedCatalogSource
@@ -416,8 +416,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `type` _[SourceType](#sourcetype)_ | type is a reference to the type of source the catalog is sourced from. type is required. The only allowed value is "Image". When set to "Image", information about the resolved image source will be set in the 'image' field. | | Enum: [Image] Required: \{\} |
-| `image` _[ResolvedImageSource](#resolvedimagesource)_ | image is a field containing resolution information for a catalog sourced from an image. This field must be set when type is Image, and forbidden otherwise. | | |
+| `type` _[SourceType](#sourcetype)_ | type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". When set to "Image", information about the resolved image source is set in the image field. | | Enum: [Image] Required: \{\} |
+| `image` _[ResolvedImageSource](#resolvedimagesource)_ | image contains resolution information for a catalog sourced from an image. It must be set when type is Image, and forbidden otherwise. | | |
#### ResolvedImageSource
@@ -433,7 +433,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `ref` _string_ | ref contains the resolved image digest-based reference. The digest format is used so users can use other tooling to fetch the exact OCI manifests that were used to extract the catalog contents. | | MaxLength: 1000 Required: \{\} |
+| `ref` _string_ | ref contains the resolved image digest-based reference. The digest format allows you to use other tooling to fetch the exact OCI manifests that were used to extract the catalog contents. | | MaxLength: 1000 Required: \{\} |
#### RevisionStatus
@@ -466,7 +466,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `name` _string_ | name is a required, immutable reference to the name of the ServiceAccount to be used for installation and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. name follows the DNS subdomain standard as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount - 123-serviceaccount - 1-serviceaccount-2 - someserviceaccount - some.serviceaccount Some examples of invalid values are: - -some-serviceaccount - some-serviceaccount- [RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253 Required: \{\} |
+| `name` _string_ | name is a required, immutable reference to the name of the ServiceAccount used for installation and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. The name field follows the DNS subdomain standard as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount - 123-serviceaccount - 1-serviceaccount-2 - someserviceaccount - some.serviceaccount Some examples of invalid values are: - -some-serviceaccount - some-serviceaccount- [RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253 Required: \{\} |
#### SourceConfig
@@ -482,8 +482,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `sourceType` _string_ | sourceType is a required reference to the type of install source. Allowed values are "Catalog" When this field is set to "Catalog", information for determining the appropriate bundle of content to install will be fetched from ClusterCatalog resources existing on the cluster. When using the Catalog sourceType, the catalog field must also be set. | | Enum: [Catalog] Required: \{\} |
-| `catalog` _[CatalogFilter](#catalogfilter)_ | catalog is used to configure how information is sourced from a catalog. This field is required when sourceType is "Catalog", and forbidden otherwise. | | |
+| `sourceType` _string_ | sourceType is required and specifies the type of install source. The only allowed value is "Catalog". When set to "Catalog", information for determining the appropriate bundle of content to install is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. | | Enum: [Catalog] Required: \{\} |
+| `catalog` _[CatalogFilter](#catalogfilter)_ | catalog configures how information is sourced from a catalog. It is required when sourceType is "Catalog", and forbidden otherwise. | | |
#### SourceType
diff --git a/go.mod b/go.mod
index 89aa72d7b..46c131db2 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/operator-framework/operator-controller
-go 1.24.6
+go 1.25.3
require (
github.com/BurntSushi/toml v1.5.0
@@ -8,6 +8,8 @@ require (
github.com/blang/semver/v4 v4.0.0
github.com/cert-manager/cert-manager v1.18.2
github.com/containerd/containerd v1.7.29
+ github.com/cucumber/godog v0.15.1
+ github.com/evanphx/json-patch v5.9.11+incompatible
github.com/fsnotify/fsnotify v1.9.0
github.com/go-logr/logr v1.4.3
github.com/golang-jwt/jwt/v5 v5.3.0
@@ -15,7 +17,7 @@ require (
github.com/google/go-containerregistry v0.20.7
github.com/google/renameio/v2 v2.0.1
github.com/gorilla/handlers v1.5.2
- github.com/klauspost/compress v1.18.1
+ github.com/klauspost/compress v1.18.2
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.1
github.com/operator-framework/api v0.36.0
@@ -24,13 +26,13 @@ require (
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/common v0.67.4
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2
- github.com/spf13/cobra v1.10.1
+ github.com/spf13/cobra v1.10.2
github.com/spf13/pflag v1.0.10
github.com/stretchr/testify v1.11.1
go.podman.io/image/v5 v5.38.0
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
golang.org/x/mod v0.30.0
- golang.org/x/sync v0.18.0
+ golang.org/x/sync v0.19.0
golang.org/x/tools v0.39.0
helm.sh/helm/v3 v3.19.3
k8s.io/api v0.34.1
@@ -86,6 +88,8 @@ require (
github.com/containerd/typeurl/v2 v2.2.3 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/containers/ocicrypt v1.2.1 // indirect
+ github.com/cucumber/gherkin/go/v26 v26.2.0 // indirect
+ github.com/cucumber/messages/go/v21 v21.0.1 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
github.com/cyphar/filepath-securejoin v0.6.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -97,7 +101,6 @@ require (
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
- github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/color v1.18.0 // indirect
@@ -126,6 +129,7 @@ require (
github.com/go-openapi/swag/yamlutils v0.24.0 // indirect
github.com/gobuffalo/flect v1.0.3 // indirect
github.com/gobwas/glob v0.2.3 // indirect
+ github.com/gofrs/uuid v4.3.1+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/protobuf v1.5.4 // indirect
@@ -142,7 +146,10 @@ require (
github.com/h2non/filetype v1.1.3 // indirect
github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
+ github.com/hashicorp/go-memdb v1.3.4 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
diff --git a/go.sum b/go.sum
index c9879d9aa..3b877b334 100644
--- a/go.sum
+++ b/go.sum
@@ -85,9 +85,17 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/cucumber/gherkin/go/v26 v26.2.0 h1:EgIjePLWiPeslwIWmNQ3XHcypPsWAHoMCz/YEBKP4GI=
+github.com/cucumber/gherkin/go/v26 v26.2.0/go.mod h1:t2GAPnB8maCT4lkHL99BDCVNzCh1d7dBhCLt150Nr/0=
+github.com/cucumber/godog v0.15.1 h1:rb/6oHDdvVZKS66hrhpjFQFHjthFSrQBCOI1LwshNTI=
+github.com/cucumber/godog v0.15.1/go.mod h1:qju+SQDewOljHuq9NSM66s0xEhogx0q30flfxL4WUk8=
+github.com/cucumber/messages/go/v21 v21.0.1 h1:wzA0LxwjlWQYZd32VTlAVDTkW6inOFmSM+RuOwHZiMI=
+github.com/cucumber/messages/go/v21 v21.0.1/go.mod h1:zheH/2HS9JLVFukdrsPWoPdmUtmYQAQPLk7w5vWsk5s=
+github.com/cucumber/messages/go/v22 v22.0.0/go.mod h1:aZipXTKc0JnjCsXrJnuZpWhtay93k7Rn3Dee7iyPJjs=
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is=
@@ -202,6 +210,9 @@ github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4
github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI=
+github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
@@ -274,8 +285,19 @@ github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c/go.mod h1:ObS/W+h8
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c=
+github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ=
github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
@@ -300,12 +322,15 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
-github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
+github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
@@ -457,8 +482,11 @@ github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA
github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
-github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -478,6 +506,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
@@ -626,8 +655,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
-golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/hack/ci/custom-linters/analyzers/testdata/go.mod b/hack/ci/custom-linters/analyzers/testdata/go.mod
index 23875e233..6a5571ff3 100644
--- a/hack/ci/custom-linters/analyzers/testdata/go.mod
+++ b/hack/ci/custom-linters/analyzers/testdata/go.mod
@@ -1,5 +1,5 @@
module testdata
-go 1.24.3
+go 1.25.3
require github.com/go-logr/logr v1.4.3
diff --git a/hack/conftest/policy/README.md b/hack/conftest/policy/README.md
new file mode 100644
index 000000000..21225dae3
--- /dev/null
+++ b/hack/conftest/policy/README.md
@@ -0,0 +1,70 @@
+# OPA Policies for NetworkPolicy Validation
+
+This directory contains [Open Policy Agent (OPA)](https://www.openpolicyagent.org/) Rego policies used by [conftest](https://www.conftest.dev/) to validate generated Kubernetes manifests.
+
+## Policy Files
+
+### olm-networkpolicies.rego
+
+Package: `main`
+
+Validates core OLM NetworkPolicy requirements:
+
+- **Deny-all policy**: Ensures a default deny-all NetworkPolicy exists with empty podSelector and both Ingress/Egress policy types (see the example manifest after this list)
+- **catalogd-controller-manager policy**: Validates the NetworkPolicy for catalogd:
+ - Ingress on port 7443 (Prometheus metrics scraping)
+ - Ingress on port 8443 (catalog metadata queries from operator-controller)
+ - Ingress on port 9443 (Kubernetes API server webhook access)
+ - General egress enabled (for pulling catalog images and Kubernetes API access)
+- **operator-controller-controller-manager policy**: Validates the NetworkPolicy for operator-controller:
+ - Ingress on port 8443 (Prometheus metrics scraping)
+ - General egress enabled (for pulling bundle images, connecting to catalogd, and Kubernetes API)
+
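+For reference, a minimal manifest that satisfies the deny-all rule might look like this (the name is illustrative; the rule checks shape, not names):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: default-deny-all  # illustrative; any name passes
+spec:
+  podSelector: {}  # empty selector: applies to all pods
+  policyTypes:
+    - Ingress
+    - Egress
+  # no ingress or egress rules: all traffic is denied
+```
+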
+### prometheus-networkpolicies.rego
+
+Package: `prometheus`
+
+Validates Prometheus NetworkPolicy requirements:
+
+- Ensures a NetworkPolicy exists that allows both ingress and egress traffic for prometheus pods (a minimal compliant manifest is sketched below)
+
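+A minimal sketch of a manifest that would satisfy this rule (the name is illustrative; the rule requires the prometheus podSelector, both policy types, and at least one ingress and one egress rule):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: prometheus-allow  # illustrative
+spec:
+  podSelector:
+    matchLabels:
+      app.kubernetes.io/name: prometheus
+  policyTypes:
+    - Ingress
+    - Egress
+  ingress:
+    - {}  # allow all ingress
+  egress:
+    - {}  # allow all egress
+```
+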
+## Usage
+
+These policies are automatically run as part of:
+
+- `make lint-helm` - Validates both helm/olmv1 and helm/prometheus charts (runs `main` and `prometheus` packages)
+- `make manifests` - Generates and validates core OLM manifests using only `main` package policies
+ (Prometheus policies are intentionally skipped here, even if manifests include Prometheus resources;
+ they are validated via `make lint-helm`)
+
+### Running manually
+
+```bash
+# Run all policies (main + prometheus namespaces)
+helm template olmv1 helm/olmv1 | conftest test --policy hack/conftest/policy/ --combine -n main -n prometheus -
+
+# Run only OLM policies
+helm template olmv1 helm/olmv1 | conftest test --policy hack/conftest/policy/ --combine -n main -
+
+# Run only prometheus policies
+helm template prometheus helm/prometheus | conftest test --policy hack/conftest/policy/ --combine -n prometheus -
+```
+
+### Excluding policies
+
+Use the `-n` (namespace) flag to select which policy packages run (pipe in manifests as in the examples above):
+
+```bash
+# Skip prometheus policies
+conftest test --policy hack/conftest/policy/ --combine -n main
+
+# Skip OLM policies
+conftest test --policy hack/conftest/policy/ --combine -n prometheus
+```
+
+## Adding New Policies
+
+1. Add new rules to an existing `.rego` file or create a new one
+2. Use `package main` for policies that should run by default on all manifests
+3. Use a custom package name (e.g., `package prometheus`) for optional policies
+4. Update the Makefile targets if new namespaces need to be included
diff --git a/hack/conftest/policy/olm-networkpolicies.rego b/hack/conftest/policy/olm-networkpolicies.rego
new file mode 100644
index 000000000..df0c81426
--- /dev/null
+++ b/hack/conftest/policy/olm-networkpolicies.rego
@@ -0,0 +1,160 @@
+package main
+
+import rego.v1
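+
+# NOTE: These rules assume conftest's --combine flag (as used in the README
+# examples above), where `input` is an array of objects and each parsed
+# manifest is available under the `contents` key.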
+
+# Check that a deny-all NetworkPolicy exists
+# A deny-all policy has:
+# - podSelector: {} (empty, applies to all pods)
+# - policyTypes containing both "Ingress" and "Egress"
+# - No ingress or egress rules defined
+
+is_deny_all(policy) if {
+ policy.kind == "NetworkPolicy"
+ policy.apiVersion == "networking.k8s.io/v1"
+
+ # podSelector must be empty (applies to all pods)
+ count(policy.spec.podSelector) == 0
+
+ # Must have both Ingress and Egress policy types
+ policy_types := {t | some t in policy.spec.policyTypes}
+ policy_types["Ingress"]
+ policy_types["Egress"]
+
+ # Must not have any ingress rules
+ not policy.spec.ingress
+
+ # Must not have any egress rules
+ not policy.spec.egress
+}
+
+has_deny_all_policy if {
+ some i in numbers.range(0, count(input) - 1)
+ is_deny_all(input[i].contents)
+}
+
+deny contains msg if {
+ not has_deny_all_policy
+ msg := "No deny-all NetworkPolicy found. A NetworkPolicy with empty podSelector, policyTypes [Ingress, Egress], and no ingress/egress rules is required."
+}
+
+# Check that a NetworkPolicy exists for catalogd-controller-manager that:
+# - Allows ingress on TCP ports 7443, 8443, 9443
+# - Allows general egress traffic
+
+is_catalogd_policy(policy) if {
+ policy.kind == "NetworkPolicy"
+ policy.apiVersion == "networking.k8s.io/v1"
+ policy.spec.podSelector.matchLabels["control-plane"] == "catalogd-controller-manager"
+}
+
+catalogd_policies contains policy if {
+ some i in numbers.range(0, count(input) - 1)
+ policy := input[i].contents
+ is_catalogd_policy(policy)
+}
+
+catalogd_ingress_ports contains port if {
+ some policy in catalogd_policies
+ some rule in policy.spec.ingress
+ some port in rule.ports
+ port.protocol == "TCP"
+}
+
+catalogd_ingress_port_numbers contains num if {
+ some port in catalogd_ingress_ports
+ num := port.port
+}
+
+catalogd_has_egress if {
+ some policy in catalogd_policies
+ policy.spec.egress
+}
+
+deny contains msg if {
+ count(catalogd_policies) == 0
+ msg := "No NetworkPolicy found for catalogd-controller-manager. A NetworkPolicy allowing ingress on TCP ports 7443, 8443, 9443 and general egress is required."
+}
+
+deny contains msg if {
+ count(catalogd_policies) > 1
+ msg := sprintf("Expected exactly 1 NetworkPolicy for catalogd-controller-manager, found %d.", [count(catalogd_policies)])
+}
+
+deny contains msg if {
+ count(catalogd_policies) == 1
+ not catalogd_ingress_port_numbers[7443]
+ msg := "Allow traffic to port 7443. Permit Prometheus to scrape metrics from catalogd, which is essential for monitoring its performance and health."
+}
+
+deny contains msg if {
+ count(catalogd_policies) == 1
+ not catalogd_ingress_port_numbers[8443]
+ msg := "Allow traffic to port 8443. Permit clients (eg. operator-controller) to query catalog metadata from catalogd, which is a core function for bundle resolution and operator discovery."
+}
+
+deny contains msg if {
+ count(catalogd_policies) == 1
+ not catalogd_ingress_port_numbers[9443]
+ msg := "Allow traffic to port 9443. Permit Kubernetes API server to reach catalogd's mutating admission webhook, ensuring integrity of catalog resources."
+}
+
+deny contains msg if {
+ count(catalogd_policies) == 1
+ not catalogd_has_egress
+ msg := "Missing egress rules in catalogd-controller-manager NetworkPolicy. General egress is required to enables operator-controller to pull bundle images from arbitrary image registries, connect to catalogd's HTTPS server for metadata, and interact with the Kubernetes API server."
+}
+
+# Check that a NetworkPolicy exists for operator-controller-controller-manager that:
+# - Allows ingress on TCP port 8443
+# - Allows general egress traffic
+
+is_operator_controller_policy(policy) if {
+ policy.kind == "NetworkPolicy"
+ policy.apiVersion == "networking.k8s.io/v1"
+ policy.spec.podSelector.matchLabels["control-plane"] == "operator-controller-controller-manager"
+}
+
+operator_controller_policies contains policy if {
+ some i in numbers.range(0, count(input) - 1)
+ policy := input[i].contents
+ is_operator_controller_policy(policy)
+}
+
+operator_controller_ingress_ports contains port if {
+ some policy in operator_controller_policies
+ some rule in policy.spec.ingress
+ some port in rule.ports
+ port.protocol == "TCP"
+}
+
+operator_controller_ingress_port_numbers contains num if {
+ some port in operator_controller_ingress_ports
+ num := port.port
+}
+
+operator_controller_has_egress if {
+ some policy in operator_controller_policies
+ policy.spec.egress
+}
+
+deny contains msg if {
+ count(operator_controller_policies) == 0
+ msg := "No NetworkPolicy found for operator-controller-controller-manager. A NetworkPolicy allowing ingress on TCP port 8443 and general egress is required."
+}
+
+deny contains msg if {
+ count(operator_controller_policies) > 1
+ msg := sprintf("Expected exactly 1 NetworkPolicy for operator-controller-controller-manager, found %d.", [count(operator_controller_policies)])
+}
+
+deny contains msg if {
+ count(operator_controller_policies) == 1
+ not operator_controller_ingress_port_numbers[8443]
+ msg := "Allow traffic to port 8443. Permit Prometheus to scrape metrics from catalogd, which is essential for monitoring its performance and health."
+}
+
+deny contains msg if {
+ count(operator_controller_policies) == 1
+ not operator_controller_has_egress
+ msg := "Missing egress rules in operator-controller-controller-manager NetworkPolicy. General egress is required to enables operator-controller to pull bundle images from arbitrary image registries, connect to catalogd's HTTPS server for metadata, and interact with the Kubernetes API server."
+}
diff --git a/hack/conftest/policy/prometheus-networkpolicies.rego b/hack/conftest/policy/prometheus-networkpolicies.rego
new file mode 100644
index 000000000..c37158250
--- /dev/null
+++ b/hack/conftest/policy/prometheus-networkpolicies.rego
@@ -0,0 +1,33 @@
+package prometheus
+
+import rego.v1
+
+# Check that a NetworkPolicy exists that allows both ingress and egress traffic to prometheus pods
+is_prometheus_policy(policy) if {
+ policy.kind == "NetworkPolicy"
+ policy.apiVersion == "networking.k8s.io/v1"
+
+ # Must target prometheus pods
+ policy.spec.podSelector.matchLabels["app.kubernetes.io/name"] == "prometheus"
+
+ # Must have both Ingress and Egress policy types
+ policy_types := {t | some t in policy.spec.policyTypes}
+ policy_types["Ingress"]
+ policy_types["Egress"]
+
+ # Must have ingress rules defined (allowing traffic)
+ policy.spec.ingress
+
+ # Must have egress rules defined (allowing traffic)
+ policy.spec.egress
+}
+
+has_prometheus_policy if {
+ some i in numbers.range(0, count(input) - 1)
+ is_prometheus_policy(input[i].contents)
+}
+
+deny contains msg if {
+ not has_prometheus_policy
+ msg := "No NetworkPolicy found that allows both ingress and egress traffic to prometheus pods. A NetworkPolicy targeting prometheus pods with ingress and egress rules is required."
+}
diff --git a/hack/kind-config/containerd/certs.d/go.mod b/hack/kind-config/containerd/certs.d/go.mod
index adbe39415..fa9ef1076 100644
--- a/hack/kind-config/containerd/certs.d/go.mod
+++ b/hack/kind-config/containerd/certs.d/go.mod
@@ -1,6 +1,6 @@
module hack-cert.d
-go 1.24.6
+go 1.25.3
// This file is present in the certs.d directory to ensure that
// certs.d/host:port directories are not included in the main go
diff --git a/hack/tools/test-profiling/go.mod b/hack/tools/test-profiling/go.mod
index df225c427..11c55a0d8 100644
--- a/hack/tools/test-profiling/go.mod
+++ b/hack/tools/test-profiling/go.mod
@@ -1,6 +1,6 @@
module github.com/operator-framework/operator-controller/hack/tools/test-profiling
-go 1.24.6
+go 1.25.3
require (
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d
diff --git a/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml b/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml
index c78a57b92..7508ab775 100644
--- a/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml
+++ b/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml
@@ -29,7 +29,7 @@ spec:
schema:
openAPIV3Schema:
description: |-
- ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+ ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
properties:
apiVersion:
@@ -51,29 +51,24 @@ spec:
type: object
spec:
description: |-
- spec is the desired state of the ClusterCatalog.
- spec is required.
- The controller will work to ensure that the desired
- catalog is unpacked and served over the catalog content HTTP server.
+ spec is a required field that defines the desired state of the ClusterCatalog.
+ The controller ensures that the catalog is unpacked and served over the catalog content HTTP server.
properties:
availabilityMode:
default: Available
description: |-
- availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
- availabilityMode is optional.
+ availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
- Allowed values are "Available" and "Unavailable" and omitted.
+ Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
- When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
- Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
- and its contents as usable.
+ When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
+ Clients should consider this ClusterCatalog and its contents as usable.
- When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
- When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
- Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
- to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist.
+ When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
+ Clients should treat this the same as if the ClusterCatalog does not exist.
+ Use "Unavailable" when you want to keep the ClusterCatalog resource but have clients ignore it.
enum:
- Unavailable
- Available
@@ -81,19 +76,18 @@ spec:
priority:
default: 0
description: |-
- priority allows the user to define a priority for a ClusterCatalog.
- priority is optional.
+ priority is an optional field that defines a priority for this ClusterCatalog.
- A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
- A higher number means higher priority.
+ Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
+ Higher numbers mean higher priority.
- It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
- When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+ Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ Clients should prompt users for additional input to break the tie.
- When omitted, the default priority is 0 because that is the zero value of integers.
+ When omitted, the default priority is 0.
- Negative numbers can be used to specify a priority lower than the default.
- Positive numbers can be used to specify a priority higher than the default.
+ Use negative numbers to specify a priority lower than the default.
+ Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647.
@@ -101,15 +95,12 @@ spec:
type: integer
source:
description: |-
- source allows a user to define the source of a catalog.
- A "catalog" contains information on content that can be installed on a cluster.
- Providing a catalog source makes the contents of the catalog discoverable and usable by
- other on-cluster components.
- These on-cluster components may do a variety of things with this information, such as
- presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ source is a required field that defines the source of a catalog.
+ A catalog contains information on content that can be installed on a cluster.
+ The catalog source makes catalog contents discoverable and usable by other on-cluster components.
+ These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
- source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
@@ -120,25 +111,23 @@ spec:
properties:
image:
description: |-
- image is used to configure how catalog contents are sourced from an OCI image.
- This field is required when type is Image, and forbidden otherwise.
+ image configures how catalog contents are sourced from an OCI image.
+ It is required when type is Image, and forbidden otherwise.
properties:
pollIntervalMinutes:
description: |-
- pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
- pollIntervalMinutes is optional.
- pollIntervalMinutes can not be specified when ref is a digest-based reference.
+ pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
+ You cannot specify pollIntervalMinutes when ref is a digest-based reference.
- When omitted, the image will not be polled for new content.
+ When omitted, the image is not polled for new content.
minimum: 1
type: integer
ref:
description: |-
- ref allows users to define the reference to a container image containing Catalog contents.
- ref is required.
- ref can not be more than 1000 characters.
+ ref is a required field that defines the reference to a container image containing catalog contents.
+ It cannot be more than 1000 characters.
- A reference can be broken down into 3 parts - the domain, name, and identifier.
+ A reference has 3 parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
@@ -221,12 +210,11 @@ spec:
: true'
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type.
enum:
- Image
@@ -244,31 +232,30 @@ spec:
type: object
status:
description: |-
- status contains information about the state of the ClusterCatalog such as:
- - Whether or not the catalog contents are being served via the catalog content HTTP server
- - Whether or not the ClusterCatalog is progressing to a new state
+ status contains the following information about the state of the ClusterCatalog:
+ - Whether the catalog contents are being served via the catalog content HTTP server
+ - Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved
properties:
conditions:
description: |-
- conditions is a representation of the current state for this ClusterCatalog.
+ conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
- The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
- When it has a status of True and a reason of Available, the contents of the catalog are being served.
- When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
- When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+ The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
+ - When status is True and reason is Available, the catalog contents are being served.
+ - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
+ - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
- The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
- When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
- When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+ The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
+ - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
+ - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
- In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
- catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
- contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
- to the contents we identify that there are updates to the contents.
+ If the initial contents have been fetched and polling later identifies updates, both conditions can be active simultaneously:
+ - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
+ - The Progressing condition is True with reason Retrying because the system is working to serve the new version.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
@@ -329,11 +316,9 @@ spec:
x-kubernetes-list-type: map
lastUnpacked:
description: |-
- lastUnpacked represents the last time the contents of the
- catalog were extracted from their source format. As an example,
- when using an Image source, the OCI image will be pulled and the
- image layers written to a file-system backed cache. We refer to the
- act of this extraction from the source format as "unpacking".
+ lastUnpacked represents the last time the catalog contents were extracted from their source format.
+ For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
+ This extraction from the source format is called "unpacking".
format: date-time
type: string
resolvedSource:
@@ -342,14 +327,14 @@ spec:
properties:
image:
description: |-
- image is a field containing resolution information for a catalog sourced from an image.
- This field must be set when type is Image, and forbidden otherwise.
+ image contains resolution information for a catalog sourced from an image.
+ It must be set when type is Image, and forbidden otherwise.
properties:
ref:
description: |-
ref contains the resolved image digest-based reference.
- The digest format is used so users can use other tooling to fetch the exact
- OCI manifests that were used to extract the catalog contents.
+ The digest format allows you to use other tooling to fetch the exact OCI manifests
+ that were used to extract the catalog contents.
maxLength: 1000
type: string
x-kubernetes-validations:
@@ -383,12 +368,11 @@ spec:
type: object
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", information about the resolved image source will be set in the 'image' field.
+ When set to "Image", information about the resolved image source is set in the image field.
enum:
- Image
type: string
@@ -407,19 +391,16 @@ spec:
properties:
base:
description: |-
- base is a cluster-internal URL that provides endpoints for
- accessing the content of the catalog.
+ base is a cluster-internal URL that provides endpoints for accessing the catalog content.
- It is expected that clients append the path for the endpoint they wish
- to access.
+ Clients should append the path for the endpoint they want to access.
- Currently, only a single endpoint is served and is accessible at the path
- /api/v1.
+ Currently, only a single endpoint is served and is accessible at the path /api/v1.
The endpoints served for the v1 API are:
- - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+ - /all - this endpoint returns the entire catalog contents in the FBC format
- As the needs of users and clients of the evolve, new endpoints may be added.
+ New endpoints may be added as needs evolve.
maxLength: 525
type: string
x-kubernetes-validations:
diff --git a/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml b/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml
index 94f1d7121..0bbf9b988 100644
--- a/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml
+++ b/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml
@@ -29,7 +29,7 @@ spec:
schema:
openAPIV3Schema:
description: |-
- ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+ ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
properties:
apiVersion:
@@ -51,29 +51,24 @@ spec:
type: object
spec:
description: |-
- spec is the desired state of the ClusterCatalog.
- spec is required.
- The controller will work to ensure that the desired
- catalog is unpacked and served over the catalog content HTTP server.
+ spec is a required field that defines the desired state of the ClusterCatalog.
+ The controller ensures that the catalog is unpacked and served over the catalog content HTTP server.
properties:
availabilityMode:
default: Available
description: |-
- availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
- availabilityMode is optional.
+ availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
- Allowed values are "Available" and "Unavailable" and omitted.
+ Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
- When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
- Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
- and its contents as usable.
+ When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
+ Clients should consider this ClusterCatalog and its contents as usable.
- When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
- When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
- Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
- to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist.
+ When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
+ Clients should treat this the same as if the ClusterCatalog does not exist.
+ Use "Unavailable" when you want to keep the ClusterCatalog resource but have clients ignore it.
enum:
- Unavailable
- Available
@@ -81,19 +76,18 @@ spec:
priority:
default: 0
description: |-
- priority allows the user to define a priority for a ClusterCatalog.
- priority is optional.
+ priority is an optional field that defines a priority for this ClusterCatalog.
- A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
- A higher number means higher priority.
+ Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
+ Higher numbers mean higher priority.
- It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
- When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+ Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ Clients should prompt users for additional input to break the tie.
- When omitted, the default priority is 0 because that is the zero value of integers.
+ When omitted, the default priority is 0.
- Negative numbers can be used to specify a priority lower than the default.
- Positive numbers can be used to specify a priority higher than the default.
+ Use negative numbers to specify a priority lower than the default.
+ Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647.
@@ -101,15 +95,12 @@ spec:
type: integer
source:
description: |-
- source allows a user to define the source of a catalog.
- A "catalog" contains information on content that can be installed on a cluster.
- Providing a catalog source makes the contents of the catalog discoverable and usable by
- other on-cluster components.
- These on-cluster components may do a variety of things with this information, such as
- presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ source is a required field that defines the source of a catalog.
+ A catalog contains information on content that can be installed on a cluster.
+ The catalog source makes catalog contents discoverable and usable by other on-cluster components.
+ These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
- source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
@@ -120,25 +111,23 @@ spec:
properties:
image:
description: |-
- image is used to configure how catalog contents are sourced from an OCI image.
- This field is required when type is Image, and forbidden otherwise.
+ image configures how catalog contents are sourced from an OCI image.
+ It is required when type is Image, and forbidden otherwise.
properties:
pollIntervalMinutes:
description: |-
- pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
- pollIntervalMinutes is optional.
- pollIntervalMinutes can not be specified when ref is a digest-based reference.
+ pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
+ You cannot specify pollIntervalMinutes when ref is a digest-based reference.
- When omitted, the image will not be polled for new content.
+ When omitted, the image is not polled for new content.
minimum: 1
type: integer
ref:
description: |-
- ref allows users to define the reference to a container image containing Catalog contents.
- ref is required.
- ref can not be more than 1000 characters.
+ ref is a required field that defines the reference to a container image containing catalog contents.
+ It cannot be more than 1000 characters.
- A reference can be broken down into 3 parts - the domain, name, and identifier.
+ A reference has 3 parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
@@ -221,12 +210,11 @@ spec:
: true'
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type.
enum:
- Image
@@ -244,31 +232,30 @@ spec:
type: object
status:
description: |-
- status contains information about the state of the ClusterCatalog such as:
- - Whether or not the catalog contents are being served via the catalog content HTTP server
- - Whether or not the ClusterCatalog is progressing to a new state
+ status contains the following information about the state of the ClusterCatalog:
+ - Whether the catalog contents are being served via the catalog content HTTP server
+ - Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved
properties:
conditions:
description: |-
- conditions is a representation of the current state for this ClusterCatalog.
+ conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
- The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
- When it has a status of True and a reason of Available, the contents of the catalog are being served.
- When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
- When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+ The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
+ - When status is True and reason is Available, the catalog contents are being served.
+ - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
+ - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
- The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
- When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
- When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+ The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
+ - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
+ - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
- In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
- catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
- contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
- to the contents we identify that there are updates to the contents.
+ If the initial contents have been fetched and polling later identifies updates, both conditions can be active simultaneously:
+ - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
+ - The Progressing condition is True with reason Retrying because the system is working to serve the new version.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
@@ -329,11 +316,9 @@ spec:
x-kubernetes-list-type: map
lastUnpacked:
description: |-
- lastUnpacked represents the last time the contents of the
- catalog were extracted from their source format. As an example,
- when using an Image source, the OCI image will be pulled and the
- image layers written to a file-system backed cache. We refer to the
- act of this extraction from the source format as "unpacking".
+ lastUnpacked represents the last time the catalog contents were extracted from their source format.
+ For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
+ This extraction from the source format is called "unpacking".
format: date-time
type: string
resolvedSource:
@@ -342,14 +327,14 @@ spec:
properties:
image:
description: |-
- image is a field containing resolution information for a catalog sourced from an image.
- This field must be set when type is Image, and forbidden otherwise.
+ image contains resolution information for a catalog sourced from an image.
+ It must be set when type is Image, and forbidden otherwise.
properties:
ref:
description: |-
ref contains the resolved image digest-based reference.
- The digest format is used so users can use other tooling to fetch the exact
- OCI manifests that were used to extract the catalog contents.
+ The digest format allows you to use other tooling to fetch the exact OCI manifests
+ that were used to extract the catalog contents.
maxLength: 1000
type: string
x-kubernetes-validations:
@@ -383,12 +368,11 @@ spec:
type: object
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", information about the resolved image source will be set in the 'image' field.
+ When set to "Image", information about the resolved image source is set in the image field.
enum:
- Image
type: string
@@ -407,19 +391,16 @@ spec:
properties:
base:
description: |-
- base is a cluster-internal URL that provides endpoints for
- accessing the content of the catalog.
+ base is a cluster-internal URL that provides endpoints for accessing the catalog content.
- It is expected that clients append the path for the endpoint they wish
- to access.
+ Clients should append the path for the endpoint they want to access.
- Currently, only a single endpoint is served and is accessible at the path
- /api/v1.
+ Currently, only a single endpoint is served and is accessible at the path /api/v1.
The endpoints served for the v1 API are:
- - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+ - /all - this endpoint returns the entire catalog contents in the FBC format
- As the needs of users and clients of the evolve, new endpoints may be added.
+ New endpoints may be added as needs evolve.
maxLength: 525
type: string
x-kubernetes-validations:
diff --git a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml
index 0d1bbd71c..55687b567 100644
--- a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml
+++ b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml
@@ -59,9 +59,9 @@ spec:
properties:
config:
description: |-
- config is an optional field used to specify bundle specific configuration
- used to configure the bundle. Configuration is bundle specific and a bundle may provide
- a configuration schema. When not specified, the default configuration of the resolved bundle will be used.
+ config is optional and specifies bundle-specific configuration.
+ Configuration is bundle-specific and a bundle may provide a configuration schema.
+ When not specified, the default configuration of the resolved bundle is used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema the bundle is deemed to not be configurable. More information on how
@@ -69,21 +69,19 @@ spec:
properties:
configType:
description: |-
- configType is a required reference to the type of configuration source.
+ configType is required and specifies the type of configuration source.
- Allowed values are "Inline"
+ The only allowed value is "Inline".
- When this field is set to "Inline", the cluster extension configuration is defined inline within the
- ClusterExtension resource.
+ When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource.
enum:
- Inline
type: string
inline:
description: |-
- inline contains JSON or YAML values specified directly in the
- ClusterExtension.
+ inline contains JSON or YAML values specified directly in the ClusterExtension.
- inline is used to specify arbitrary configuration values for the ClusterExtension.
+ It is used to specify arbitrary configuration values for the ClusterExtension.
It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
The configuration values are validated at runtime against a JSON schema provided by the bundle.
minProperties: 1
@@ -99,37 +97,35 @@ spec:
: !has(self.inline)'
install:
description: |-
- install is an optional field used to configure the installation options
- for the ClusterExtension such as the pre-flight check configuration.
+ install is optional and configures installation options for the ClusterExtension,
+ such as the pre-flight check configuration.
properties:
preflight:
description: |-
- preflight is an optional field that can be used to configure the checks that are
- run before installation or upgrade of the content for the package specified in the packageName field.
+ preflight is optional and configures the checks that run before installation or upgrade
+ of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
- When not specified, the default configuration will be used.
+ When not specified, the default configuration is used.
properties:
crdUpgradeSafety:
description: |-
- crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
- checks that run prior to upgrades of installed content.
+ crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
+ before upgrades of installed content.
- The CRD Upgrade Safety pre-flight check safeguards from unintended
- consequences of upgrading a CRD, such as data loss.
+ The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
+ such as data loss.
properties:
enforcement:
description: |-
- enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
+ enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
- When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
- when performing an upgrade operation. This should be used with caution as
- unintended consequences such as data loss can occur.
+ When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
+ Use this option with caution as unintended consequences such as data loss can occur.
- When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
- performing an upgrade operation.
+ When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation.
enum:
- None
- Strict
@@ -151,16 +147,15 @@ spec:
rule: has(self.preflight)
namespace:
description: |-
- namespace is a reference to a Kubernetes namespace.
- This is the namespace in which the provided ServiceAccount must exist.
- It also designates the default namespace where namespace-scoped resources
- for the extension are applied to the cluster.
+ namespace specifies a Kubernetes namespace.
+ This is the namespace where the provided ServiceAccount must exist.
+ It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
- namespace is required, immutable, and follows the DNS label standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
- start and end with an alphanumeric character, and be no longer than 63 characters
+ The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
+ and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123
maxLength: 63
@@ -172,24 +167,22 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
serviceAccount:
description: |-
- serviceAccount is a reference to a ServiceAccount used to perform all interactions
- with the cluster that are required to manage the extension.
+ serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
+ that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
- serviceAccount is required.
+ The serviceAccount field is required.
properties:
name:
description: |-
- name is a required, immutable reference to the name of the ServiceAccount
- to be used for installation and management of the content for the package
- specified in the packageName field.
+ name is a required, immutable reference to the name of the ServiceAccount used for installation
+ and management of the content for the package specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
- name follows the DNS subdomain standard as defined in [RFC 1123].
- It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ The name field follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
@@ -218,11 +211,11 @@ spec:
type: object
source:
description: |-
- source is a required field which selects the installation source of content
- for this ClusterExtension. Selection is performed by setting the sourceType.
+ source is required and selects the installation source of content for this ClusterExtension.
+ Set the sourceType field to perform the selection.
- Catalog is currently the only implemented sourceType, and setting the
- sourcetype to "Catalog" requires the catalog field to also be defined.
+ Catalog is currently the only implemented sourceType.
+ Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
@@ -233,30 +226,29 @@ spec:
properties:
catalog:
description: |-
- catalog is used to configure how information is sourced from a catalog.
- This field is required when sourceType is "Catalog", and forbidden otherwise.
+ catalog configures how information is sourced from a catalog.
+ It is required when sourceType is "Catalog", and forbidden otherwise.
properties:
channels:
description: |-
- channels is an optional reference to a set of channels belonging to
- the package specified in the packageName field.
+ channels is optional and specifies a set of channels belonging to the package
+ specified in the packageName field.
- A "channel" is a package-author-defined stream of updates for an extension.
+ A channel is a package-author-defined stream of updates for an extension.
- Each channel in the list must follow the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters. No more than 256 channels can be specified.
+ Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
+ You can specify no more than 256 channels.
- When specified, it is used to constrain the set of installable bundles and
- the automated upgrade path. This constraint is an AND operation with the
- version field. For example:
+ When specified, it constrains the set of installable bundles and the automated upgrade path.
+ This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- - Automatic upgrades will be constrained to upgrade edges defined by the selected channel
+ - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
+ - Automatic upgrades are constrained to upgrade edges defined by the selected channel
- When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
+ When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
@@ -286,13 +278,12 @@ spec:
type: array
packageName:
description: |-
- packageName is a reference to the name of the package to be installed
- and is used to filter the content from catalogs.
+ packageName specifies the name of the package to be installed and is used to filter
+ the content from catalogs.
- packageName is required, immutable, and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
@@ -319,12 +310,9 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
selector:
description: |-
- selector is an optional field that can be used
- to filter the set of ClusterCatalogs used in the bundle
- selection process.
+ selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
- When unspecified, all ClusterCatalogs will be used in
- the bundle selection process.
+ When unspecified, all ClusterCatalogs are used in the bundle selection process.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -372,35 +360,34 @@ spec:
upgradeConstraintPolicy:
default: CatalogProvided
description: |-
- upgradeConstraintPolicy is an optional field that controls whether
- the upgrade path(s) defined in the catalog are enforced for the package
- referenced in the packageName field.
+ upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
+ are enforced for the package referenced in the packageName field.
- Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
+ Allowed values are "CatalogProvided", "SelfCertified", or omitted.
- When this field is set to "CatalogProvided", automatic upgrades will only occur
- when upgrade constraints specified by the package author are met.
+ When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
+ author are met.
- When this field is set to "SelfCertified", the upgrade constraints specified by
- the package author are ignored. This allows for upgrades and downgrades to
- any version of the package. This is considered a dangerous operation as it
- can lead to unknown and potentially disastrous outcomes, such as data
- loss. It is assumed that users have independently verified changes when
- using this option.
+ When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
+ This allows upgrades and downgrades to any version of the package.
+ This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
+ such as data loss.
+ Use this option only if you have independently verified the changes.
- When this field is omitted, the default value is "CatalogProvided".
+ When omitted, the default value is "CatalogProvided".
enum:
- CatalogProvided
- SelfCertified
type: string
version:
description: |-
- version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
+ version is an optional semver constraint (a specific version or range of versions).
+ When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
- Version ranges are composed of comma- or space-delimited values and one or
- more comparison operators, known as comparison strings. Additional
- comparison strings can be added using the OR operator (||).
+ Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
+ known as comparison strings.
+ You can add more comparison strings using the OR operator (||).
# Range Comparisons
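Editor's note: reading the range-comparison rules above, a few hedged examples of values the version field could take (alternative single-field fragments, not one document):

    version: "1.1.x"                       # wildcard: any patch release of 1.1
    version: ">=1.0.0, <2.0.0"             # comma-delimited comparison strings are ANDed
    version: ">=1.0.0, <2.0.0 || >3.0.0"   # the OR operator (||) joins alternative ranges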
@@ -478,13 +465,12 @@ spec:
type: object
sourceType:
description: |-
- sourceType is a required reference to the type of install source.
+ sourceType is required and specifies the type of install source.
- Allowed values are "Catalog"
+ The only allowed value is "Catalog".
- When this field is set to "Catalog", information for determining the
- appropriate bundle of content to install will be fetched from
- ClusterCatalog resources existing on the cluster.
+ When set to "Catalog", information for determining the appropriate bundle of content to install
+ is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set.
enum:
- Catalog
@@ -592,9 +578,9 @@ spec:
description: |-
The set of condition types which apply to all spec.source variations are Installed and Progressing.
- The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
+ The Installed condition represents whether the bundle has been installed for this ClusterExtension:
+ - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ - When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
@@ -603,12 +589,12 @@ spec:
When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.
- When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
- These are indications from a package owner to guide users away from a particular package, channel, or bundle.
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
+ When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ These are indications from a package owner to guide users away from a particular package, channel, or bundle:
+ - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ - PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ - Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
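Editor's note: for orientation, a hypothetical status fragment showing how the Installed and Progressing conditions described above might appear (all values illustrative):

    status:
      conditions:
        - type: Installed
          status: "True"
          reason: Succeeded    # the bundle installed successfully
        - type: Progressing
          status: "True"
          reason: Succeeded    # ready to progress towards future states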
@@ -673,17 +659,16 @@ spec:
properties:
bundle:
description: |-
- bundle is a required field which represents the identifying attributes of a bundle.
+ bundle is required and represents the identifying attributes of a bundle.
- A "bundle" is a versioned set of content that represents the resources that
- need to be applied to a cluster to install a package.
+ A "bundle" is a versioned set of content that represents the resources that need to be applied
+ to a cluster to install a package.
properties:
name:
description: |-
- name is required and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ name is required and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
type: string
x-kubernetes-validations:
- message: packageName must be a valid DNS1123 subdomain.
@@ -693,8 +678,8 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
version:
description: |-
- version is a required field and is a reference to the version that this bundle represents
- version follows the semantic versioning standard as defined in https://semver.org/.
+ version is required and references the version that this bundle represents.
+ It follows the semantic versioning standard as defined in https://semver.org/.
type: string
x-kubernetes-validations:
- message: version must be well-formed semver
diff --git a/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml b/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml
index a0983e41f..e1316237c 100644
--- a/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml
+++ b/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml
@@ -59,37 +59,35 @@ spec:
properties:
install:
description: |-
- install is an optional field used to configure the installation options
- for the ClusterExtension such as the pre-flight check configuration.
+ install is optional and configures installation options for the ClusterExtension,
+ such as the pre-flight check configuration.
properties:
preflight:
description: |-
- preflight is an optional field that can be used to configure the checks that are
- run before installation or upgrade of the content for the package specified in the packageName field.
+ preflight is optional and configures the checks that run before installation or upgrade
+ of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
- When not specified, the default configuration will be used.
+ When not specified, the default configuration is used.
properties:
crdUpgradeSafety:
description: |-
- crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
- checks that run prior to upgrades of installed content.
+ crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
+ before upgrades of installed content.
- The CRD Upgrade Safety pre-flight check safeguards from unintended
- consequences of upgrading a CRD, such as data loss.
+ The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
+ such as data loss.
properties:
enforcement:
description: |-
- enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
+ enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
- When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
- when performing an upgrade operation. This should be used with caution as
- unintended consequences such as data loss can occur.
+ When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
+ Use this option with caution as unintended consequences such as data loss can occur.
- When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
- performing an upgrade operation.
+ When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation.
enum:
- None
- Strict
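Editor's note: a minimal sketch of the preflight configuration described above, disabling the CRD Upgrade Safety check (use with the caution the description calls out):

    install:
      preflight:
        crdUpgradeSafety:
          enforcement: None   # skips the CRD Upgrade Safety pre-flight check on upgrades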
@@ -111,16 +109,15 @@ spec:
rule: has(self.preflight)
namespace:
description: |-
- namespace is a reference to a Kubernetes namespace.
- This is the namespace in which the provided ServiceAccount must exist.
- It also designates the default namespace where namespace-scoped resources
- for the extension are applied to the cluster.
+ namespace specifies a Kubernetes namespace.
+ This is the namespace where the provided ServiceAccount must exist.
+ It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
- namespace is required, immutable, and follows the DNS label standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
- start and end with an alphanumeric character, and be no longer than 63 characters
+ The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
+ and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123
maxLength: 63
@@ -132,24 +129,22 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
serviceAccount:
description: |-
- serviceAccount is a reference to a ServiceAccount used to perform all interactions
- with the cluster that are required to manage the extension.
+ serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
+ that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
- serviceAccount is required.
+ The serviceAccount field is required.
properties:
name:
description: |-
- name is a required, immutable reference to the name of the ServiceAccount
- to be used for installation and management of the content for the package
- specified in the packageName field.
+ name is a required, immutable reference to the name of the ServiceAccount used for installation
+ and management of the content for the package specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
- name follows the DNS subdomain standard as defined in [RFC 1123].
- It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ The name field follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
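Editor's note: putting the namespace and serviceAccount fields above together, a hedged spec fragment (both names are hypothetical):

    spec:
      namespace: argocd           # the ServiceAccount must exist here; also the default target namespace
      serviceAccount:
        name: argocd-installer    # hypothetical ServiceAccount used to manage the extension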
@@ -178,11 +173,11 @@ spec:
type: object
source:
description: |-
- source is a required field which selects the installation source of content
- for this ClusterExtension. Selection is performed by setting the sourceType.
+ source is required and selects the installation source of content for this ClusterExtension.
+ You select the source by setting the sourceType field.
- Catalog is currently the only implemented sourceType, and setting the
- sourcetype to "Catalog" requires the catalog field to also be defined.
+ Catalog is currently the only implemented sourceType.
+ Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
@@ -193,30 +188,29 @@ spec:
properties:
catalog:
description: |-
- catalog is used to configure how information is sourced from a catalog.
- This field is required when sourceType is "Catalog", and forbidden otherwise.
+ catalog configures how information is sourced from a catalog.
+ It is required when sourceType is "Catalog", and forbidden otherwise.
properties:
channels:
description: |-
- channels is an optional reference to a set of channels belonging to
- the package specified in the packageName field.
+ channels is optional and specifies a set of channels belonging to the package
+ specified in the packageName field.
- A "channel" is a package-author-defined stream of updates for an extension.
+ A channel is a package-author-defined stream of updates for an extension.
- Each channel in the list must follow the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters. No more than 256 channels can be specified.
+ Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
+ You can specify no more than 256 channels.
- When specified, it is used to constrain the set of installable bundles and
- the automated upgrade path. This constraint is an AND operation with the
- version field. For example:
+ When specified, it constrains the set of installable bundles and the automated upgrade path.
+ This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- - Automatic upgrades will be constrained to upgrade edges defined by the selected channel
+ - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
+ - Automatic upgrades are constrained to upgrade edges defined by the selected channel
- When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
+ When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
@@ -246,13 +240,12 @@ spec:
type: array
packageName:
description: |-
- packageName is a reference to the name of the package to be installed
- and is used to filter the content from catalogs.
+ packageName specifies the name of the package to be installed and is used to filter
+ the content from catalogs.
- packageName is required, immutable, and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
@@ -279,12 +272,9 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
selector:
description: |-
- selector is an optional field that can be used
- to filter the set of ClusterCatalogs used in the bundle
- selection process.
+ selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
- When unspecified, all ClusterCatalogs will be used in
- the bundle selection process.
+ When unspecified, all ClusterCatalogs are used in the bundle selection process.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -332,35 +322,34 @@ spec:
upgradeConstraintPolicy:
default: CatalogProvided
description: |-
- upgradeConstraintPolicy is an optional field that controls whether
- the upgrade path(s) defined in the catalog are enforced for the package
- referenced in the packageName field.
+ upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
+ are enforced for the package referenced in the packageName field.
- Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
+ Allowed values are "CatalogProvided", "SelfCertified", or omitted.
- When this field is set to "CatalogProvided", automatic upgrades will only occur
- when upgrade constraints specified by the package author are met.
+ When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
+ author are met.
- When this field is set to "SelfCertified", the upgrade constraints specified by
- the package author are ignored. This allows for upgrades and downgrades to
- any version of the package. This is considered a dangerous operation as it
- can lead to unknown and potentially disastrous outcomes, such as data
- loss. It is assumed that users have independently verified changes when
- using this option.
+ When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
+ This allows upgrades and downgrades to any version of the package.
+ This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
+ such as data loss.
+ Use this option only if you have independently verified the changes.
- When this field is omitted, the default value is "CatalogProvided".
+ When omitted, the default value is "CatalogProvided".
enum:
- CatalogProvided
- SelfCertified
type: string
version:
description: |-
- version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
+ version is an optional semver constraint (a specific version or range of versions).
+ When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
- Version ranges are composed of comma- or space-delimited values and one or
- more comparison operators, known as comparison strings. Additional
- comparison strings can be added using the OR operator (||).
+ Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
+ known as comparison strings.
+ You can add more comparison strings using the OR operator (||).
# Range Comparisons
@@ -438,13 +427,12 @@ spec:
type: object
sourceType:
description: |-
- sourceType is a required reference to the type of install source.
+ sourceType is required and specifies the type of install source.
- Allowed values are "Catalog"
+ The only allowed value is "Catalog".
- When this field is set to "Catalog", information for determining the
- appropriate bundle of content to install will be fetched from
- ClusterCatalog resources existing on the cluster.
+ When set to "Catalog", information for determining the appropriate bundle of content to install
+ is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set.
enum:
- Catalog
@@ -470,21 +458,21 @@ spec:
description: |-
The set of condition types which apply to all spec.source variations are Installed and Progressing.
- The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
+ The Installed condition represents whether the bundle has been installed for this ClusterExtension:
+ - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ - When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
- When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
- These are indications from a package owner to guide users away from a particular package, channel, or bundle.
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
+ When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ These are indications from a package owner to guide users away from a particular package, channel, or bundle:
+ - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ - PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ - Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
@@ -549,17 +537,16 @@ spec:
properties:
bundle:
description: |-
- bundle is a required field which represents the identifying attributes of a bundle.
+ bundle is required and represents the identifying attributes of a bundle.
- A "bundle" is a versioned set of content that represents the resources that
- need to be applied to a cluster to install a package.
+ A "bundle" is a versioned set of content that represents the resources that need to be applied
+ to a cluster to install a package.
properties:
name:
description: |-
- name is required and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ name is required and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
type: string
x-kubernetes-validations:
- message: packageName must be a valid DNS1123 subdomain.
@@ -569,8 +556,8 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
version:
description: |-
- version is a required field and is a reference to the version that this bundle represents
- version follows the semantic versioning standard as defined in https://semver.org/.
+ version is required and references the version that this bundle represents.
+ It follows the semantic versioning standard as defined in https://semver.org/.
type: string
x-kubernetes-validations:
- message: version must be well-formed semver
diff --git a/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml b/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml
index d6fec9b5f..44c5bdea2 100644
--- a/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml
+++ b/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml
@@ -5,6 +5,10 @@ data:
[[registry]]
prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000"
location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"
+
+ [[registry]]
+ prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000"
+ location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"
kind: ConfigMap
metadata:
annotations:
diff --git a/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml b/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml
index fa4b11aca..ce1ff3c41 100644
--- a/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml
+++ b/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml
@@ -17,6 +17,7 @@ spec:
image: busybox:1.36
name: tar
securityContext:
+ readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop:
diff --git a/kind-config/kind-config-2node.yaml b/kind-config/kind-config-2node.yaml
new file mode 100644
index 000000000..5532a9932
--- /dev/null
+++ b/kind-config/kind-config-2node.yaml
@@ -0,0 +1,45 @@
+apiVersion: kind.x-k8s.io/v1alpha4
+kind: Cluster
+nodes:
+ - role: control-plane
+ extraPortMappings:
+ # e2e image registry service's NodePort
+ - containerPort: 30000
+ hostPort: 30000
+ listenAddress: "127.0.0.1"
+ protocol: tcp
+ # prometheus metrics service's NodePort
+ - containerPort: 30900
+ hostPort: 30900
+ listenAddress: "127.0.0.1"
+ protocol: tcp
+ kubeadmConfigPatches:
+ - |
+ kind: ClusterConfiguration
+ apiServer:
+ extraArgs:
+ enable-admission-plugins: OwnerReferencesPermissionEnforcement
+ - |
+ kind: InitConfiguration
+ nodeRegistration:
+ kubeletExtraArgs:
+ node-labels: "ingress-ready=true"
+ taints: []
+ extraMounts:
+ - hostPath: ./hack/kind-config/containerd/certs.d
+ containerPath: /etc/containerd/certs.d
+ - role: control-plane
+ kubeadmConfigPatches:
+ - |
+ kind: JoinConfiguration
+ nodeRegistration:
+ kubeletExtraArgs:
+ node-labels: "ingress-ready=true"
+ taints: []
+ extraMounts:
+ - hostPath: ./hack/kind-config/containerd/certs.d
+ containerPath: /etc/containerd/certs.d
+containerdConfigPatches:
+ - |-
+ [plugins."io.containerd.grpc.v1.cri".registry]
+ config_path = "/etc/containerd/certs.d"
diff --git a/kind-config.yaml b/kind-config/kind-config.yaml
similarity index 100%
rename from kind-config.yaml
rename to kind-config/kind-config.yaml
diff --git a/manifests/experimental-e2e.yaml b/manifests/experimental-e2e.yaml
index 1b583d207..fbc5b4a53 100644
--- a/manifests/experimental-e2e.yaml
+++ b/manifests/experimental-e2e.yaml
@@ -152,6 +152,10 @@ data:
[[registry]]
prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000"
location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"
+
+ [[registry]]
+ prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000"
+ location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"
kind: ConfigMap
metadata:
annotations:
@@ -211,7 +215,7 @@ spec:
schema:
openAPIV3Schema:
description: |-
- ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+ ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
properties:
apiVersion:
@@ -233,29 +237,24 @@ spec:
type: object
spec:
description: |-
- spec is the desired state of the ClusterCatalog.
- spec is required.
- The controller will work to ensure that the desired
- catalog is unpacked and served over the catalog content HTTP server.
+ spec is a required field that defines the desired state of the ClusterCatalog.
+ The controller ensures that the catalog is unpacked and served over the catalog content HTTP server.
properties:
availabilityMode:
default: Available
description: |-
- availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
- availabilityMode is optional.
+ availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
- Allowed values are "Available" and "Unavailable" and omitted.
+ Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
- When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
- Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
- and its contents as usable.
+ When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
+ Clients should consider this ClusterCatalog and its contents as usable.
- When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
- When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
- Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
- to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist.
+ When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
+ Clients should treat this the same as if the ClusterCatalog does not exist.
+ Use "Unavailable" to take the catalog contents out of service without deleting the ClusterCatalog itself.
enum:
- Unavailable
- Available
@@ -263,19 +262,18 @@ spec:
priority:
default: 0
description: |-
- priority allows the user to define a priority for a ClusterCatalog.
- priority is optional.
+ priority is an optional field that defines a priority for this ClusterCatalog.
- A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
- A higher number means higher priority.
+ Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
+ Higher numbers mean higher priority.
- It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
- When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+ Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ Clients should prompt users for additional input to break the tie.
- When omitted, the default priority is 0 because that is the zero value of integers.
+ When omitted, the default priority is 0.
- Negative numbers can be used to specify a priority lower than the default.
- Positive numbers can be used to specify a priority higher than the default.
+ Use negative numbers to specify a priority lower than the default.
+ Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647.
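Editor's note: a hedged ClusterCatalog spec fragment exercising the two fields above (values are illustrative):

    spec:
      priority: 100                   # wins tie-breaks against catalogs left at the default 0
      availabilityMode: Unavailable   # keep the resource but stop serving its contents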
@@ -283,15 +281,12 @@ spec:
type: integer
source:
description: |-
- source allows a user to define the source of a catalog.
- A "catalog" contains information on content that can be installed on a cluster.
- Providing a catalog source makes the contents of the catalog discoverable and usable by
- other on-cluster components.
- These on-cluster components may do a variety of things with this information, such as
- presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ source is a required field that defines the source of a catalog.
+ A catalog contains information on content that can be installed on a cluster.
+ The catalog source makes catalog contents discoverable and usable by other on-cluster components.
+ These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
- source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
@@ -302,25 +297,23 @@ spec:
properties:
image:
description: |-
- image is used to configure how catalog contents are sourced from an OCI image.
- This field is required when type is Image, and forbidden otherwise.
+ image configures how catalog contents are sourced from an OCI image.
+ It is required when type is Image, and forbidden otherwise.
properties:
pollIntervalMinutes:
description: |-
- pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
- pollIntervalMinutes is optional.
- pollIntervalMinutes can not be specified when ref is a digest-based reference.
+ pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
+ You cannot specify pollIntervalMinutes when ref is a digest-based reference.
- When omitted, the image will not be polled for new content.
+ When omitted, the image is not polled for new content.
minimum: 1
type: integer
ref:
description: |-
- ref allows users to define the reference to a container image containing Catalog contents.
- ref is required.
- ref can not be more than 1000 characters.
+ ref is a required field that defines the reference to a container image containing catalog contents.
+ It cannot be more than 1000 characters.
- A reference can be broken down into 3 parts - the domain, name, and identifier.
+ A reference has 3 parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
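Editor's note: a hedged sketch of the image source fields above; the image reference is hypothetical:

    source:
      type: Image
      image:
        ref: quay.io/example/catalog:latest   # hypothetical tag-based reference
        pollIntervalMinutes: 15               # allowed here; forbidden for digest-based refs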
@@ -403,12 +396,11 @@ spec:
: true'
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type.
enum:
- Image
@@ -426,31 +418,30 @@ spec:
type: object
status:
description: |-
- status contains information about the state of the ClusterCatalog such as:
- - Whether or not the catalog contents are being served via the catalog content HTTP server
- - Whether or not the ClusterCatalog is progressing to a new state
+ status contains the following information about the state of the ClusterCatalog:
+ - Whether the catalog contents are being served via the catalog content HTTP server
+ - Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved
properties:
conditions:
description: |-
- conditions is a representation of the current state for this ClusterCatalog.
+ conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
- The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
- When it has a status of True and a reason of Available, the contents of the catalog are being served.
- When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
- When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+ The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
+ - When status is True and reason is Available, the catalog contents are being served.
+ - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
+ - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
- The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
- When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
- When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+ The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
+ - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
+ - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
- In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
- catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
- contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
- to the contents we identify that there are updates to the contents.
+ If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously:
+ - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
+ - The Progressing condition is True with reason Retrying because the system is working to serve the new version.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
@@ -511,11 +502,9 @@ spec:
x-kubernetes-list-type: map
lastUnpacked:
description: |-
- lastUnpacked represents the last time the contents of the
- catalog were extracted from their source format. As an example,
- when using an Image source, the OCI image will be pulled and the
- image layers written to a file-system backed cache. We refer to the
- act of this extraction from the source format as "unpacking".
+ lastUnpacked represents the last time the catalog contents were extracted from their source format.
+ For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
+ This extraction from the source format is called "unpacking".
format: date-time
type: string
resolvedSource:
@@ -524,14 +513,14 @@ spec:
properties:
image:
description: |-
- image is a field containing resolution information for a catalog sourced from an image.
- This field must be set when type is Image, and forbidden otherwise.
+ image contains resolution information for a catalog sourced from an image.
+ It must be set when type is Image, and forbidden otherwise.
properties:
ref:
description: |-
ref contains the resolved image digest-based reference.
- The digest format is used so users can use other tooling to fetch the exact
- OCI manifests that were used to extract the catalog contents.
+ The digest format allows you to use other tooling to fetch the exact OCI manifests
+ that were used to extract the catalog contents.
maxLength: 1000
type: string
x-kubernetes-validations:
@@ -565,12 +554,11 @@ spec:
type: object
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", information about the resolved image source will be set in the 'image' field.
+ When set to "Image", information about the resolved image source is set in the image field.
enum:
- Image
type: string
@@ -589,19 +577,16 @@ spec:
properties:
base:
description: |-
- base is a cluster-internal URL that provides endpoints for
- accessing the content of the catalog.
+ base is a cluster-internal URL that provides endpoints for accessing the catalog content.
- It is expected that clients append the path for the endpoint they wish
- to access.
+ Clients should append the path for the endpoint they want to access.
- Currently, only a single endpoint is served and is accessible at the path
- /api/v1.
+ Currently, only a single endpoint is served and is accessible at the path /api/v1.
The endpoints served for the v1 API are:
- - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+ - /all - this endpoint returns the entire catalog contents in the FBC format
- As the needs of users and clients of the evolve, new endpoints may be added.
+ New endpoints may be added as needs evolve.
maxLength: 525
type: string
x-kubernetes-validations:
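Editor's note: a hypothetical illustration of the resolved URL; the actual field layout, host, and path depend on how catalogd is deployed in a given cluster:

    status:
      urls:
        base: https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio   # hypothetical value
    # a client would append the endpoint path, fetching <base>/api/v1/all for the full FBC contents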
@@ -959,9 +944,9 @@ spec:
properties:
config:
description: |-
- config is an optional field used to specify bundle specific configuration
- used to configure the bundle. Configuration is bundle specific and a bundle may provide
- a configuration schema. When not specified, the default configuration of the resolved bundle will be used.
+ config is optional and specifies bundle-specific configuration.
+ A bundle may provide a schema for its configuration.
+ When not specified, the default configuration of the resolved bundle is used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema the bundle is deemed to not be configurable. More information on how
@@ -969,21 +954,19 @@ spec:
properties:
configType:
description: |-
- configType is a required reference to the type of configuration source.
+ configType is required and specifies the type of configuration source.
- Allowed values are "Inline"
+ The only allowed value is "Inline".
- When this field is set to "Inline", the cluster extension configuration is defined inline within the
- ClusterExtension resource.
+ When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource.
enum:
- Inline
type: string
inline:
description: |-
- inline contains JSON or YAML values specified directly in the
- ClusterExtension.
+ inline contains JSON or YAML values specified directly in the ClusterExtension.
- inline is used to specify arbitrary configuration values for the ClusterExtension.
+ It is used to specify arbitrary configuration values for the ClusterExtension.
It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
The configuration values are validated at runtime against a JSON schema provided by the bundle.
minProperties: 1
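Editor's note: a minimal sketch of inline configuration using the fields above; the configuration key is hypothetical and would be validated against whatever schema the resolved bundle provides:

    config:
      configType: Inline
      inline:
        replicaCount: 2   # hypothetical bundle-defined option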
@@ -999,37 +982,35 @@ spec:
: !has(self.inline)'
install:
description: |-
- install is an optional field used to configure the installation options
- for the ClusterExtension such as the pre-flight check configuration.
+ install is optional and configures installation options for the ClusterExtension,
+ such as the pre-flight check configuration.
properties:
preflight:
description: |-
- preflight is an optional field that can be used to configure the checks that are
- run before installation or upgrade of the content for the package specified in the packageName field.
+ preflight is optional and configures the checks that run before installation or upgrade
+ of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
- When not specified, the default configuration will be used.
+ When not specified, the default configuration is used.
properties:
crdUpgradeSafety:
description: |-
- crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
- checks that run prior to upgrades of installed content.
+ crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
+ before upgrades of installed content.
- The CRD Upgrade Safety pre-flight check safeguards from unintended
- consequences of upgrading a CRD, such as data loss.
+ The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
+ such as data loss.
properties:
enforcement:
description: |-
- enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
+ enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
- When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
- when performing an upgrade operation. This should be used with caution as
- unintended consequences such as data loss can occur.
+ When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
+ Use this option with caution as unintended consequences such as data loss can occur.
- When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
- performing an upgrade operation.
+ When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation.
enum:
- None
- Strict
@@ -1051,16 +1032,15 @@ spec:
rule: has(self.preflight)
namespace:
description: |-
- namespace is a reference to a Kubernetes namespace.
- This is the namespace in which the provided ServiceAccount must exist.
- It also designates the default namespace where namespace-scoped resources
- for the extension are applied to the cluster.
+ namespace specifies a Kubernetes namespace.
+ This is the namespace where the provided ServiceAccount must exist.
+ It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
- namespace is required, immutable, and follows the DNS label standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
- start and end with an alphanumeric character, and be no longer than 63 characters
+ The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
+ and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123
maxLength: 63
@@ -1072,24 +1052,22 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
serviceAccount:
description: |-
- serviceAccount is a reference to a ServiceAccount used to perform all interactions
- with the cluster that are required to manage the extension.
+ serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
+ that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
- serviceAccount is required.
+ The serviceAccount field is required.
properties:
name:
description: |-
- name is a required, immutable reference to the name of the ServiceAccount
- to be used for installation and management of the content for the package
- specified in the packageName field.
+ name is a required, immutable reference to the name of the ServiceAccount used for installation
+ and management of the content for the package specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
- name follows the DNS subdomain standard as defined in [RFC 1123].
- It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ The name field follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
@@ -1118,11 +1096,11 @@ spec:
type: object
source:
description: |-
- source is a required field which selects the installation source of content
- for this ClusterExtension. Selection is performed by setting the sourceType.
+ source is required and selects the installation source of content for this ClusterExtension.
+ You select the source by setting the sourceType field.
- Catalog is currently the only implemented sourceType, and setting the
- sourcetype to "Catalog" requires the catalog field to also be defined.
+ Catalog is currently the only implemented sourceType.
+ Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
@@ -1133,30 +1111,29 @@ spec:
properties:
catalog:
description: |-
- catalog is used to configure how information is sourced from a catalog.
- This field is required when sourceType is "Catalog", and forbidden otherwise.
+ catalog configures how information is sourced from a catalog.
+ It is required when sourceType is "Catalog", and forbidden otherwise.
properties:
channels:
description: |-
- channels is an optional reference to a set of channels belonging to
- the package specified in the packageName field.
+ channels is optional and specifies a set of channels belonging to the package
+ specified in the packageName field.
- A "channel" is a package-author-defined stream of updates for an extension.
+ A channel is a package-author-defined stream of updates for an extension.
- Each channel in the list must follow the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters. No more than 256 channels can be specified.
+ Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
+ You can specify no more than 256 channels.
- When specified, it is used to constrain the set of installable bundles and
- the automated upgrade path. This constraint is an AND operation with the
- version field. For example:
+ When specified, it constrains the set of installable bundles and the automated upgrade path.
+ This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- - Automatic upgrades will be constrained to upgrade edges defined by the selected channel
+ - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
+ - Automatic upgrades are constrained to upgrade edges defined by the selected channel
- When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
+ When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
@@ -1186,13 +1163,12 @@ spec:
type: array
packageName:
description: |-
- packageName is a reference to the name of the package to be installed
- and is used to filter the content from catalogs.
+ packageName specifies the name of the package to be installed and is used to filter
+ the content from catalogs.
- packageName is required, immutable, and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
@@ -1219,12 +1195,9 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
selector:
description: |-
- selector is an optional field that can be used
- to filter the set of ClusterCatalogs used in the bundle
- selection process.
+ selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
- When unspecified, all ClusterCatalogs will be used in
- the bundle selection process.
+ When unspecified, all ClusterCatalogs are used in the bundle selection process.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -1272,35 +1245,34 @@ spec:
upgradeConstraintPolicy:
default: CatalogProvided
description: |-
- upgradeConstraintPolicy is an optional field that controls whether
- the upgrade path(s) defined in the catalog are enforced for the package
- referenced in the packageName field.
+ upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
+ are enforced for the package referenced in the packageName field.
- Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
+ Allowed values are "CatalogProvided", "SelfCertified", or omitted.
- When this field is set to "CatalogProvided", automatic upgrades will only occur
- when upgrade constraints specified by the package author are met.
+ When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
+ author are met.
- When this field is set to "SelfCertified", the upgrade constraints specified by
- the package author are ignored. This allows for upgrades and downgrades to
- any version of the package. This is considered a dangerous operation as it
- can lead to unknown and potentially disastrous outcomes, such as data
- loss. It is assumed that users have independently verified changes when
- using this option.
+ When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
+ This allows upgrades and downgrades to any version of the package.
+ This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
+ such as data loss.
+ Use this option only if you have independently verified the changes.
- When this field is omitted, the default value is "CatalogProvided".
+ When omitted, the default value is "CatalogProvided".
enum:
- CatalogProvided
- SelfCertified
type: string
version:
description: |-
- version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
+ version is an optional semver constraint (a specific version or range of versions).
+ When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
- Version ranges are composed of comma- or space-delimited values and one or
- more comparison operators, known as comparison strings. Additional
- comparison strings can be added using the OR operator (||).
+ Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
+ known as comparison strings.
+ You can add further comparison strings using the OR operator (||).
# Range Comparisons
@@ -1378,13 +1350,12 @@ spec:
type: object
sourceType:
description: |-
- sourceType is a required reference to the type of install source.
+ sourceType is required and specifies the type of install source.
- Allowed values are "Catalog"
+ The only allowed value is "Catalog".
- When this field is set to "Catalog", information for determining the
- appropriate bundle of content to install will be fetched from
- ClusterCatalog resources existing on the cluster.
+ When set to "Catalog", information for determining the appropriate bundle of content to install
+ is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set.
enum:
- Catalog
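
For orientation, a complete object using the source fields documented in these hunks might look like the following minimal sketch. The API group/version, names, package, and version range are illustrative assumptions, not values taken from this diff:

apiVersion: olm.operatorframework.io/v1   # assumed group/version
kind: ClusterExtension
metadata:
  name: argocd                            # hypothetical name
spec:
  namespace: argocd                       # hypothetical; must exist
  serviceAccount:
    name: argocd-installer                # hypothetical; must exist in spec.namespace
  source:
    sourceType: Catalog                   # the only implemented sourceType
    catalog:
      packageName: argocd-operator        # hypothetical package
      version: ">=1.0.0, <1.5.0"          # optional semver constraint
      upgradeConstraintPolicy: CatalogProvided  # the default; catalog upgrade edges enforced
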
@@ -1492,9 +1463,9 @@ spec:
description: |-
The set of condition types which apply to all spec.source variations are Installed and Progressing.
- The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
+ The Installed condition represents whether the bundle has been installed for this ClusterExtension:
+ - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ - When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
@@ -1503,12 +1474,12 @@ spec:
When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.
- When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
- These are indications from a package owner to guide users away from a particular package, channel, or bundle.
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
+ When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ These are indications from a package owner to guide users away from a particular package, channel, or bundle:
+ - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ - PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ - Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
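
As a concrete illustration of the condition semantics above, a successfully installed extension whose requested channel has been deprecated might report conditions like this sketch (all values illustrative; the reason strings on the deprecation conditions are assumptions):

status:
  conditions:
  - type: Installed
    status: "True"
    reason: Succeeded          # bundle installed successfully
  - type: Progressing
    status: "True"
    reason: Succeeded          # ready to progress toward new states
  - type: ChannelDeprecated
    status: "True"
    reason: Deprecated         # assumed reason string
  - type: Deprecated
    status: "True"             # rollup: present because ChannelDeprecated is present
    reason: Deprecated         # assumed reason string
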
@@ -1573,17 +1544,16 @@ spec:
properties:
bundle:
description: |-
- bundle is a required field which represents the identifying attributes of a bundle.
+ bundle is required and represents the identifying attributes of a bundle.
- A "bundle" is a versioned set of content that represents the resources that
- need to be applied to a cluster to install a package.
+ A "bundle" is a versioned set of content that represents the resources that need to be applied
+ to a cluster to install a package.
properties:
name:
description: |-
- name is required and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ name is required and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
type: string
x-kubernetes-validations:
- message: packageName must be a valid DNS1123 subdomain.
@@ -1593,8 +1563,8 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
version:
description: |-
- version is a required field and is a reference to the version that this bundle represents
- version follows the semantic versioning standard as defined in https://semver.org/.
+ version is required and references the version that this bundle represents.
+ It follows the semantic versioning standard as defined in https://semver.org/.
type: string
x-kubernetes-validations:
- message: version must be well-formed semver
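
For reference, the bundle attributes described above surface under status roughly as in this sketch; the bundle name and version are made up:

status:
  install:
    bundle:
      name: argocd-operator.v1.2.3   # hypothetical; a valid DNS1123 subdomain
      version: 1.2.3                 # well-formed semver
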
@@ -2159,6 +2129,7 @@ spec:
image: busybox:1.36
name: tar
securityContext:
+ readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop:
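
The readOnlyRootFilesystem setting added above typically sits alongside the rest of a restricted-profile securityContext; a generic sketch follows, where the runAsNonRoot and seccompProfile fields are common companions assumed here rather than taken from this manifest:

securityContext:
  readOnlyRootFilesystem: true        # the field added by this diff
  allowPrivilegeEscalation: false
  capabilities:
    drop:
    - ALL
  runAsNonRoot: true                  # assumed
  seccompProfile:
    type: RuntimeDefault              # assumed
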
diff --git a/manifests/experimental.yaml b/manifests/experimental.yaml
index 7bff36748..22c7db269 100644
--- a/manifests/experimental.yaml
+++ b/manifests/experimental.yaml
@@ -176,7 +176,7 @@ spec:
schema:
openAPIV3Schema:
description: |-
- ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+ ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
properties:
apiVersion:
@@ -198,29 +198,24 @@ spec:
type: object
spec:
description: |-
- spec is the desired state of the ClusterCatalog.
- spec is required.
- The controller will work to ensure that the desired
- catalog is unpacked and served over the catalog content HTTP server.
+ spec is a required field that defines the desired state of the ClusterCatalog.
+ The controller ensures that the catalog is unpacked and served over the catalog content HTTP server.
properties:
availabilityMode:
default: Available
description: |-
- availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
- availabilityMode is optional.
+ availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
- Allowed values are "Available" and "Unavailable" and omitted.
+ Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
- When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
- Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
- and its contents as usable.
+ When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
+ Clients should consider this ClusterCatalog and its contents as usable.
- When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
- When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
- Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
- to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist.
+ When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
+ Clients should treat this the same as if the ClusterCatalog does not exist.
+ Use "Unavailable" when you want to stop serving a catalog without deleting the ClusterCatalog resource.
enum:
- Unavailable
- Available
@@ -228,19 +223,18 @@ spec:
priority:
default: 0
description: |-
- priority allows the user to define a priority for a ClusterCatalog.
- priority is optional.
+ priority is an optional field that defines a priority for this ClusterCatalog.
- A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
- A higher number means higher priority.
+ Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
+ Higher numbers mean higher priority.
- It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
- When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+ Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ In that case, clients should prompt their users for additional input to break the tie.
- When omitted, the default priority is 0 because that is the zero value of integers.
+ When omitted, the default priority is 0.
- Negative numbers can be used to specify a priority lower than the default.
- Positive numbers can be used to specify a priority higher than the default.
+ Use negative numbers to specify a priority lower than the default.
+ Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647.
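
Combining the two fields above, a catalog that is kept on the cluster but deprioritized and hidden from clients might be declared like this sketch (names and image are placeholders; the API group/version is assumed):

apiVersion: olm.operatorframework.io/v1   # assumed group/version
kind: ClusterCatalog
metadata:
  name: legacy-catalog                    # hypothetical
spec:
  availabilityMode: Unavailable           # treated as if the catalog does not exist
  priority: -100                          # lower than the default of 0
  source:
    type: Image
    image:
      ref: quay.io/example/catalog:latest # placeholder image reference
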
@@ -248,15 +242,12 @@ spec:
type: integer
source:
description: |-
- source allows a user to define the source of a catalog.
- A "catalog" contains information on content that can be installed on a cluster.
- Providing a catalog source makes the contents of the catalog discoverable and usable by
- other on-cluster components.
- These on-cluster components may do a variety of things with this information, such as
- presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ source is a required field that defines the source of a catalog.
+ A catalog contains information on content that can be installed on a cluster.
+ The catalog source makes catalog contents discoverable and usable by other on-cluster components.
+ These components can use this information in a variety of ways, such as presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
- source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
@@ -267,25 +258,23 @@ spec:
properties:
image:
description: |-
- image is used to configure how catalog contents are sourced from an OCI image.
- This field is required when type is Image, and forbidden otherwise.
+ image configures how catalog contents are sourced from an OCI image.
+ It is required when type is Image, and forbidden otherwise.
properties:
pollIntervalMinutes:
description: |-
- pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
- pollIntervalMinutes is optional.
- pollIntervalMinutes can not be specified when ref is a digest-based reference.
+ pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
+ You cannot specify pollIntervalMinutes when ref is a digest-based reference.
- When omitted, the image will not be polled for new content.
+ When omitted, the image is not polled for new content.
minimum: 1
type: integer
ref:
description: |-
- ref allows users to define the reference to a container image containing Catalog contents.
- ref is required.
- ref can not be more than 1000 characters.
+ ref is a required field that defines the reference to a container image containing catalog contents.
+ It cannot be more than 1000 characters.
- A reference can be broken down into 3 parts - the domain, name, and identifier.
+ A reference has 3 parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
@@ -368,12 +357,11 @@ spec:
: true'
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type.
enum:
- Image
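
Putting the image source fields together, a tag-based reference that is re-polled for new content might look like this sketch (the image reference is a placeholder):

spec:
  source:
    type: Image                             # the only allowed value
    image:
      ref: quay.io/example/fbc-catalog:v1   # tag-based, so polling is permitted
      pollIntervalMinutes: 10               # forbidden if ref were digest-based
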
@@ -391,31 +379,30 @@ spec:
type: object
status:
description: |-
- status contains information about the state of the ClusterCatalog such as:
- - Whether or not the catalog contents are being served via the catalog content HTTP server
- - Whether or not the ClusterCatalog is progressing to a new state
+ status contains the following information about the state of the ClusterCatalog:
+ - Whether the catalog contents are being served via the catalog content HTTP server
+ - Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved
properties:
conditions:
description: |-
- conditions is a representation of the current state for this ClusterCatalog.
+ conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
- The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
- When it has a status of True and a reason of Available, the contents of the catalog are being served.
- When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
- When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+ The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
+ - When status is True and reason is Available, the catalog contents are being served.
+ - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
+ - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
- The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
- When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
- When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+ The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
+ - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
+ - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
- In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
- catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
- contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
- to the contents we identify that there are updates to the contents.
+ Both conditions can be active simultaneously, for example when the initial catalog contents have been fetched and polling later identifies updates:
+ - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
+ - The Progressing condition is True with reason Retrying because the system is working to serve the new version.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
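
The simultaneous-condition case described above would surface roughly like this sketch (values illustrative):

status:
  conditions:
  - type: Serving
    status: "True"
    reason: Available    # previously fetched contents are still served
  - type: Progressing
    status: "True"
    reason: Retrying     # a newer version of the contents is being processed
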
@@ -476,11 +463,9 @@ spec:
x-kubernetes-list-type: map
lastUnpacked:
description: |-
- lastUnpacked represents the last time the contents of the
- catalog were extracted from their source format. As an example,
- when using an Image source, the OCI image will be pulled and the
- image layers written to a file-system backed cache. We refer to the
- act of this extraction from the source format as "unpacking".
+ lastUnpacked represents the last time the catalog contents were extracted from their source format.
+ For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
+ This extraction from the source format is called "unpacking".
format: date-time
type: string
resolvedSource:
@@ -489,14 +474,14 @@ spec:
properties:
image:
description: |-
- image is a field containing resolution information for a catalog sourced from an image.
- This field must be set when type is Image, and forbidden otherwise.
+ image contains resolution information for a catalog sourced from an image.
+ It must be set when type is Image, and forbidden otherwise.
properties:
ref:
description: |-
ref contains the resolved image digest-based reference.
- The digest format is used so users can use other tooling to fetch the exact
- OCI manifests that were used to extract the catalog contents.
+ The digest format allows you to use other tooling to fetch the exact OCI manifests
+ that were used to extract the catalog contents.
maxLength: 1000
type: string
x-kubernetes-validations:
@@ -530,12 +515,11 @@ spec:
type: object
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", information about the resolved image source will be set in the 'image' field.
+ When set to "Image", information about the resolved image source is set in the image field.
enum:
- Image
type: string
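
A resolvedSource entry carrying the digest-based ref described above might look like this sketch; the repository and digest are placeholders:

status:
  resolvedSource:
    type: Image
    image:
      # placeholder digest, not a real image
      ref: quay.io/example/fbc-catalog@sha256:1111111111111111111111111111111111111111111111111111111111111111
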
@@ -554,19 +538,16 @@ spec:
properties:
base:
description: |-
- base is a cluster-internal URL that provides endpoints for
- accessing the content of the catalog.
+ base is a cluster-internal URL that provides endpoints for accessing the catalog content.
- It is expected that clients append the path for the endpoint they wish
- to access.
+ Clients should append the path for the endpoint they want to access.
- Currently, only a single endpoint is served and is accessible at the path
- /api/v1.
+ Currently, only a single endpoint is served and is accessible at the path /api/v1.
The endpoints served for the v1 API are:
- - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+ - /all - this endpoint returns the entire catalog contents in the FBC format
- As the needs of users and clients of the evolve, new endpoints may be added.
+ New endpoints may be added as needs evolve.
maxLength: 525
type: string
x-kubernetes-validations:
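
To make the base-URL contract above concrete: a client takes status.urls.base and appends the endpoint path. A sketch with a placeholder URL:

status:
  urls:
    base: https://catalogd-service.olmv1-system.svc/catalogs/my-catalog   # placeholder
# A client would then fetch {base}/api/v1/all to retrieve the full FBC contents.
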
@@ -924,9 +905,9 @@ spec:
properties:
config:
description: |-
- config is an optional field used to specify bundle specific configuration
- used to configure the bundle. Configuration is bundle specific and a bundle may provide
- a configuration schema. When not specified, the default configuration of the resolved bundle will be used.
+ config is optional and specifies configuration values for the bundle.
+ Configuration is bundle-specific, and a bundle may provide a configuration schema.
+ When not specified, the default configuration of the resolved bundle is used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema the bundle is deemed to not be configurable. More information on how
@@ -934,21 +915,19 @@ spec:
properties:
configType:
description: |-
- configType is a required reference to the type of configuration source.
+ configType is required and specifies the type of configuration source.
- Allowed values are "Inline"
+ The only allowed value is "Inline".
- When this field is set to "Inline", the cluster extension configuration is defined inline within the
- ClusterExtension resource.
+ When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource.
enum:
- Inline
type: string
inline:
description: |-
- inline contains JSON or YAML values specified directly in the
- ClusterExtension.
+ inline contains JSON or YAML values specified directly in the ClusterExtension.
- inline is used to specify arbitrary configuration values for the ClusterExtension.
+ It is used to specify arbitrary configuration values for the ClusterExtension.
It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
The configuration values are validated at runtime against a JSON schema provided by the bundle.
minProperties: 1
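
An inline configuration satisfying the constraints above (configType Inline, a non-empty object) might look like this sketch; the property names are hypothetical and would need to match the schema provided by the resolved bundle:

spec:
  config:
    configType: Inline
    inline:
      replicas: 2          # hypothetical bundle-defined property
      logLevel: debug      # hypothetical bundle-defined property
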
@@ -964,37 +943,35 @@ spec:
: !has(self.inline)'
install:
description: |-
- install is an optional field used to configure the installation options
- for the ClusterExtension such as the pre-flight check configuration.
+ install is optional and configures installation options for the ClusterExtension,
+ such as the pre-flight check configuration.
properties:
preflight:
description: |-
- preflight is an optional field that can be used to configure the checks that are
- run before installation or upgrade of the content for the package specified in the packageName field.
+ preflight is optional and configures the checks that run before installation or upgrade
+ of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
- When not specified, the default configuration will be used.
+ When not specified, the default configuration is used.
properties:
crdUpgradeSafety:
description: |-
- crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
- checks that run prior to upgrades of installed content.
+ crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
+ before upgrades of installed content.
- The CRD Upgrade Safety pre-flight check safeguards from unintended
- consequences of upgrading a CRD, such as data loss.
+ The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
+ such as data loss.
properties:
enforcement:
description: |-
- enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
+ enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
- When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
- when performing an upgrade operation. This should be used with caution as
- unintended consequences such as data loss can occur.
+ When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
+ Use this option with caution, as unintended consequences such as data loss can occur.
- When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
- performing an upgrade operation.
+ When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation.
enum:
- None
- Strict
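
Opting out of the check described above, with the data-loss caveat it carries, is a single-field change; a sketch:

spec:
  install:
    preflight:
      crdUpgradeSafety:
        enforcement: None   # skips the CRD Upgrade Safety check; use with caution
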
@@ -1016,16 +993,15 @@ spec:
rule: has(self.preflight)
namespace:
description: |-
- namespace is a reference to a Kubernetes namespace.
- This is the namespace in which the provided ServiceAccount must exist.
- It also designates the default namespace where namespace-scoped resources
- for the extension are applied to the cluster.
+ namespace specifies a Kubernetes namespace.
+ This is the namespace where the provided ServiceAccount must exist.
+ It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
- namespace is required, immutable, and follows the DNS label standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
- start and end with an alphanumeric character, and be no longer than 63 characters
+ The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
+ and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123
maxLength: 63
@@ -1037,24 +1013,22 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
serviceAccount:
description: |-
- serviceAccount is a reference to a ServiceAccount used to perform all interactions
- with the cluster that are required to manage the extension.
+ serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
+ that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
- serviceAccount is required.
+ The serviceAccount field is required.
properties:
name:
description: |-
- name is a required, immutable reference to the name of the ServiceAccount
- to be used for installation and management of the content for the package
- specified in the packageName field.
+ name is a required, immutable reference to the name of the ServiceAccount used for installation
+ and management of the content for the package specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
- name follows the DNS subdomain standard as defined in [RFC 1123].
- It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ The name field follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
@@ -1083,11 +1057,11 @@ spec:
type: object
source:
description: |-
- source is a required field which selects the installation source of content
- for this ClusterExtension. Selection is performed by setting the sourceType.
+ source is required and selects the installation source of content for this ClusterExtension.
+ The source is selected by setting the sourceType field.
- Catalog is currently the only implemented sourceType, and setting the
- sourcetype to "Catalog" requires the catalog field to also be defined.
+ Catalog is currently the only implemented sourceType.
+ Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
@@ -1098,30 +1072,29 @@ spec:
properties:
catalog:
description: |-
- catalog is used to configure how information is sourced from a catalog.
- This field is required when sourceType is "Catalog", and forbidden otherwise.
+ catalog configures how information is sourced from a catalog.
+ It is required when sourceType is "Catalog", and forbidden otherwise.
properties:
channels:
description: |-
- channels is an optional reference to a set of channels belonging to
- the package specified in the packageName field.
+ channels is optional and specifies a set of channels belonging to the package
+ specified in the packageName field.
- A "channel" is a package-author-defined stream of updates for an extension.
+ A channel is a package-author-defined stream of updates for an extension.
- Each channel in the list must follow the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters. No more than 256 channels can be specified.
+ Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
+ You can specify no more than 256 channels.
- When specified, it is used to constrain the set of installable bundles and
- the automated upgrade path. This constraint is an AND operation with the
- version field. For example:
+ When specified, it constrains the set of installable bundles and the automated upgrade path.
+ This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- - Automatic upgrades will be constrained to upgrade edges defined by the selected channel
+ - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
+ - Automatic upgrades are constrained to upgrade edges defined by the selected channel
- When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
+ When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
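
A channels constraint ANDed with a version range, as described above, might be expressed like this sketch (channel name, package, and range are illustrative):

spec:
  source:
    sourceType: Catalog
    catalog:
      packageName: argocd-operator   # hypothetical
      channels:
      - stable                       # only bundles in channel "stable" are considered
      version: ">=1.0.0, <1.5.0"     # ANDed with the channel constraint
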
@@ -1151,13 +1124,12 @@ spec:
type: array
packageName:
description: |-
- packageName is a reference to the name of the package to be installed
- and is used to filter the content from catalogs.
+ packageName specifies the name of the package to be installed and is used to filter
+ the content from catalogs.
- packageName is required, immutable, and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
@@ -1184,12 +1156,9 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
selector:
description: |-
- selector is an optional field that can be used
- to filter the set of ClusterCatalogs used in the bundle
- selection process.
+ selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
- When unspecified, all ClusterCatalogs will be used in
- the bundle selection process.
+ When unspecified, all ClusterCatalogs are used in the bundle selection process.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -1237,35 +1206,34 @@ spec:
upgradeConstraintPolicy:
default: CatalogProvided
description: |-
- upgradeConstraintPolicy is an optional field that controls whether
- the upgrade path(s) defined in the catalog are enforced for the package
- referenced in the packageName field.
+ upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
+ are enforced for the package referenced in the packageName field.
- Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
+ Allowed values are "CatalogProvided", "SelfCertified", or omitted.
- When this field is set to "CatalogProvided", automatic upgrades will only occur
- when upgrade constraints specified by the package author are met.
+ When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
+ author are met.
- When this field is set to "SelfCertified", the upgrade constraints specified by
- the package author are ignored. This allows for upgrades and downgrades to
- any version of the package. This is considered a dangerous operation as it
- can lead to unknown and potentially disastrous outcomes, such as data
- loss. It is assumed that users have independently verified changes when
- using this option.
+ When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
+ This allows upgrades and downgrades to any version of the package.
+ This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
+ such as data loss.
+ Use this option only if you have independently verified the changes.
- When this field is omitted, the default value is "CatalogProvided".
+ When omitted, the default value is "CatalogProvided".
enum:
- CatalogProvided
- SelfCertified
type: string
version:
description: |-
- version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
+ version is an optional semver constraint (a specific version or range of versions).
+ When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
- Version ranges are composed of comma- or space-delimited values and one or
- more comparison operators, known as comparison strings. Additional
- comparison strings can be added using the OR operator (||).
+ Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
+ known as comparison strings.
+ You can add further comparison strings using the OR operator (||).
# Range Comparisons
@@ -1343,13 +1311,12 @@ spec:
type: object
sourceType:
description: |-
- sourceType is a required reference to the type of install source.
+ sourceType is required and specifies the type of install source.
- Allowed values are "Catalog"
+ The only allowed value is "Catalog".
- When this field is set to "Catalog", information for determining the
- appropriate bundle of content to install will be fetched from
- ClusterCatalog resources existing on the cluster.
+ When set to "Catalog", information for determining the appropriate bundle of content to install
+ is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set.
enum:
- Catalog
@@ -1457,9 +1424,9 @@ spec:
description: |-
The set of condition types which apply to all spec.source variations are Installed and Progressing.
- The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
+ The Installed condition represents whether the bundle has been installed for this ClusterExtension:
+ - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ - When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
@@ -1468,12 +1435,12 @@ spec:
When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.
- When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
- These are indications from a package owner to guide users away from a particular package, channel, or bundle.
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
+ When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ These are indications from a package owner to guide users away from a particular package, channel, or bundle:
+ - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ - PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ - Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
@@ -1538,17 +1505,16 @@ spec:
properties:
bundle:
description: |-
- bundle is a required field which represents the identifying attributes of a bundle.
+ bundle is required and represents the identifying attributes of a bundle.
- A "bundle" is a versioned set of content that represents the resources that
- need to be applied to a cluster to install a package.
+ A "bundle" is a versioned set of content that represents the resources that need to be applied
+ to a cluster to install a package.
properties:
name:
description: |-
- name is required and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ name is required and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
type: string
x-kubernetes-validations:
- message: packageName must be a valid DNS1123 subdomain.
@@ -1558,8 +1524,8 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
version:
description: |-
- version is a required field and is a reference to the version that this bundle represents
- version follows the semantic versioning standard as defined in https://semver.org/.
+ version is required and references the version that this bundle represents.
+ It follows the semantic versioning standard as defined in https://semver.org/.
type: string
x-kubernetes-validations:
- message: version must be well-formed semver
diff --git a/manifests/standard-e2e.yaml b/manifests/standard-e2e.yaml
index 1aed38ba9..9b8b95c9d 100644
--- a/manifests/standard-e2e.yaml
+++ b/manifests/standard-e2e.yaml
@@ -152,6 +152,10 @@ data:
[[registry]]
prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000"
location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"
+
+ [[registry]]
+ prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000"
+ location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"
kind: ConfigMap
metadata:
annotations:
@@ -211,7 +215,7 @@ spec:
schema:
openAPIV3Schema:
description: |-
- ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+ ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
properties:
apiVersion:
@@ -233,29 +237,24 @@ spec:
type: object
spec:
description: |-
- spec is the desired state of the ClusterCatalog.
- spec is required.
- The controller will work to ensure that the desired
- catalog is unpacked and served over the catalog content HTTP server.
+ spec is a required field that defines the desired state of the ClusterCatalog.
+ The controller ensures that the catalog is unpacked and served over the catalog content HTTP server.
properties:
availabilityMode:
default: Available
description: |-
- availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
- availabilityMode is optional.
+ availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
- Allowed values are "Available" and "Unavailable" and omitted.
+ Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
- When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
- Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
- and its contents as usable.
+ When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
+ Clients should consider this ClusterCatalog and its contents as usable.
- When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
- When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
- Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
- to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist.
+ When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
+ Clients should treat this the same as if the ClusterCatalog does not exist.
+ Use "Unavailable" when you want to stop serving a catalog without deleting the ClusterCatalog resource.
enum:
- Unavailable
- Available
@@ -263,19 +262,18 @@ spec:
priority:
default: 0
description: |-
- priority allows the user to define a priority for a ClusterCatalog.
- priority is optional.
+ priority is an optional field that defines a priority for this ClusterCatalog.
- A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
- A higher number means higher priority.
+ Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
+ Higher numbers mean higher priority.
- It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
- When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+ Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ In that case, clients should prompt their users for additional input to break the tie.
- When omitted, the default priority is 0 because that is the zero value of integers.
+ When omitted, the default priority is 0.
- Negative numbers can be used to specify a priority lower than the default.
- Positive numbers can be used to specify a priority higher than the default.
+ Use negative numbers to specify a priority lower than the default.
+ Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647.
@@ -283,15 +281,12 @@ spec:
type: integer
source:
description: |-
- source allows a user to define the source of a catalog.
- A "catalog" contains information on content that can be installed on a cluster.
- Providing a catalog source makes the contents of the catalog discoverable and usable by
- other on-cluster components.
- These on-cluster components may do a variety of things with this information, such as
- presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ source is a required field that defines the source of a catalog.
+ A catalog contains information on content that can be installed on a cluster.
+ The catalog source makes catalog contents discoverable and usable by other on-cluster components.
+ These components can use this information in a variety of ways, such as presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
- source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
@@ -302,25 +297,23 @@ spec:
properties:
image:
description: |-
- image is used to configure how catalog contents are sourced from an OCI image.
- This field is required when type is Image, and forbidden otherwise.
+ image configures how catalog contents are sourced from an OCI image.
+ It is required when type is Image, and forbidden otherwise.
properties:
pollIntervalMinutes:
description: |-
- pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
- pollIntervalMinutes is optional.
- pollIntervalMinutes can not be specified when ref is a digest-based reference.
+ pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
+ You cannot specify pollIntervalMinutes when ref is a digest-based reference.
- When omitted, the image will not be polled for new content.
+ When omitted, the image is not polled for new content.
minimum: 1
type: integer
ref:
description: |-
- ref allows users to define the reference to a container image containing Catalog contents.
- ref is required.
- ref can not be more than 1000 characters.
+ ref is a required field that defines the reference to a container image containing catalog contents.
+ It cannot be more than 1000 characters.
- A reference can be broken down into 3 parts - the domain, name, and identifier.
+ A reference has 3 parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
@@ -403,12 +396,11 @@ spec:
: true'
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type.
enum:
- Image
@@ -426,31 +418,30 @@ spec:
type: object
status:
description: |-
- status contains information about the state of the ClusterCatalog such as:
- - Whether or not the catalog contents are being served via the catalog content HTTP server
- - Whether or not the ClusterCatalog is progressing to a new state
+ status contains the following information about the state of the ClusterCatalog:
+ - Whether the catalog contents are being served via the catalog content HTTP server
+ - Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved
properties:
conditions:
description: |-
- conditions is a representation of the current state for this ClusterCatalog.
+ conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
- The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
- When it has a status of True and a reason of Available, the contents of the catalog are being served.
- When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
- When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+ The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
+ - When status is True and reason is Available, the catalog contents are being served.
+ - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
+ - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
- The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
- When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
- When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+ The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
+ - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
+ - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
- In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
- catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
- contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
- to the contents we identify that there are updates to the contents.
+ Both conditions can be active simultaneously, for example when the initial catalog contents have been fetched and polling later identifies updates:
+ - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
+ - The Progressing condition is True with reason Retrying because the system is working to serve the new version.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
@@ -511,11 +502,9 @@ spec:
x-kubernetes-list-type: map
lastUnpacked:
description: |-
- lastUnpacked represents the last time the contents of the
- catalog were extracted from their source format. As an example,
- when using an Image source, the OCI image will be pulled and the
- image layers written to a file-system backed cache. We refer to the
- act of this extraction from the source format as "unpacking".
+ lastUnpacked represents the last time the catalog contents were extracted from their source format.
+ For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
+ This extraction from the source format is called "unpacking".
format: date-time
type: string
resolvedSource:
@@ -524,14 +513,14 @@ spec:
properties:
image:
description: |-
- image is a field containing resolution information for a catalog sourced from an image.
- This field must be set when type is Image, and forbidden otherwise.
+ image contains resolution information for a catalog sourced from an image.
+ It must be set when type is Image, and forbidden otherwise.
properties:
ref:
description: |-
ref contains the resolved image digest-based reference.
- The digest format is used so users can use other tooling to fetch the exact
- OCI manifests that were used to extract the catalog contents.
+ The digest format allows you to use other tooling to fetch the exact OCI manifests
+ that were used to extract the catalog contents.
maxLength: 1000
type: string
x-kubernetes-validations:
@@ -565,12 +554,11 @@ spec:
type: object
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", information about the resolved image source will be set in the 'image' field.
+ When set to "Image", information about the resolved image source is set in the image field.
enum:
- Image
type: string
@@ -589,19 +577,16 @@ spec:
properties:
base:
description: |-
- base is a cluster-internal URL that provides endpoints for
- accessing the content of the catalog.
+ base is a cluster-internal URL that provides endpoints for accessing the catalog content.
- It is expected that clients append the path for the endpoint they wish
- to access.
+ Clients should append the path for the endpoint they want to access.
- Currently, only a single endpoint is served and is accessible at the path
- /api/v1.
+ Currently, only a single endpoint is served and is accessible at the path /api/v1.
The endpoints served for the v1 API are:
- - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+ - /all - this endpoint returns the entire catalog contents in the FBC format
- As the needs of users and clients of the evolve, new endpoints may be added.
+ New endpoints may be added as needs evolve.
maxLength: 525
type: string
x-kubernetes-validations:
@@ -684,37 +669,35 @@ spec:
properties:
install:
description: |-
- install is an optional field used to configure the installation options
- for the ClusterExtension such as the pre-flight check configuration.
+ install is optional and configures installation options for the ClusterExtension,
+ such as the pre-flight check configuration.
properties:
preflight:
description: |-
- preflight is an optional field that can be used to configure the checks that are
- run before installation or upgrade of the content for the package specified in the packageName field.
+ preflight is optional and configures the checks that run before installation or upgrade
+ of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
- When not specified, the default configuration will be used.
+ When not specified, the default configuration is used.
properties:
crdUpgradeSafety:
description: |-
- crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
- checks that run prior to upgrades of installed content.
+ crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
+ before upgrades of installed content.
- The CRD Upgrade Safety pre-flight check safeguards from unintended
- consequences of upgrading a CRD, such as data loss.
+ The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
+ such as data loss.
properties:
enforcement:
description: |-
- enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
+ enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
- When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
- when performing an upgrade operation. This should be used with caution as
- unintended consequences such as data loss can occur.
+ When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
+ Use this option with caution, as unintended consequences such as data loss can occur.
- When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
- performing an upgrade operation.
+ When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation.
enum:
- None
- Strict
@@ -736,16 +719,15 @@ spec:
rule: has(self.preflight)
namespace:
description: |-
- namespace is a reference to a Kubernetes namespace.
- This is the namespace in which the provided ServiceAccount must exist.
- It also designates the default namespace where namespace-scoped resources
- for the extension are applied to the cluster.
+ namespace specifies a Kubernetes namespace.
+ This is the namespace where the provided ServiceAccount must exist.
+ It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
- namespace is required, immutable, and follows the DNS label standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
- start and end with an alphanumeric character, and be no longer than 63 characters
+ The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
+ and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123
maxLength: 63
@@ -757,24 +739,22 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
serviceAccount:
description: |-
- serviceAccount is a reference to a ServiceAccount used to perform all interactions
- with the cluster that are required to manage the extension.
+ serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
+ that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
- serviceAccount is required.
+ The serviceAccount field is required.
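+ A sketch of how the namespace and serviceAccount fields fit together in a ClusterExtension spec (names are illustrative):
+   spec:
+     namespace: my-extension-ns
+     serviceAccount:
+       name: my-extension-installer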
properties:
name:
description: |-
- name is a required, immutable reference to the name of the ServiceAccount
- to be used for installation and management of the content for the package
- specified in the packageName field.
+ name is a required, immutable reference to the name of the ServiceAccount used for installation
+ and management of the content for the package specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
- name follows the DNS subdomain standard as defined in [RFC 1123].
- It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ The name field follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
@@ -803,11 +783,11 @@ spec:
type: object
source:
description: |-
- source is a required field which selects the installation source of content
- for this ClusterExtension. Selection is performed by setting the sourceType.
+ source is required and selects the installation source of content for this ClusterExtension.
+ Set the sourceType field to perform the selection.
- Catalog is currently the only implemented sourceType, and setting the
- sourcetype to "Catalog" requires the catalog field to also be defined.
+ Catalog is currently the only implemented sourceType.
+ Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
@@ -818,30 +798,29 @@ spec:
properties:
catalog:
description: |-
- catalog is used to configure how information is sourced from a catalog.
- This field is required when sourceType is "Catalog", and forbidden otherwise.
+ catalog configures how information is sourced from a catalog.
+ It is required when sourceType is "Catalog", and forbidden otherwise.
properties:
channels:
description: |-
- channels is an optional reference to a set of channels belonging to
- the package specified in the packageName field.
+ channels is optional and specifies a set of channels belonging to the package
+ specified in the packageName field.
- A "channel" is a package-author-defined stream of updates for an extension.
+ A channel is a package-author-defined stream of updates for an extension.
- Each channel in the list must follow the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters. No more than 256 channels can be specified.
+ Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
+ You can specify no more than 256 channels.
- When specified, it is used to constrain the set of installable bundles and
- the automated upgrade path. This constraint is an AND operation with the
- version field. For example:
+ When specified, it constrains the set of installable bundles and the automated upgrade path.
+ This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- - Automatic upgrades will be constrained to upgrade edges defined by the selected channel
+ - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
+ - Automatic upgrades are constrained to upgrade edges defined by the selected channel
- When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
+ When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
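+ The example above corresponds to a catalog source like this sketch (packageName is illustrative):
+   catalog:
+     packageName: example-package
+     channels:
+       - foo
+     version: ">=1.0.0, <1.5.0"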
Some examples of valid values are:
- 1.1.x
@@ -871,13 +850,12 @@ spec:
type: array
packageName:
description: |-
- packageName is a reference to the name of the package to be installed
- and is used to filter the content from catalogs.
+ packageName specifies the name of the package to be installed and is used to filter
+ the content from catalogs.
- packageName is required, immutable, and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
@@ -904,12 +882,9 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
selector:
description: |-
- selector is an optional field that can be used
- to filter the set of ClusterCatalogs used in the bundle
- selection process.
+ selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
- When unspecified, all ClusterCatalogs will be used in
- the bundle selection process.
+ When unspecified, all ClusterCatalogs are used in the bundle selection process.
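+ A sketch of a selector that restricts resolution to a single catalog, assuming the catalog is labeled with its name (key and value are illustrative):
+   selector:
+     matchLabels:
+       olm.operatorframework.io/metadata.name: operatorhubio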
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -957,35 +932,34 @@ spec:
upgradeConstraintPolicy:
default: CatalogProvided
description: |-
- upgradeConstraintPolicy is an optional field that controls whether
- the upgrade path(s) defined in the catalog are enforced for the package
- referenced in the packageName field.
+ upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
+ are enforced for the package referenced in the packageName field.
- Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
+ Allowed values are "CatalogProvided", "SelfCertified", or omitted.
- When this field is set to "CatalogProvided", automatic upgrades will only occur
- when upgrade constraints specified by the package author are met.
+ When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
+ author are met.
- When this field is set to "SelfCertified", the upgrade constraints specified by
- the package author are ignored. This allows for upgrades and downgrades to
- any version of the package. This is considered a dangerous operation as it
- can lead to unknown and potentially disastrous outcomes, such as data
- loss. It is assumed that users have independently verified changes when
- using this option.
+ When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
+ This allows upgrades and downgrades to any version of the package.
+ This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
+ such as data loss.
+ Use this option only if you have independently verified the changes.
- When this field is omitted, the default value is "CatalogProvided".
+ When omitted, the default value is "CatalogProvided".
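+ A sketch that opts out of the catalog-provided constraints, for example to perform an independently verified downgrade (packageName and version are illustrative):
+   catalog:
+     packageName: example-package
+     version: "1.2.2"
+     upgradeConstraintPolicy: SelfCertified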
enum:
- CatalogProvided
- SelfCertified
type: string
version:
description: |-
- version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
+ version is an optional semver constraint (a specific version or range of versions).
+ When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
- Version ranges are composed of comma- or space-delimited values and one or
- more comparison operators, known as comparison strings. Additional
- comparison strings can be added using the OR operator (||).
+ Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
+ known as comparison strings.
+ You can add additional comparison strings using the OR operator (||).
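+ For example, each of the following is a valid constraint (values are illustrative):
+   version: "1.2.3"              # an exact version
+   version: ">=1.2.3, <2.0.0"    # comma-delimited comparison strings are ANDed
+   version: "<1.0.0 || >=2.0.0"  # the OR operator (||) joins two ranges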
# Range Comparisons
@@ -1063,13 +1037,12 @@ spec:
type: object
sourceType:
description: |-
- sourceType is a required reference to the type of install source.
+ sourceType is required and specifies the type of install source.
- Allowed values are "Catalog"
+ The only allowed value is "Catalog".
- When this field is set to "Catalog", information for determining the
- appropriate bundle of content to install will be fetched from
- ClusterCatalog resources existing on the cluster.
+ When set to "Catalog", information for determining the appropriate bundle of content to install
+ is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set.
enum:
- Catalog
@@ -1095,21 +1068,21 @@ spec:
description: |-
The set of condition types which apply to all spec.source variations are Installed and Progressing.
- The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
+ The Installed condition represents whether the bundle has been installed for this ClusterExtension:
+ - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ - When Installed is False and the Reason is Failed, the bundle has failed to install.
- The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
- When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
- When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
- When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
+ The Progressing condition represents whether the ClusterExtension is advancing towards a new state:
+ - When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
+ - When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
+ - When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
- When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
- These are indications from a package owner to guide users away from a particular package, channel, or bundle.
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
+ When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ These are indications from a package owner to guide users away from a particular package, channel, or bundle:
+ - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ - PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ - Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
@@ -1174,17 +1147,16 @@ spec:
properties:
bundle:
description: |-
- bundle is a required field which represents the identifying attributes of a bundle.
+ bundle is required and represents the identifying attributes of a bundle.
- A "bundle" is a versioned set of content that represents the resources that
- need to be applied to a cluster to install a package.
+ A "bundle" is a versioned set of content that represents the resources that need to be applied
+ to a cluster to install a package.
properties:
name:
description: |-
- name is required and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ name is required and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
type: string
x-kubernetes-validations:
- message: packageName must be a valid DNS1123 subdomain.
@@ -1194,8 +1166,8 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
version:
description: |-
- version is a required field and is a reference to the version that this bundle represents
- version follows the semantic versioning standard as defined in https://semver.org/.
+ version is required and references the version that this bundle represents.
+ It follows the semantic versioning standard as defined in https://semver.org/.
type: string
x-kubernetes-validations:
- message: version must be well-formed semver
@@ -1760,6 +1732,7 @@ spec:
image: busybox:1.36
name: tar
securityContext:
+ readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop:
diff --git a/manifests/standard.yaml b/manifests/standard.yaml
index 34cc57918..b5166be98 100644
--- a/manifests/standard.yaml
+++ b/manifests/standard.yaml
@@ -176,7 +176,7 @@ spec:
schema:
openAPIV3Schema:
description: |-
- ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+ ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
properties:
apiVersion:
@@ -198,29 +198,24 @@ spec:
type: object
spec:
description: |-
- spec is the desired state of the ClusterCatalog.
- spec is required.
- The controller will work to ensure that the desired
- catalog is unpacked and served over the catalog content HTTP server.
+ spec is a required field that defines the desired state of the ClusterCatalog.
+ The controller ensures that the catalog is unpacked and served over the catalog content HTTP server.
properties:
availabilityMode:
default: Available
description: |-
- availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
- availabilityMode is optional.
+ availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
- Allowed values are "Available" and "Unavailable" and omitted.
+ Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
- When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
- Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
- and its contents as usable.
+ When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
+ Clients should consider this ClusterCatalog and its contents as usable.
- When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
- When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
- Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
- to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist.
+ When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
+ Clients should treat an "Unavailable" ClusterCatalog the same as one that does not exist.
+ Use "Unavailable" when you want to keep the ClusterCatalog without deleting it, but no longer want its contents served or considered usable.
enum:
- Unavailable
- Available
@@ -228,19 +223,18 @@ spec:
priority:
default: 0
description: |-
- priority allows the user to define a priority for a ClusterCatalog.
- priority is optional.
+ priority is an optional field that defines a priority for this ClusterCatalog.
- A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
- A higher number means higher priority.
+ Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
+ Higher numbers mean higher priority.
- It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
- When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+ Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ Clients should prompt users for additional input to break the tie.
- When omitted, the default priority is 0 because that is the zero value of integers.
+ When omitted, the default priority is 0.
- Negative numbers can be used to specify a priority lower than the default.
- Positive numbers can be used to specify a priority higher than the default.
+ Use negative numbers to specify a priority lower than the default.
+ Use positive numbers to specify a priority higher than the default.
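+ For example, a catalog that should lose ties against default-priority catalogs can use a negative value (the number is illustrative):
+   spec:
+     priority: -100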
The lowest possible value is -2147483648.
The highest possible value is 2147483647.
@@ -248,15 +242,12 @@ spec:
type: integer
source:
description: |-
- source allows a user to define the source of a catalog.
- A "catalog" contains information on content that can be installed on a cluster.
- Providing a catalog source makes the contents of the catalog discoverable and usable by
- other on-cluster components.
- These on-cluster components may do a variety of things with this information, such as
- presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ source is a required field that defines the source of a catalog.
+ A catalog contains information on content that can be installed on a cluster.
+ The catalog source makes catalog contents discoverable and usable by other on-cluster components.
+ These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
- source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
@@ -267,25 +258,23 @@ spec:
properties:
image:
description: |-
- image is used to configure how catalog contents are sourced from an OCI image.
- This field is required when type is Image, and forbidden otherwise.
+ image configures how catalog contents are sourced from an OCI image.
+ It is required when type is Image, and forbidden otherwise.
properties:
pollIntervalMinutes:
description: |-
- pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
- pollIntervalMinutes is optional.
- pollIntervalMinutes can not be specified when ref is a digest-based reference.
+ pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
+ You cannot specify pollIntervalMinutes when ref is a digest-based reference.
- When omitted, the image will not be polled for new content.
+ When omitted, the image is not polled for new content.
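+ A sketch that polls a tag-based reference every ten minutes (the image ref is illustrative):
+   source:
+     type: Image
+     image:
+       ref: quay.io/operatorhubio/catalog:latest
+       pollIntervalMinutes: 10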
minimum: 1
type: integer
ref:
description: |-
- ref allows users to define the reference to a container image containing Catalog contents.
- ref is required.
- ref can not be more than 1000 characters.
+ ref is a required field that defines the reference to a container image containing catalog contents.
+ It cannot be more than 1000 characters.
- A reference can be broken down into 3 parts - the domain, name, and identifier.
+ A reference has three parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
@@ -368,12 +357,11 @@ spec:
: true'
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type.
enum:
- Image
@@ -391,31 +379,30 @@ spec:
type: object
status:
description: |-
- status contains information about the state of the ClusterCatalog such as:
- - Whether or not the catalog contents are being served via the catalog content HTTP server
- - Whether or not the ClusterCatalog is progressing to a new state
+ status contains the following information about the state of the ClusterCatalog:
+ - Whether the catalog contents are being served via the catalog content HTTP server
+ - Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved
properties:
conditions:
description: |-
- conditions is a representation of the current state for this ClusterCatalog.
+ conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
- The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
- When it has a status of True and a reason of Available, the contents of the catalog are being served.
- When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
- When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+ The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
+ - When status is True and reason is Available, the catalog contents are being served.
+ - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
+ - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
- The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
- When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
- When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+ The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
+ - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
+ - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
- In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
- catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
- contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
- to the contents we identify that there are updates to the contents.
+ If the latest contents have already been fetched and polling later identifies updates to them, both conditions can be set simultaneously:
+ - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
+ - The Progressing condition is True with reason Retrying because the system is working to serve the new version.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
@@ -476,11 +463,9 @@ spec:
x-kubernetes-list-type: map
lastUnpacked:
description: |-
- lastUnpacked represents the last time the contents of the
- catalog were extracted from their source format. As an example,
- when using an Image source, the OCI image will be pulled and the
- image layers written to a file-system backed cache. We refer to the
- act of this extraction from the source format as "unpacking".
+ lastUnpacked represents the last time the catalog contents were extracted from their source format.
+ For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
+ This extraction from the source format is called "unpacking".
format: date-time
type: string
resolvedSource:
@@ -489,14 +474,14 @@ spec:
properties:
image:
description: |-
- image is a field containing resolution information for a catalog sourced from an image.
- This field must be set when type is Image, and forbidden otherwise.
+ image contains resolution information for a catalog sourced from an image.
+ It must be set when type is Image, and is forbidden otherwise.
properties:
ref:
description: |-
ref contains the resolved image digest-based reference.
- The digest format is used so users can use other tooling to fetch the exact
- OCI manifests that were used to extract the catalog contents.
+ The digest format allows you to use other tooling to fetch the exact OCI manifests
+ that were used to extract the catalog contents.
maxLength: 1000
type: string
x-kubernetes-validations:
@@ -530,12 +515,11 @@ spec:
type: object
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", information about the resolved image source will be set in the 'image' field.
+ When set to "Image", information about the resolved image source is set in the image field.
enum:
- Image
type: string
@@ -554,19 +538,16 @@ spec:
properties:
base:
description: |-
- base is a cluster-internal URL that provides endpoints for
- accessing the content of the catalog.
+ base is a cluster-internal URL that provides endpoints for accessing the catalog content.
- It is expected that clients append the path for the endpoint they wish
- to access.
+ Clients should append the path for the endpoint they want to access.
- Currently, only a single endpoint is served and is accessible at the path
- /api/v1.
+ Currently, only a single endpoint is served and is accessible at the path /api/v1.
The endpoints served for the v1 API are:
- - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+ - /all - this endpoint returns the entire catalog contents in the FBC format
- As the needs of users and clients of the evolve, new endpoints may be added.
+ New endpoints may be added as needs evolve.
maxLength: 525
type: string
x-kubernetes-validations:
@@ -649,37 +630,35 @@ spec:
properties:
install:
description: |-
- install is an optional field used to configure the installation options
- for the ClusterExtension such as the pre-flight check configuration.
+ install is optional and configures installation options for the ClusterExtension,
+ such as the pre-flight check configuration.
properties:
preflight:
description: |-
- preflight is an optional field that can be used to configure the checks that are
- run before installation or upgrade of the content for the package specified in the packageName field.
+ preflight is optional and configures the checks that run before installation or upgrade
+ of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
- When not specified, the default configuration will be used.
+ When not specified, the default configuration is used.
properties:
crdUpgradeSafety:
description: |-
- crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
- checks that run prior to upgrades of installed content.
+ crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
+ before upgrades of installed content.
- The CRD Upgrade Safety pre-flight check safeguards from unintended
- consequences of upgrading a CRD, such as data loss.
+ The CRD Upgrade Safety pre-flight check safeguards against unintended consequences of upgrading a CRD,
+ such as data loss.
properties:
enforcement:
description: |-
- enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
+ enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
- When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
- when performing an upgrade operation. This should be used with caution as
- unintended consequences such as data loss can occur.
+ When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
+ Use this option with caution as unintended consequences such as data loss can occur.
- When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
- performing an upgrade operation.
+ When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation.
enum:
- None
- Strict
@@ -701,16 +680,15 @@ spec:
rule: has(self.preflight)
namespace:
description: |-
- namespace is a reference to a Kubernetes namespace.
- This is the namespace in which the provided ServiceAccount must exist.
- It also designates the default namespace where namespace-scoped resources
- for the extension are applied to the cluster.
+ namespace specifies a Kubernetes namespace.
+ This is the namespace where the provided ServiceAccount must exist.
+ It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
- namespace is required, immutable, and follows the DNS label standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
- start and end with an alphanumeric character, and be no longer than 63 characters
+ The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
+ and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123
maxLength: 63
@@ -722,24 +700,22 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
serviceAccount:
description: |-
- serviceAccount is a reference to a ServiceAccount used to perform all interactions
- with the cluster that are required to manage the extension.
+ serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
+ that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
- serviceAccount is required.
+ The serviceAccount field is required.
properties:
name:
description: |-
- name is a required, immutable reference to the name of the ServiceAccount
- to be used for installation and management of the content for the package
- specified in the packageName field.
+ name is a required, immutable reference to the name of the ServiceAccount used for installation
+ and management of the content for the package specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
- name follows the DNS subdomain standard as defined in [RFC 1123].
- It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ The name field follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
@@ -768,11 +744,11 @@ spec:
type: object
source:
description: |-
- source is a required field which selects the installation source of content
- for this ClusterExtension. Selection is performed by setting the sourceType.
+ source is required and selects the installation source of content for this ClusterExtension.
+ Set the sourceType field to perform the selection.
- Catalog is currently the only implemented sourceType, and setting the
- sourcetype to "Catalog" requires the catalog field to also be defined.
+ Catalog is currently the only implemented sourceType.
+ Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
@@ -783,30 +759,29 @@ spec:
properties:
catalog:
description: |-
- catalog is used to configure how information is sourced from a catalog.
- This field is required when sourceType is "Catalog", and forbidden otherwise.
+ catalog configures how information is sourced from a catalog.
+ It is required when sourceType is "Catalog", and forbidden otherwise.
properties:
channels:
description: |-
- channels is an optional reference to a set of channels belonging to
- the package specified in the packageName field.
+ channels is optional and specifies a set of channels belonging to the package
+ specified in the packageName field.
- A "channel" is a package-author-defined stream of updates for an extension.
+ A channel is a package-author-defined stream of updates for an extension.
- Each channel in the list must follow the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters. No more than 256 channels can be specified.
+ Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
+ You can specify no more than 256 channels.
- When specified, it is used to constrain the set of installable bundles and
- the automated upgrade path. This constraint is an AND operation with the
- version field. For example:
+ When specified, it constrains the set of installable bundles and the automated upgrade path.
+ This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- - Automatic upgrades will be constrained to upgrade edges defined by the selected channel
+ - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
+ - Automatic upgrades are constrained to upgrade edges defined by the selected channel
- When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
+ When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
@@ -836,13 +811,12 @@ spec:
type: array
packageName:
description: |-
- packageName is a reference to the name of the package to be installed
- and is used to filter the content from catalogs.
+ packageName specifies the name of the package to be installed and is used to filter
+ the content from catalogs.
- packageName is required, immutable, and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
@@ -869,12 +843,9 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
selector:
description: |-
- selector is an optional field that can be used
- to filter the set of ClusterCatalogs used in the bundle
- selection process.
+ selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
- When unspecified, all ClusterCatalogs will be used in
- the bundle selection process.
+ When unspecified, all ClusterCatalogs are used in the bundle selection process.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -922,35 +893,34 @@ spec:
upgradeConstraintPolicy:
default: CatalogProvided
description: |-
- upgradeConstraintPolicy is an optional field that controls whether
- the upgrade path(s) defined in the catalog are enforced for the package
- referenced in the packageName field.
+ upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
+ are enforced for the package referenced in the packageName field.
- Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
+ Allowed values are "CatalogProvided", "SelfCertified", or omitted.
- When this field is set to "CatalogProvided", automatic upgrades will only occur
- when upgrade constraints specified by the package author are met.
+ When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
+ author are met.
- When this field is set to "SelfCertified", the upgrade constraints specified by
- the package author are ignored. This allows for upgrades and downgrades to
- any version of the package. This is considered a dangerous operation as it
- can lead to unknown and potentially disastrous outcomes, such as data
- loss. It is assumed that users have independently verified changes when
- using this option.
+ When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
+ This allows upgrades and downgrades to any version of the package.
+ This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
+ such as data loss.
+ Use this option only if you have independently verified the changes.
- When this field is omitted, the default value is "CatalogProvided".
+ When omitted, the default value is "CatalogProvided".
enum:
- CatalogProvided
- SelfCertified
type: string
version:
description: |-
- version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
+ version is an optional semver constraint (a specific version or range of versions).
+ When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
- Version ranges are composed of comma- or space-delimited values and one or
- more comparison operators, known as comparison strings. Additional
- comparison strings can be added using the OR operator (||).
+ Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
+ known as comparison strings.
+ You can add additional comparison strings using the OR operator (||).
# Range Comparisons
@@ -1028,13 +998,12 @@ spec:
type: object
sourceType:
description: |-
- sourceType is a required reference to the type of install source.
+ sourceType is required and specifies the type of install source.
- Allowed values are "Catalog"
+ The only allowed value is "Catalog".
- When this field is set to "Catalog", information for determining the
- appropriate bundle of content to install will be fetched from
- ClusterCatalog resources existing on the cluster.
+ When set to "Catalog", information for determining the appropriate bundle of content to install
+ is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set.
enum:
- Catalog
@@ -1060,21 +1029,21 @@ spec:
description: |-
The set of condition types which apply to all spec.source variations are Installed and Progressing.
- The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
+ The Installed condition represents whether the bundle has been installed for this ClusterExtension:
+ - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ - When Installed is False and the Reason is Failed, the bundle has failed to install.
- The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
- When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
- When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
- When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
+ The Progressing condition represents whether the ClusterExtension is advancing towards a new state:
+ - When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
+ - When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
+ - When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
- When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
- These are indications from a package owner to guide users away from a particular package, channel, or bundle.
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
+ When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ These are indications from a package owner to guide users away from a particular package, channel, or bundle:
+ - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ - PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ - Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
@@ -1139,17 +1108,16 @@ spec:
properties:
bundle:
description: |-
- bundle is a required field which represents the identifying attributes of a bundle.
+ bundle is required and represents the identifying attributes of a bundle.
- A "bundle" is a versioned set of content that represents the resources that
- need to be applied to a cluster to install a package.
+ A "bundle" is a versioned set of content that represents the resources that need to be applied
+ to a cluster to install a package.
properties:
name:
description: |-
- name is required and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ name is required and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
type: string
x-kubernetes-validations:
- message: packageName must be a valid DNS1123 subdomain.
@@ -1159,8 +1127,8 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
version:
description: |-
- version is a required field and is a reference to the version that this bundle represents
- version follows the semantic versioning standard as defined in https://semver.org/.
+ version is required and references the version that this bundle represents.
+ It follows the semantic versioning standard as defined in https://semver.org/.
type: string
x-kubernetes-validations:
- message: version must be well-formed semver
diff --git a/openshift/catalogd/manifests-experimental.yaml b/openshift/catalogd/manifests-experimental.yaml
index 4dacdee86..5ac59edcf 100644
--- a/openshift/catalogd/manifests-experimental.yaml
+++ b/openshift/catalogd/manifests-experimental.yaml
@@ -121,7 +121,7 @@ spec:
schema:
openAPIV3Schema:
description: |-
- ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+ ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
properties:
apiVersion:
@@ -143,29 +143,24 @@ spec:
type: object
spec:
description: |-
- spec is the desired state of the ClusterCatalog.
- spec is required.
- The controller will work to ensure that the desired
- catalog is unpacked and served over the catalog content HTTP server.
+ spec is a required field that defines the desired state of the ClusterCatalog.
+ The controller ensures that the catalog is unpacked and served over the catalog content HTTP server.
properties:
availabilityMode:
default: Available
description: |-
- availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
- availabilityMode is optional.
+ availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
- Allowed values are "Available" and "Unavailable" and omitted.
+ Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
- When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
- Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
- and its contents as usable.
+ When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
+ Clients should consider this ClusterCatalog and its contents as usable.
- When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
- When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
- Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
- to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist.
+ When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
+ Clients should treat an "Unavailable" ClusterCatalog the same as one that does not exist.
+ Use "Unavailable" when you want to keep the ClusterCatalog without deleting it, but no longer want its contents served or considered usable.
enum:
- Unavailable
- Available
@@ -173,19 +168,18 @@ spec:
priority:
default: 0
description: |-
- priority allows the user to define a priority for a ClusterCatalog.
- priority is optional.
+ priority is an optional field that defines a priority for this ClusterCatalog.
- A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
- A higher number means higher priority.
+ Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
+ Higher numbers mean higher priority.
- It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
- When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+ Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ Clients should prompt users for additional input to break the tie.
- When omitted, the default priority is 0 because that is the zero value of integers.
+ When omitted, the default priority is 0.
- Negative numbers can be used to specify a priority lower than the default.
- Positive numbers can be used to specify a priority higher than the default.
+ Use negative numbers to specify a priority lower than the default.
+ Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647.
@@ -193,15 +187,12 @@ spec:
type: integer
source:
description: |-
- source allows a user to define the source of a catalog.
- A "catalog" contains information on content that can be installed on a cluster.
- Providing a catalog source makes the contents of the catalog discoverable and usable by
- other on-cluster components.
- These on-cluster components may do a variety of things with this information, such as
- presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ source is a required field that defines the source of a catalog.
+ A catalog contains information on content that can be installed on a cluster.
+ The catalog source makes catalog contents discoverable and usable by other on-cluster components.
+ These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
- source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
@@ -212,25 +203,23 @@ spec:
properties:
image:
description: |-
- image is used to configure how catalog contents are sourced from an OCI image.
- This field is required when type is Image, and forbidden otherwise.
+ image configures how catalog contents are sourced from an OCI image.
+ It is required when type is Image, and forbidden otherwise.
properties:
pollIntervalMinutes:
description: |-
- pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
- pollIntervalMinutes is optional.
- pollIntervalMinutes can not be specified when ref is a digest-based reference.
+ pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
+ You cannot specify pollIntervalMinutes when ref is a digest-based reference.
- When omitted, the image will not be polled for new content.
+ When omitted, the image is not polled for new content.
minimum: 1
type: integer
ref:
description: |-
- ref allows users to define the reference to a container image containing Catalog contents.
- ref is required.
- ref can not be more than 1000 characters.
+ ref is a required field that defines the reference to a container image containing catalog contents.
+ It cannot be more than 1000 characters.
- A reference can be broken down into 3 parts - the domain, name, and identifier.
+ A reference has three parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
@@ -290,12 +279,11 @@ spec:
rule: 'self.ref.find(''(@.*:)'') != "" ? !has(self.pollIntervalMinutes) : true'
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type.
enum:
- Image
@@ -311,31 +299,30 @@ spec:
type: object
status:
description: |-
- status contains information about the state of the ClusterCatalog such as:
- - Whether or not the catalog contents are being served via the catalog content HTTP server
- - Whether or not the ClusterCatalog is progressing to a new state
+ status contains the following information about the state of the ClusterCatalog:
+ - Whether the catalog contents are being served via the catalog content HTTP server
+ - Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved
properties:
conditions:
description: |-
- conditions is a representation of the current state for this ClusterCatalog.
+ conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
- The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
- When it has a status of True and a reason of Available, the contents of the catalog are being served.
- When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
- When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+ The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
+ - When status is True and reason is Available, the catalog contents are being served.
+ - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
+ - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
- The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
- When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
- When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+ The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
+ - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
+ - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
- In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
- catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
- contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
- to the contents we identify that there are updates to the contents.
+ If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously:
+ - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
+ - The Progressing condition is True with reason Retrying because the system is working to serve the new version.
items:
description: Condition contains details for one aspect of the current state of this API Resource.
properties:
@@ -395,11 +382,9 @@ spec:
x-kubernetes-list-type: map
lastUnpacked:
description: |-
- lastUnpacked represents the last time the contents of the
- catalog were extracted from their source format. As an example,
- when using an Image source, the OCI image will be pulled and the
- image layers written to a file-system backed cache. We refer to the
- act of this extraction from the source format as "unpacking".
+ lastUnpacked represents the last time the catalog contents were extracted from their source format.
+ For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
+ This extraction from the source format is called "unpacking".
format: date-time
type: string
resolvedSource:
@@ -407,14 +392,14 @@ spec:
properties:
image:
description: |-
- image is a field containing resolution information for a catalog sourced from an image.
- This field must be set when type is Image, and forbidden otherwise.
+ image contains resolution information for a catalog sourced from an image.
+ It must be set when type is Image, and forbidden otherwise.
properties:
ref:
description: |-
ref contains the resolved image digest-based reference.
- The digest format is used so users can use other tooling to fetch the exact
- OCI manifests that were used to extract the catalog contents.
+ The digest format allows you to use other tooling to fetch the exact OCI manifests
+ that were used to extract the catalog contents.
maxLength: 1000
type: string
x-kubernetes-validations:
@@ -435,12 +420,11 @@ spec:
type: object
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", information about the resolved image source will be set in the 'image' field.
+ When set to "Image", information about the resolved image source is set in the image field.
enum:
- Image
type: string
@@ -456,19 +440,16 @@ spec:
properties:
base:
description: |-
- base is a cluster-internal URL that provides endpoints for
- accessing the content of the catalog.
+ base is a cluster-internal URL that provides endpoints for accessing the catalog content.
- It is expected that clients append the path for the endpoint they wish
- to access.
+ Clients should append the path for the endpoint they want to access.
- Currently, only a single endpoint is served and is accessible at the path
- /api/v1.
+ Currently, only a single endpoint is served and is accessible at the path /api/v1.
The endpoints served for the v1 API are:
- - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+ - /all - this endpoint returns the entire catalog contents in the FBC format
- As the needs of users and clients of the evolve, new endpoints may be added.
+ New endpoints may be added as needs evolve.
maxLength: 525
type: string
x-kubernetes-validations:
diff --git a/openshift/catalogd/manifests.yaml b/openshift/catalogd/manifests.yaml
index 68b6c87f3..afefe4a28 100644
--- a/openshift/catalogd/manifests.yaml
+++ b/openshift/catalogd/manifests.yaml
@@ -121,7 +121,7 @@ spec:
schema:
openAPIV3Schema:
description: |-
- ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+ ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
properties:
apiVersion:
@@ -143,29 +143,24 @@ spec:
type: object
spec:
description: |-
- spec is the desired state of the ClusterCatalog.
- spec is required.
- The controller will work to ensure that the desired
- catalog is unpacked and served over the catalog content HTTP server.
+ spec is a required field that defines the desired state of the ClusterCatalog.
+ The controller ensures that the catalog is unpacked and served over the catalog content HTTP server.
properties:
availabilityMode:
default: Available
description: |-
- availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
- availabilityMode is optional.
+ availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
- Allowed values are "Available" and "Unavailable" and omitted.
+ Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
- When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
- Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
- and its contents as usable.
+ When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
+ Clients should consider this ClusterCatalog and its contents as usable.
- When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
- When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
- Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
- to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist.
+ When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
+ Clients should treat a ClusterCatalog in this mode as if it does not exist.
+ Use "Unavailable" when you want to keep the ClusterCatalog resource without deleting it.
enum:
- Unavailable
- Available
@@ -173,19 +168,18 @@ spec:
priority:
default: 0
description: |-
- priority allows the user to define a priority for a ClusterCatalog.
- priority is optional.
+ priority is an optional field that defines a priority for this ClusterCatalog.
- A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
- A higher number means higher priority.
+ Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
+ Higher numbers mean higher priority.
- It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
- When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+ Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ Clients should prompt users for additional input to break the tie.
- When omitted, the default priority is 0 because that is the zero value of integers.
+ When omitted, the default priority is 0.
- Negative numbers can be used to specify a priority lower than the default.
- Positive numbers can be used to specify a priority higher than the default.
+ Use negative numbers to specify a priority lower than the default.
+ Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647.
@@ -193,15 +187,12 @@ spec:
type: integer
source:
description: |-
- source allows a user to define the source of a catalog.
- A "catalog" contains information on content that can be installed on a cluster.
- Providing a catalog source makes the contents of the catalog discoverable and usable by
- other on-cluster components.
- These on-cluster components may do a variety of things with this information, such as
- presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ source is a required field that defines the source of a catalog.
+ A catalog contains information on content that can be installed on a cluster.
+ The catalog source makes catalog contents discoverable and usable by other on-cluster components.
+ These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
- source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
@@ -212,25 +203,23 @@ spec:
properties:
image:
description: |-
- image is used to configure how catalog contents are sourced from an OCI image.
- This field is required when type is Image, and forbidden otherwise.
+ image configures how catalog contents are sourced from an OCI image.
+ It is required when type is Image, and forbidden otherwise.
properties:
pollIntervalMinutes:
description: |-
- pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
- pollIntervalMinutes is optional.
- pollIntervalMinutes can not be specified when ref is a digest-based reference.
+ pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
+ You cannot specify pollIntervalMinutes when ref is a digest-based reference.
- When omitted, the image will not be polled for new content.
+ When omitted, the image is not polled for new content.
minimum: 1
type: integer
ref:
description: |-
- ref allows users to define the reference to a container image containing Catalog contents.
- ref is required.
- ref can not be more than 1000 characters.
+ ref is a required field that defines the reference to a container image containing catalog contents.
+ It cannot be more than 1000 characters.
- A reference can be broken down into 3 parts - the domain, name, and identifier.
+ A reference has three parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
@@ -290,12 +279,11 @@ spec:
rule: 'self.ref.find(''(@.*:)'') != "" ? !has(self.pollIntervalMinutes) : true'
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type.
enum:
- Image
@@ -311,31 +299,30 @@ spec:
type: object
status:
description: |-
- status contains information about the state of the ClusterCatalog such as:
- - Whether or not the catalog contents are being served via the catalog content HTTP server
- - Whether or not the ClusterCatalog is progressing to a new state
+ status contains the following information about the state of the ClusterCatalog:
+ - Whether the catalog contents are being served via the catalog content HTTP server
+ - Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved
properties:
conditions:
description: |-
- conditions is a representation of the current state for this ClusterCatalog.
+ conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
- The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
- When it has a status of True and a reason of Available, the contents of the catalog are being served.
- When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
- When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+ The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
+ - When status is True and reason is Available, the catalog contents are being served.
+ - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
+ - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
- The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
- When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
- When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+ The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
+ - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
+ - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
- In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
- catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
- contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
- to the contents we identify that there are updates to the contents.
+ If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously:
+ - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
+ - The Progressing condition is True with reason Retrying because the system is working to serve the new version.
items:
description: Condition contains details for one aspect of the current state of this API Resource.
properties:
@@ -395,11 +382,9 @@ spec:
x-kubernetes-list-type: map
lastUnpacked:
description: |-
- lastUnpacked represents the last time the contents of the
- catalog were extracted from their source format. As an example,
- when using an Image source, the OCI image will be pulled and the
- image layers written to a file-system backed cache. We refer to the
- act of this extraction from the source format as "unpacking".
+ lastUnpacked represents the last time the catalog contents were extracted from their source format.
+ For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
+ This extraction from the source format is called "unpacking".
format: date-time
type: string
resolvedSource:
@@ -407,14 +392,14 @@ spec:
properties:
image:
description: |-
- image is a field containing resolution information for a catalog sourced from an image.
- This field must be set when type is Image, and forbidden otherwise.
+ image contains resolution information for a catalog sourced from an image.
+ It must be set when type is Image, and forbidden otherwise.
properties:
ref:
description: |-
ref contains the resolved image digest-based reference.
- The digest format is used so users can use other tooling to fetch the exact
- OCI manifests that were used to extract the catalog contents.
+ The digest format allows you to use other tooling to fetch the exact OCI manifests
+ that were used to extract the catalog contents.
maxLength: 1000
type: string
x-kubernetes-validations:
@@ -435,12 +420,11 @@ spec:
type: object
type:
description: |-
- type is a reference to the type of source the catalog is sourced from.
- type is required.
+ type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
- When set to "Image", information about the resolved image source will be set in the 'image' field.
+ When set to "Image", information about the resolved image source is set in the image field.
enum:
- Image
type: string
@@ -456,19 +440,16 @@ spec:
properties:
base:
description: |-
- base is a cluster-internal URL that provides endpoints for
- accessing the content of the catalog.
+ base is a cluster-internal URL that provides endpoints for accessing the catalog content.
- It is expected that clients append the path for the endpoint they wish
- to access.
+ Clients should append the path for the endpoint they want to access.
- Currently, only a single endpoint is served and is accessible at the path
- /api/v1.
+ Currently, only a single endpoint is served and is accessible at the path /api/v1.
The endpoints served for the v1 API are:
- - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+ - /all - this endpoint returns the entire catalog contents in the FBC format
- As the needs of users and clients of the evolve, new endpoints may be added.
+ New endpoints may be added as needs evolve.
maxLength: 525
type: string
x-kubernetes-validations:
diff --git a/openshift/operator-controller/manifests-experimental.yaml b/openshift/operator-controller/manifests-experimental.yaml
index 7ccb483b7..806d5be03 100644
--- a/openshift/operator-controller/manifests-experimental.yaml
+++ b/openshift/operator-controller/manifests-experimental.yaml
@@ -146,9 +146,9 @@ spec:
properties:
config:
description: |-
- config is an optional field used to specify bundle specific configuration
- used to configure the bundle. Configuration is bundle specific and a bundle may provide
- a configuration schema. When not specified, the default configuration of the resolved bundle will be used.
+ config is optional and specifies bundle-specific configuration.
+ Configuration is bundle-specific and a bundle may provide a configuration schema.
+ When not specified, the default configuration of the resolved bundle is used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema the bundle is deemed to not be configurable. More information on how
@@ -156,21 +156,19 @@ spec:
properties:
configType:
description: |-
- configType is a required reference to the type of configuration source.
+ configType is required and specifies the type of configuration source.
- Allowed values are "Inline"
+ The only allowed value is "Inline".
- When this field is set to "Inline", the cluster extension configuration is defined inline within the
- ClusterExtension resource.
+ When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource.
enum:
- Inline
type: string
inline:
description: |-
- inline contains JSON or YAML values specified directly in the
- ClusterExtension.
+ inline contains JSON or YAML values specified directly in the ClusterExtension.
- inline is used to specify arbitrary configuration values for the ClusterExtension.
+ It is used to specify arbitrary configuration values for the ClusterExtension.
It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
The configuration values are validated at runtime against a JSON schema provided by the bundle.
minProperties: 1
@@ -184,37 +182,35 @@ spec:
rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline) : !has(self.inline)'
install:
description: |-
- install is an optional field used to configure the installation options
- for the ClusterExtension such as the pre-flight check configuration.
+ install is optional and configures installation options for the ClusterExtension,
+ such as the pre-flight check configuration.
properties:
preflight:
description: |-
- preflight is an optional field that can be used to configure the checks that are
- run before installation or upgrade of the content for the package specified in the packageName field.
+ preflight is optional and configures the checks that run before installation or upgrade
+ of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
- When not specified, the default configuration will be used.
+ When not specified, the default configuration is used.
properties:
crdUpgradeSafety:
description: |-
- crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
- checks that run prior to upgrades of installed content.
+ crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
+ before upgrades of installed content.
- The CRD Upgrade Safety pre-flight check safeguards from unintended
- consequences of upgrading a CRD, such as data loss.
+ The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
+ such as data loss.
properties:
enforcement:
description: |-
- enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
+ enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
- When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
- when performing an upgrade operation. This should be used with caution as
- unintended consequences such as data loss can occur.
+ When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
+ Use this option with caution as unintended consequences such as data loss can occur.
- When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
- performing an upgrade operation.
+ When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation.
enum:
- None
- Strict
@@ -234,16 +230,15 @@ spec:
rule: has(self.preflight)
namespace:
description: |-
- namespace is a reference to a Kubernetes namespace.
- This is the namespace in which the provided ServiceAccount must exist.
- It also designates the default namespace where namespace-scoped resources
- for the extension are applied to the cluster.
+ namespace specifies a Kubernetes namespace.
+ This is the namespace where the provided ServiceAccount must exist.
+ It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
- namespace is required, immutable, and follows the DNS label standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
- start and end with an alphanumeric character, and be no longer than 63 characters
+ The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
+ and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123
maxLength: 63
@@ -255,24 +250,22 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
serviceAccount:
description: |-
- serviceAccount is a reference to a ServiceAccount used to perform all interactions
- with the cluster that are required to manage the extension.
+ serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
+ that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
- serviceAccount is required.
+ The serviceAccount field is required.
properties:
name:
description: |-
- name is a required, immutable reference to the name of the ServiceAccount
- to be used for installation and management of the content for the package
- specified in the packageName field.
+ name is a required, immutable reference to the name of the ServiceAccount used for installation
+ and management of the content for the package specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
- name follows the DNS subdomain standard as defined in [RFC 1123].
- It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ The name field follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
@@ -298,11 +291,11 @@ spec:
type: object
source:
description: |-
- source is a required field which selects the installation source of content
- for this ClusterExtension. Selection is performed by setting the sourceType.
+ source is required and selects the installation source of content for this ClusterExtension.
+ Set the sourceType field to perform the selection.
- Catalog is currently the only implemented sourceType, and setting the
- sourcetype to "Catalog" requires the catalog field to also be defined.
+ Catalog is currently the only implemented sourceType.
+ Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
@@ -313,30 +306,29 @@ spec:
properties:
catalog:
description: |-
- catalog is used to configure how information is sourced from a catalog.
- This field is required when sourceType is "Catalog", and forbidden otherwise.
+ catalog configures how information is sourced from a catalog.
+ It is required when sourceType is "Catalog", and forbidden otherwise.
properties:
channels:
description: |-
- channels is an optional reference to a set of channels belonging to
- the package specified in the packageName field.
+ channels is optional and specifies a set of channels belonging to the package
+ specified in the packageName field.
- A "channel" is a package-author-defined stream of updates for an extension.
+ A channel is a package-author-defined stream of updates for an extension.
- Each channel in the list must follow the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters. No more than 256 channels can be specified.
+ Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
+ You can specify no more than 256 channels.
- When specified, it is used to constrain the set of installable bundles and
- the automated upgrade path. This constraint is an AND operation with the
- version field. For example:
+ When specified, it constrains the set of installable bundles and the automated upgrade path.
+ This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- - Automatic upgrades will be constrained to upgrade edges defined by the selected channel
+ - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
+ - Automatic upgrades are constrained to upgrade edges defined by the selected channel
- When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
+ When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
@@ -366,13 +358,12 @@ spec:
type: array
packageName:
description: |-
- packageName is a reference to the name of the package to be installed
- and is used to filter the content from catalogs.
+ packageName specifies the name of the package to be installed and is used to filter
+ the content from catalogs.
- packageName is required, immutable, and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
@@ -396,12 +387,9 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
selector:
description: |-
- selector is an optional field that can be used
- to filter the set of ClusterCatalogs used in the bundle
- selection process.
+ selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
- When unspecified, all ClusterCatalogs will be used in
- the bundle selection process.
+ When unspecified, all ClusterCatalogs are used in the bundle selection process.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
@@ -447,35 +435,34 @@ spec:
upgradeConstraintPolicy:
default: CatalogProvided
description: |-
- upgradeConstraintPolicy is an optional field that controls whether
- the upgrade path(s) defined in the catalog are enforced for the package
- referenced in the packageName field.
+ upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
+ are enforced for the package referenced in the packageName field.
- Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
+ Allowed values are "CatalogProvided", "SelfCertified", or omitted.
- When this field is set to "CatalogProvided", automatic upgrades will only occur
- when upgrade constraints specified by the package author are met.
+ When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
+ author are met.
- When this field is set to "SelfCertified", the upgrade constraints specified by
- the package author are ignored. This allows for upgrades and downgrades to
- any version of the package. This is considered a dangerous operation as it
- can lead to unknown and potentially disastrous outcomes, such as data
- loss. It is assumed that users have independently verified changes when
- using this option.
+ When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
+ This allows upgrades and downgrades to any version of the package.
+ This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
+ such as data loss.
+ Use this option only if you have independently verified the changes.
- When this field is omitted, the default value is "CatalogProvided".
+ When omitted, the default value is "CatalogProvided".
enum:
- CatalogProvided
- SelfCertified
type: string
version:
description: |-
- version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
+ version is an optional semver constraint (a specific version or range of versions).
+ When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
- Version ranges are composed of comma- or space-delimited values and one or
- more comparison operators, known as comparison strings. Additional
- comparison strings can be added using the OR operator (||).
+ Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
+ known as comparison strings.
+ You can combine additional comparison strings using the OR operator (||).
# Range Comparisons
@@ -553,13 +540,12 @@ spec:
type: object
sourceType:
description: |-
- sourceType is a required reference to the type of install source.
+ sourceType is required and specifies the type of install source.
- Allowed values are "Catalog"
+ The only allowed value is "Catalog".
- When this field is set to "Catalog", information for determining the
- appropriate bundle of content to install will be fetched from
- ClusterCatalog resources existing on the cluster.
+ When set to "Catalog", information for determining the appropriate bundle of content to install
+ is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set.
enum:
- Catalog
@@ -662,9 +648,9 @@ spec:
description: |-
The set of condition types which apply to all spec.source variations are Installed and Progressing.
- The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
+ The Installed condition represents whether the bundle has been installed for this ClusterExtension:
+ - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ - When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
@@ -673,12 +659,12 @@ spec:
When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.
- When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
- These are indications from a package owner to guide users away from a particular package, channel, or bundle.
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
+ When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ These are indications from a package owner to guide users away from a particular package, channel, or bundle:
+ - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ - PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ - Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
items:
description: Condition contains details for one aspect of the current state of this API Resource.
properties:
@@ -741,25 +727,24 @@ spec:
properties:
bundle:
description: |-
- bundle is a required field which represents the identifying attributes of a bundle.
+ bundle is required and represents the identifying attributes of a bundle.
- A "bundle" is a versioned set of content that represents the resources that
- need to be applied to a cluster to install a package.
+ A "bundle" is a versioned set of content that represents the resources that need to be applied
+ to a cluster to install a package.
properties:
name:
description: |-
- name is required and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ name is required and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
type: string
x-kubernetes-validations:
- message: packageName must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
version:
description: |-
- version is a required field and is a reference to the version that this bundle represents
- version follows the semantic versioning standard as defined in https://semver.org/.
+ version is required and references the version that this bundle represents.
+ It follows the semantic versioning standard as defined in https://semver.org/.
type: string
x-kubernetes-validations:
- message: version must be well-formed semver
diff --git a/openshift/operator-controller/manifests.yaml b/openshift/operator-controller/manifests.yaml
index 091dfe26a..29ebdf97a 100644
--- a/openshift/operator-controller/manifests.yaml
+++ b/openshift/operator-controller/manifests.yaml
@@ -146,37 +146,35 @@ spec:
properties:
install:
description: |-
- install is an optional field used to configure the installation options
- for the ClusterExtension such as the pre-flight check configuration.
+ install is optional and configures installation options for the ClusterExtension,
+ such as the pre-flight check configuration.
properties:
preflight:
description: |-
- preflight is an optional field that can be used to configure the checks that are
- run before installation or upgrade of the content for the package specified in the packageName field.
+ preflight is optional and configures the checks that run before installation or upgrade
+ of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
- When not specified, the default configuration will be used.
+ When not specified, the default configuration is used.
properties:
crdUpgradeSafety:
description: |-
- crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
- checks that run prior to upgrades of installed content.
+ crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
+ before upgrades of installed content.
- The CRD Upgrade Safety pre-flight check safeguards from unintended
- consequences of upgrading a CRD, such as data loss.
+ The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
+ such as data loss.
properties:
enforcement:
description: |-
- enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
+ enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
- When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
- when performing an upgrade operation. This should be used with caution as
- unintended consequences such as data loss can occur.
+ When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
+ Use this option with caution as unintended consequences such as data loss can occur.
- When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
- performing an upgrade operation.
+ When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation.
enum:
- None
- Strict
@@ -196,16 +194,15 @@ spec:
rule: has(self.preflight)
namespace:
description: |-
- namespace is a reference to a Kubernetes namespace.
- This is the namespace in which the provided ServiceAccount must exist.
- It also designates the default namespace where namespace-scoped resources
- for the extension are applied to the cluster.
+ namespace specifies a Kubernetes namespace.
+ This is the namespace where the provided ServiceAccount must exist.
+ It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
- namespace is required, immutable, and follows the DNS label standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
- start and end with an alphanumeric character, and be no longer than 63 characters
+ The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
+ and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123
maxLength: 63
@@ -217,24 +214,22 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
serviceAccount:
description: |-
- serviceAccount is a reference to a ServiceAccount used to perform all interactions
- with the cluster that are required to manage the extension.
+ serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
+ that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
- serviceAccount is required.
+ The serviceAccount field is required.
properties:
name:
description: |-
- name is a required, immutable reference to the name of the ServiceAccount
- to be used for installation and management of the content for the package
- specified in the packageName field.
+ name is a required, immutable reference to the name of the ServiceAccount used for installation
+ and management of the content for the package specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
- name follows the DNS subdomain standard as defined in [RFC 1123].
- It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ The name field follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
@@ -260,11 +255,11 @@ spec:
type: object
source:
description: |-
- source is a required field which selects the installation source of content
- for this ClusterExtension. Selection is performed by setting the sourceType.
+ source is required and selects the installation source of content for this ClusterExtension.
+ Set the sourceType field to perform the selection.
- Catalog is currently the only implemented sourceType, and setting the
- sourcetype to "Catalog" requires the catalog field to also be defined.
+ Catalog is currently the only implemented sourceType.
+ Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
@@ -275,30 +270,29 @@ spec:
properties:
catalog:
description: |-
- catalog is used to configure how information is sourced from a catalog.
- This field is required when sourceType is "Catalog", and forbidden otherwise.
+ catalog configures how information is sourced from a catalog.
+ It is required when sourceType is "Catalog", and forbidden otherwise.
properties:
channels:
description: |-
- channels is an optional reference to a set of channels belonging to
- the package specified in the packageName field.
+ channels is optional and specifies a set of channels belonging to the package
+ specified in the packageName field.
- A "channel" is a package-author-defined stream of updates for an extension.
+ A channel is a package-author-defined stream of updates for an extension.
- Each channel in the list must follow the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters. No more than 256 channels can be specified.
+ Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
+ You can specify no more than 256 channels.
- When specified, it is used to constrain the set of installable bundles and
- the automated upgrade path. This constraint is an AND operation with the
- version field. For example:
+ When specified, it constrains the set of installable bundles and the automated upgrade path.
+ This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- - Automatic upgrades will be constrained to upgrade edges defined by the selected channel
+ - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
+ - Automatic upgrades are constrained to upgrade edges defined by the selected channel
- When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
+ When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
@@ -328,13 +322,12 @@ spec:
type: array
packageName:
description: |-
- packageName is a reference to the name of the package to be installed
- and is used to filter the content from catalogs.
+ packageName specifies the name of the package to be installed and is used to filter
+ the content from catalogs.
- packageName is required, immutable, and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
@@ -358,12 +351,9 @@ spec:
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
selector:
description: |-
- selector is an optional field that can be used
- to filter the set of ClusterCatalogs used in the bundle
- selection process.
+ selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
- When unspecified, all ClusterCatalogs will be used in
- the bundle selection process.
+ When unspecified, all ClusterCatalogs are used in the bundle selection process.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
@@ -409,35 +399,34 @@ spec:
upgradeConstraintPolicy:
default: CatalogProvided
description: |-
- upgradeConstraintPolicy is an optional field that controls whether
- the upgrade path(s) defined in the catalog are enforced for the package
- referenced in the packageName field.
+ upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
+ are enforced for the package referenced in the packageName field.
- Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
+ Allowed values are "CatalogProvided", "SelfCertified", or omitted.
- When this field is set to "CatalogProvided", automatic upgrades will only occur
- when upgrade constraints specified by the package author are met.
+ When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
+ author are met.
- When this field is set to "SelfCertified", the upgrade constraints specified by
- the package author are ignored. This allows for upgrades and downgrades to
- any version of the package. This is considered a dangerous operation as it
- can lead to unknown and potentially disastrous outcomes, such as data
- loss. It is assumed that users have independently verified changes when
- using this option.
+ When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
+ This allows upgrades and downgrades to any version of the package.
+ This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
+ such as data loss.
+ Use this option only if you have independently verified the changes.
- When this field is omitted, the default value is "CatalogProvided".
+ When omitted, the default value is "CatalogProvided".
enum:
- CatalogProvided
- SelfCertified
type: string
version:
description: |-
- version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
+ version is an optional semver constraint (a specific version or range of versions).
+ When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
- Version ranges are composed of comma- or space-delimited values and one or
- more comparison operators, known as comparison strings. Additional
- comparison strings can be added using the OR operator (||).
+ Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
+ known as comparison strings.
+ You can combine additional comparison strings using the OR operator (||).
# Range Comparisons
@@ -515,13 +504,12 @@ spec:
type: object
sourceType:
description: |-
- sourceType is a required reference to the type of install source.
+ sourceType is required and specifies the type of install source.
- Allowed values are "Catalog"
+ The only allowed value is "Catalog".
- When this field is set to "Catalog", information for determining the
- appropriate bundle of content to install will be fetched from
- ClusterCatalog resources existing on the cluster.
+ When set to "Catalog", information for determining the appropriate bundle of content to install
+ is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set.
enum:
- Catalog
@@ -544,21 +532,21 @@ spec:
description: |-
The set of condition types which apply to all spec.source variations are Installed and Progressing.
- The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
+ The Installed condition represents whether the bundle has been installed for this ClusterExtension:
+ - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ - When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
- When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
- These are indications from a package owner to guide users away from a particular package, channel, or bundle.
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
+ When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ These are indications from a package owner to guide users away from a particular package, channel, or bundle:
+ - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ - PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ - Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
items:
description: Condition contains details for one aspect of the current state of this API Resource.
properties:
@@ -621,25 +609,24 @@ spec:
properties:
bundle:
description: |-
- bundle is a required field which represents the identifying attributes of a bundle.
+ bundle is required and represents the identifying attributes of a bundle.
- A "bundle" is a versioned set of content that represents the resources that
- need to be applied to a cluster to install a package.
+ A "bundle" is a versioned set of content that represents the resources that need to be applied
+ to a cluster to install a package.
properties:
name:
description: |-
- name is required and follows the DNS subdomain standard
- as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
- hyphens (-) or periods (.), start and end with an alphanumeric character,
- and be no longer than 253 characters.
+ name is required and follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+ start and end with an alphanumeric character, and be no longer than 253 characters.
type: string
x-kubernetes-validations:
- message: packageName must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters
rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
version:
description: |-
- version is a required field and is a reference to the version that this bundle represents
- version follows the semantic versioning standard as defined in https://semver.org/.
+ version is required and references the version that this bundle represents.
+ It follows the semantic versioning standard as defined in https://semver.org/.
type: string
x-kubernetes-validations:
- message: version must be well-formed semver
diff --git a/requirements.txt b/requirements.txt
index 35b0e3fc6..5f645321f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
Babel==2.17.0
-beautifulsoup4==4.14.2
+beautifulsoup4==4.14.3
certifi==2025.11.12
charset-normalizer==3.4.4
click==8.3.1
@@ -19,7 +19,7 @@ mkdocs-material-extensions==1.3.1
packaging==25.0
paginate==0.5.7
pathspec==0.12.1
-platformdirs==4.5.0
+platformdirs==4.5.1
Pygments==2.19.2
pymdown-extensions==10.17.2
pyquery==2.0.1
diff --git a/test/e2e/README.md b/test/e2e/README.md
new file mode 100644
index 000000000..c3483e518
--- /dev/null
+++ b/test/e2e/README.md
@@ -0,0 +1,350 @@
+# E2E Tests - Godog Framework
+
+This directory contains end-to-end (e2e) tests, written using the [Godog](https://github.com/cucumber/godog) framework.
+
+## Overview
+
+### What is Godog/BDD/Cucumber?
+
+Godog is a Behavior-Driven Development (BDD) framework that allows you to write tests in a human-readable format called
+[Gherkin](https://cucumber.io/docs/gherkin/reference/). Tests are written as scenarios using Given-When-Then syntax, making them accessible to both technical and
+non-technical stakeholders.
+
+**Benefits:**
+
+- **Readable**: Tests serve as living documentation
+- **Maintainable**: Reusable step definitions reduce code duplication
+- **Collaborative**: Product owners and developers share the same test specifications
+- **Structured**: Clear separation between test scenarios and implementation
+
+## Project Structure
+
+```
+test/e2e/
+├── README.md # This file
+├── features_test.go # Test runner and suite initialization
+├── features/ # Gherkin feature files
+│ ├── install.feature # ClusterExtension installation scenarios
+│ ├── update.feature # ClusterExtension update scenarios
+│ ├── recover.feature # Recovery scenarios
+│ ├── status.feature # ClusterExtension status scenarios
+│ └── metrics.feature # Metrics endpoint scenarios
+└── steps/ # Step definitions and test utilities
+ ├── steps.go # Step definition implementations
+ ├── hooks.go # Test hooks and scenario context
+ └── testdata/ # Test data (RBAC templates, catalogs)
+ ├── rbac-template.yaml
+ ├── cluster-admin-rbac-template.yaml
+ ├── metrics-reader-rbac-template.yaml
+ ├── test-catalog-template.yaml
+ ├── extra-catalog-template.yaml
+ └── ...
+```
+
+## Architecture
+
+### 1. Test Runner (`features_test.go`)
+
+The main test entry point that configures and runs the Godog test suite.
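+
+A minimal sketch of such a runner, assuming godog's `TestSuite` API (the import path for `steps` is assumed, and the real `features_test.go` may wire in additional options and hooks):
+
+```go
+package e2e
+
+import (
+    "testing"
+
+    "github.com/cucumber/godog"
+
+    "github.com/operator-framework/operator-controller/test/e2e/steps"
+)
+
+func TestFeatures(t *testing.T) {
+    suite := godog.TestSuite{
+        Name: "e2e",
+        ScenarioInitializer: func(sc *godog.ScenarioContext) {
+            steps.RegisterSteps(sc) // step definitions from steps/steps.go
+            // hooks from steps/hooks.go would be registered here as well
+        },
+        Options: &godog.Options{
+            Format:   "pretty",
+            Paths:    []string{"features"},
+            TestingT: t, // report scenario failures through go test
+        },
+    }
+    if suite.Run() != 0 {
+        t.Fatal("failed to run feature tests")
+    }
+}
+```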
+
+### 2. Feature Files (`features/*.feature`)
+
+Gherkin files that describe test scenarios in natural language.
+
+**Structure:**
+
+```gherkin
+Feature: [Feature Name]
+ [Feature description]
+
+ Background:
+ [Common setup steps for all scenarios]
+
+ Scenario: [Scenario Name]
+ Given [precondition]
+ When [action]
+ Then [expected result]
+ And [additional assertions]
+```
+
+**Example:**
+
+```gherkin
+Feature: Install ClusterExtension
+
+ Background:
+ Given OLM is available
+ And "test" catalog serves bundles
+ And Service account "olm-sa" with needed permissions is available in test namespace
+
+ Scenario: Install latest available version from the default channel
+ When ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ ...
+ """
+ Then ClusterExtension is rolled out
+ And ClusterExtension is available
+```
+
+### 3. Step Definitions (`steps/steps.go`)
+
+Go functions that implement the steps defined in feature files. Each step is registered with a regex pattern that
+matches the Gherkin text.
+
+**Registration:**
+
+```go
+func RegisterSteps(sc *godog.ScenarioContext) {
+    sc.Step(`^OLM is available$`, OLMisAvailable)
+    sc.Step(`^bundle "([^"]+)" is installed in version "([^"]+)"$`, BundleInstalled)
+    sc.Step(`^ClusterExtension is applied$`, ResourceIsApplied)
+    // ... more steps
+}
+```
+
+**Step Implementation Pattern:**
+
+```go
+func BundleInstalled(ctx context.Context, name, version string) error {
+    sc := scenarioCtx(ctx)
+    waitFor(ctx, func() bool {
+        v, err := kubectl("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.install.bundle}")
+        if err != nil {
+            return false
+        }
+        var bundle map[string]interface{}
+        if err := json.Unmarshal([]byte(v), &bundle); err != nil {
+            return false
+        }
+        return bundle["name"] == name && bundle["version"] == version
+    })
+    return nil
+}
+```
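+
+The `kubectl` helper used above shells out to the Kubernetes CLI. A simplified sketch (the real helper lives in `steps/` and honors the `--k8s.cli` flag described below):
+
+```go
+package steps
+
+import (
+    "os/exec"
+    "strings"
+)
+
+// kubectl runs the Kubernetes CLI with the given arguments and returns
+// its trimmed combined output.
+func kubectl(args ...string) (string, error) {
+    out, err := exec.Command("kubectl", args...).CombinedOutput()
+    return strings.TrimSpace(string(out)), err
+}
+```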
+
+### 4. Hooks and Context (`steps/hooks.go`)
+
+Manages test lifecycle and scenario-specific context.
+
+**Hooks:**
+
+- `CheckFeatureTags`: Skips scenarios based on feature gate tags (e.g., `@WebhookProviderCertManager`)
+- `CreateScenarioContext`: Creates unique namespace and names for each scenario
+- `ScenarioCleanup`: Cleans up resources after each scenario
+
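+A sketch of how the hooks above are typically registered with godog's `Before`/`After` API (the type and field names here are illustrative, not the actual implementation):
+
+```go
+package steps
+
+import (
+    "context"
+
+    "github.com/cucumber/godog"
+    "k8s.io/apimachinery/pkg/util/rand"
+)
+
+type ctxKey struct{}
+
+// scenarioContext holds per-scenario state; the field set is illustrative.
+type scenarioContext struct {
+    namespace            string
+    clusterExtensionName string
+}
+
+func RegisterHooks(sc *godog.ScenarioContext) {
+    sc.Before(func(ctx context.Context, _ *godog.Scenario) (context.Context, error) {
+        // CreateScenarioContext: unique namespace and names per scenario.
+        id := rand.String(8)
+        return context.WithValue(ctx, ctxKey{}, &scenarioContext{
+            namespace:            "ns-" + id,
+            clusterExtensionName: "ce-" + id,
+        }), nil
+    })
+    sc.After(func(ctx context.Context, _ *godog.Scenario, _ error) (context.Context, error) {
+        // ScenarioCleanup: delete the ClusterExtension, namespace and any
+        // other resources created by the scenario (omitted here).
+        return ctx, nil
+    })
+}
+```
+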
+**Variable Substitution:**
+
+Replaces `${TEST_NAMESPACE}`, `${NAME}`, and `${CATALOG_IMG}` with scenario-specific values.
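+
+For illustration, the substitution amounts to a simple string replacement (this helper is a sketch, not the actual implementation):
+
+```go
+package steps
+
+import "strings"
+
+// expandVars replaces the supported placeholders in a YAML docstring
+// with scenario-specific values.
+func expandVars(doc, name, namespace, catalogImg string) string {
+    return strings.NewReplacer(
+        "${NAME}", name,
+        "${TEST_NAMESPACE}", namespace,
+        "${CATALOG_IMG}", catalogImg,
+    ).Replace(doc)
+}
+```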
+
+## Writing Tests
+
+### 1. Create a Feature File
+
+Create a new `.feature` file in `test/e2e/features/`:
+
+```gherkin
+Feature: Your Feature Name
+ Description of what this feature tests
+
+ Background:
+ Given OLM is available
+ And "test" catalog serves bundles
+
+ Scenario: Your scenario description
+ When [some action]
+ Then [expected outcome]
+```
+
+### 2. Implement Step Definitions
+
+Add step implementations in `steps/steps.go`:
+
+```go
+func RegisterSteps(sc *godog.ScenarioContext) {
+ // ... existing steps
+ sc.Step(`^your step pattern "([^"]+)"$`, YourStepFunction)
+}
+
+func YourStepFunction(ctx context.Context, param string) error {
+    sc := scenarioCtx(ctx)
+    _ = sc // TODO: implement using the scenario context
+    return nil
+}
+```
+
+### 3. Use Existing Steps
+
+Leverage existing steps for common operations:
+
+- **Setup**: `Given OLM is available`, `And "test" catalog serves bundles`
+- **Resource Management**: `When ClusterExtension is applied`, `And resource is applied`
+- **Assertions**: `Then ClusterExtension is available`, `And bundle "..." is installed`
+- **Conditions**: `Then ClusterExtension reports Progressing as True with Reason Retrying:`
+
+### 4. Variable Substitution
+
+Use these variables in YAML templates:
+
+- `${NAME}`: Scenario-specific ClusterExtension name (e.g., `ce-123`)
+- `${TEST_NAMESPACE}`: Scenario-specific namespace (e.g., `ns-123`)
+- `${CATALOG_IMG}`: Catalog image reference (defaults to in-cluster registry, overridable via `CATALOG_IMG` env var)
+
+### 5. Feature Tags
+
+Use tags to conditionally run scenarios based on feature gates:
+
+```gherkin
+@WebhookProviderCertManager
+Scenario: Install operator having webhooks
+```
+
+Scenarios are skipped if the feature gate is not enabled on the deployed controller.
+
+## Running Tests
+
+### Run All Tests
+
+```bash
+make test-e2e
+```
+
+or
+
+```bash
+make test-experimental-e2e
+```
+
+### Run Specific Feature
+
+```bash
+go test test/e2e/features_test.go -- features/install.feature
+```
+
+### Run Specific Scenario by Tag
+
+```bash
+go test test/e2e/features_test.go --godog.tags="@WebhookProviderCertManager"
+```
+
+### Run with Debug Logging
+
+```bash
+go test -v test/e2e/features_test.go --log.debug
+```
+
+### CLI Options
+
+Godog options are passed to the test binary using the `--godog.` prefix:
+
+```bash
+go test test/e2e/features_test.go \
+ --godog.format=pretty \
+ --godog.tags="@WebhookProviderCertManager"
+```
+
+Available formats: `pretty`, `cucumber`, `progress`, `junit`
+
+**Custom Flags:**
+
+- `--log.debug`: Enable debug logging (development mode)
+- `--k8s.cli=`: Specify path to Kubernetes CLI (default: `kubectl`)
+ - Useful for using `oc` or a specific kubectl binary
+
+**Example:**
+
+```bash
+go test test/e2e/features_test.go --log.debug --k8s.cli=oc
+```
+
+### Environment Variables
+
+- `KUBECONFIG`: Path to kubeconfig file (defaults to `~/.kube/config`)
+- `E2E_SUMMARY_OUTPUT`: Path to write test summary (optional)
+- `CATALOG_IMG`: Override default catalog image reference (optional)
+- `LOCAL_REGISTRY_HOST`: Local registry host for catalog images
+
+## Design Patterns
+
+### 1. Scenario Isolation
+
+Each scenario runs in its own namespace with unique resource names, ensuring complete isolation:
+
+- Namespace: `ns-{scenario-id}`
+- ClusterExtension: `ce-{scenario-id}`
+
+### 2. Automatic Cleanup
+
+The `ScenarioCleanup` hook ensures all resources are deleted after each scenario:
+
+- Kills background processes (e.g., kubectl port-forward)
+- Deletes ClusterExtensions
+- Deletes namespaces
+- Deletes added resources
+
+### 3. Declarative Resource Management
+
+Resources are managed declaratively using YAML templates embedded in feature files as docstrings:
+
+```gherkin
+When ClusterExtension is applied
+"""
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ ...
+ """
+```
+
+### 4. Polling with Timeouts
+
+All asynchronous operations use `waitFor` with consistent timeout (300s) and tick (1s):
+
+```go
+waitFor(ctx, func() bool {
+ // Check condition
+ return conditionMet
+})
+```
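+
+For reference, a `waitFor` with these semantics can be as simple as the following sketch (the actual helper lives in `steps/` and may differ in detail):
+
+```go
+package steps
+
+import (
+    "context"
+    "time"
+)
+
+// waitFor polls cond every second until it returns true or the 300s
+// timeout expires.
+func waitFor(ctx context.Context, cond func() bool) bool {
+    ctx, cancel := context.WithTimeout(ctx, 300*time.Second)
+    defer cancel()
+    tick := time.NewTicker(time.Second)
+    defer tick.Stop()
+    for {
+        if cond() {
+            return true
+        }
+        select {
+        case <-ctx.Done():
+            return false
+        case <-tick.C:
+        }
+    }
+}
+```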
+
+### 5. Feature Gate Detection
+
+Tests automatically detect enabled feature gates from the running controller and skip scenarios that require disabled
+features.
+
+## Common Step Patterns
+
+A list of available, implemented steps can be obtained by running:
+
+```shell
+go test test/e2e/features_test.go -d
+```
+
+## Best Practices
+
+1. **Keep scenarios focused**: Each scenario should test one specific behavior
+2. **Use Background wisely**: Common setup steps belong in Background
+3. **Reuse steps**: Leverage existing step definitions before creating new ones
+4. **Meaningful names**: Scenario names should clearly describe what is being tested
+5. **Avoid implementation details**: Focus on behavior, not implementation
+
+## References
+
+- [Godog Documentation](https://github.com/cucumber/godog)
+- [Gherkin Reference](https://cucumber.io/docs/gherkin/reference/)
+- [Cucumber Best Practices](https://cucumber.io/docs/guides/10-minute-tutorial/)
diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go
deleted file mode 100644
index b1994d7e0..000000000
--- a/test/e2e/cluster_extension_install_test.go
+++ /dev/null
@@ -1,798 +0,0 @@
-package e2e
-
-import (
- "context"
- "fmt"
- "os"
- "slices"
- "testing"
- "time"
-
- "github.com/google/go-containerregistry/pkg/crane"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- networkingv1 "k8s.io/api/networking/v1"
- "k8s.io/apimachinery/pkg/api/errors"
- apimeta "k8s.io/apimachinery/pkg/api/meta"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/rand"
- "k8s.io/utils/ptr"
-
- ocv1 "github.com/operator-framework/operator-controller/api/v1"
- testutil "github.com/operator-framework/operator-controller/internal/shared/util/test"
- . "github.com/operator-framework/operator-controller/test/helpers"
-)
-
-const (
- artifactName = "operator-controller-e2e"
- pollDuration = time.Minute
- pollInterval = time.Second
- testCatalogRefEnvVar = "CATALOG_IMG"
- testCatalogName = "test-catalog"
-)
-
-func TestClusterExtensionInstallRegistry(t *testing.T) {
- type testCase struct {
- name string
- packageName string
- }
- for _, tc := range []testCase{
- {
- name: "no registry configuration necessary",
- packageName: "test",
- },
- {
- // NOTE: This test requires an extra configuration in /etc/containers/registries.conf, which is mounted
- // for this e2e via the ./config/components/e2e/registries-conf kustomize component as part of the e2e component.
- // The goal here is to prove that "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" is
- // mapped to the "real" registry hostname ("docker-registry.operator-controller-e2e.svc.cluster.local:5000").
- name: "package requires mirror registry configuration in /etc/containers/registries.conf",
- packageName: "test-mirrored",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("When the extension bundle format is registry+v1")
-
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: tc.packageName,
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
- },
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- t.Log("It resolves the specified package with correct bundle path")
- t.Log("By creating the ClusterExtension resource")
- require.NoError(t, c.Create(context.Background(), clusterExtension))
-
- t.Log("By eventually reporting a successful resolution and bundle path")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- }, pollDuration, pollInterval)
-
- t.Log("By eventually reporting progressing as True")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually installing the package successfully")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- require.NotEmpty(ct, clusterExtension.Status.Install.Bundle)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually creating the NetworkPolicy named 'test-operator-network-policy'")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- var np networkingv1.NetworkPolicy
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: "test-operator-network-policy", Namespace: ns.Name}, &np))
- }, pollDuration, pollInterval)
-
- t.Log("By verifying that no templating occurs for registry+v1 bundle manifests")
- cm := corev1.ConfigMap{}
- require.NoError(t, c.Get(context.Background(), types.NamespacedName{Namespace: ns.Name, Name: "test-configmap"}, &cm))
- require.Contains(t, cm.Annotations, "shouldNotTemplate")
- require.Contains(t, cm.Annotations["shouldNotTemplate"], "{{ $labels.namespace }}")
- })
- }
-}
-
-func TestClusterExtensionInstallRegistryDynamic(t *testing.T) {
- // NOTE: Like 'TestClusterExtensionInstallRegistry', this test also requires extra configuration in /etc/containers/registries.conf
- packageName := "dynamic"
-
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("When the extension bundle format is registry+v1")
-
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: packageName,
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
- },
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- t.Log("It updates the registries.conf file contents")
- cm := corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: "e2e-registries-conf",
- Namespace: "olmv1-system",
- },
- Data: map[string]string{
- "registries.conf": `[[registry]]
-prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000"
-location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`,
- },
- }
- require.NoError(t, c.Update(context.Background(), &cm))
-
- t.Log("It resolves the specified package with correct bundle path")
- t.Log("By creating the ClusterExtension resource")
- require.NoError(t, c.Create(context.Background(), clusterExtension))
-
- t.Log("By eventually reporting a successful resolution and bundle path")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- }, 2*time.Minute, pollInterval)
-
- // Give the check 2 minutes instead of the typical 1 for the pod's
- // files to update from the configmap change.
- // The theoretical max time is the kubelet sync period of 1 minute +
- // ConfigMap cache TTL of 1 minute = 2 minutes
- t.Log("By eventually reporting progressing as True")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, 2*time.Minute, pollInterval)
-
- t.Log("By eventually installing the package successfully")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- require.NotEmpty(ct, clusterExtension.Status.Install.Bundle)
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) {
- t.Log("When a cluster extension is installed from a catalog")
-
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
- extraCatalogName := fmt.Sprintf("extra-test-catalog-%s", rand.String(8))
- extraCatalog, err := CreateTestCatalog(context.Background(), extraCatalogName, os.Getenv(testCatalogRefEnvVar))
- require.NoError(t, err)
-
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
- defer func(cat *ocv1.ClusterCatalog) {
- require.NoError(t, c.Delete(context.Background(), cat))
- require.Eventually(t, func() bool {
- err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{})
- return errors.IsNotFound(err)
- }, pollDuration, pollInterval)
- }(extraCatalog)
-
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- t.Log("It resolves to multiple bundle paths")
- t.Log("By creating the ClusterExtension resource")
- require.NoError(t, c.Create(context.Background(), clusterExtension))
-
- t.Log("By eventually reporting a failed resolution with multiple bundles")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- }, pollDuration, pollInterval)
-
- t.Log("By eventually reporting Progressing == True and Reason Retrying")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonRetrying, cond.Reason)
- // Catalog names are sorted alphabetically in the error message
- catalogs := []string{extensionCatalog.Name, extraCatalog.Name}
- slices.Sort(catalogs)
- expectedMessage := fmt.Sprintf("in multiple catalogs with the same priority %v", catalogs)
- require.Contains(ct, cond.Message, expectedMessage)
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) {
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("When resolving upgrade edges")
-
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- t.Log("By creating an ClusterExtension at a specified version")
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- Version: "1.0.0",
- // No Selector since this is an exact version match
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- require.NoError(t, c.Create(context.Background(), clusterExtension))
- t.Log("By eventually reporting a successful installation")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- require.Equal(ct,
- &ocv1.ClusterExtensionInstallStatus{Bundle: ocv1.BundleMetadata{
- Name: "test-operator.1.0.0",
- Version: "1.0.0",
- }},
- clusterExtension.Status.Install,
- )
-
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("It does not allow to upgrade the ClusterExtension to a non-successor version")
- t.Log("By updating the ClusterExtension resource to a non-successor version")
- // 1.2.0 does not replace/skip/skipRange 1.0.0.
- clusterExtension.Spec.Source.Catalog.Version = "1.2.0"
- require.NoError(t, c.Update(context.Background(), clusterExtension))
- t.Log("By eventually reporting an unsatisfiable resolution")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- }, pollDuration, pollInterval)
-
- t.Log("By eventually reporting Progressing == True and Reason Retrying")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, ocv1.ReasonRetrying, cond.Reason)
- require.Equal(ct, "error upgrading from currently installed version \"1.0.0\": no bundles found for package \"test\" matching version \"1.2.0\"", cond.Message)
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) {
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("When resolving upgrade edges")
-
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- t.Log("By creating an ClusterExtension at a specified version")
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- Version: "1.0.0",
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- require.NoError(t, c.Create(context.Background(), clusterExtension))
- t.Log("By eventually reporting a successful resolution")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("It allows to upgrade the ClusterExtension to a non-successor version")
- t.Log("By updating the ClusterExtension resource to a non-successor version")
- // 1.2.0 does not replace/skip/skipRange 1.0.0.
- clusterExtension.Spec.Source.Catalog.Version = "1.2.0"
- clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified
- require.NoError(t, c.Update(context.Background(), clusterExtension))
- t.Log("By eventually reporting a satisfiable resolution")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionInstallSuccessorVersion(t *testing.T) {
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("When resolving upgrade edges")
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- t.Log("By creating an ClusterExtension at a specified version")
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- Version: "1.0.0",
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- require.NoError(t, c.Create(context.Background(), clusterExtension))
- t.Log("By eventually reporting a successful resolution")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("It does allow to upgrade the ClusterExtension to any of the successor versions within non-zero major version")
- t.Log("By updating the ClusterExtension resource by skipping versions")
- // 1.0.1 replaces 1.0.0 in the test catalog
- clusterExtension.Spec.Source.Catalog.Version = "1.0.1"
- require.NoError(t, c.Update(context.Background(), clusterExtension))
- t.Log("By eventually reporting a successful resolution and bundle path")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) {
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("It resolves again when a catalog is patched with new ImageRef")
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- Selector: &metav1.LabelSelector{
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "olm.operatorframework.io/metadata.name",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{extensionCatalog.Name},
- },
- },
- },
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- t.Log("It resolves the specified package with correct bundle path")
- t.Log("By creating the ClusterExtension resource")
- require.NoError(t, c.Create(context.Background(), clusterExtension))
-
- t.Log("By reporting a successful resolution and bundle path")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- // patch imageRef tag on test-catalog image with v2 image
- t.Log("By patching the catalog ImageRef to point to the v2 catalog")
- updatedCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:v2", os.Getenv("CLUSTER_REGISTRY_HOST"))
- err := patchTestCatalog(context.Background(), extensionCatalog.Name, updatedCatalogImage)
- require.NoError(t, err)
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog))
- cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonAvailable, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually installing the package successfully")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- require.Contains(ct, clusterExtension.Status.Install.Bundle.Version, "1.3.0")
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) {
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("It resolves again when a new catalog is available")
-
- // Tag the image with the new tag
- var err error
- v1Image := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), os.Getenv("E2E_TEST_CATALOG_V1"))
- err = crane.Tag(v1Image, latestImageTag, crane.Insecure)
- require.NoError(t, err)
-
- // create a test-catalog with latest image tag
- catalogName := fmt.Sprintf("test-catalog-%s", rand.String(8))
- latestCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:latest", os.Getenv("CLUSTER_REGISTRY_HOST"))
- extensionCatalog, err := CreateTestCatalog(context.Background(), catalogName, latestCatalogImage)
- require.NoError(t, err)
- clusterExtensionName := fmt.Sprintf("clusterextension-%s", rand.String(8))
- clusterExtension := &ocv1.ClusterExtension{
- ObjectMeta: metav1.ObjectMeta{
- Name: clusterExtensionName,
- },
- }
- ns, err := CreateNamespace(context.Background(), clusterExtensionName)
- require.NoError(t, err)
- sa, err := CreateServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName)
- require.NoError(t, err)
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
- },
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- t.Log("It resolves the specified package with correct bundle path")
- t.Log("By creating the ClusterExtension resource")
- require.NoError(t, c.Create(context.Background(), clusterExtension))
-
- t.Log("By reporting a successful resolution and bundle path")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- // update tag on test-catalog image with v2 image
- t.Log("By updating the catalog tag to point to the v2 catalog")
- v2Image := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), os.Getenv("E2E_TEST_CATALOG_V2"))
- err = crane.Tag(v2Image, latestImageTag, crane.Insecure)
- require.NoError(t, err)
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog))
- cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonAvailable, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually reporting a successful resolution and bundle path")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T) {
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("It resolves again when managed content is changed")
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
- },
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- t.Log("It installs the specified package with correct bundle path")
- t.Log("By creating the ClusterExtension resource")
- require.NoError(t, c.Create(context.Background(), clusterExtension))
-
- t.Log("By reporting a successful installation")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- }, pollDuration, pollInterval)
-
- t.Log("By deleting a managed resource")
- testConfigMap := &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-configmap",
- Namespace: clusterExtension.Spec.Namespace,
- },
- }
- require.NoError(t, c.Delete(context.Background(), testConfigMap))
-
- t.Log("By eventually re-creating the managed resource")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: testConfigMap.Name, Namespace: testConfigMap.Namespace}, testConfigMap))
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionRecoversFromNoNamespaceWhenFailureFixed(t *testing.T) {
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("When the extension bundle format is registry+v1")
-
- t.Log("By not creating the Namespace and ServiceAccount")
- clusterExtension, extensionCatalog := TestInitClusterExtensionClusterCatalog(t)
-
- defer TestCleanup(t, extensionCatalog, clusterExtension, nil, nil)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
- },
- },
- },
- Namespace: clusterExtension.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: clusterExtension.Name,
- },
- }
-
- t.Log("It resolves the specified package with correct bundle path")
- t.Log("By creating the ClusterExtension resource")
- require.NoError(t, c.Create(context.Background(), clusterExtension))
-
- t.Log("By eventually reporting Progressing == True with Reason Retrying")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonRetrying, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually reporting Installed != True")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.NotEqual(ct, metav1.ConditionTrue, cond.Status)
- }, pollDuration, pollInterval)
-
- t.Log("By creating the Namespace and ServiceAccount")
- sa, ns := TestInitServiceAccountNamespace(t, clusterExtension.Name)
- defer TestCleanup(t, nil, nil, sa, ns)
-
- // NOTE: In order to ensure predictable results we need to ensure we have a single
- // known failure with a singular fix operation. Additionally, due to the exponential
- // backoff of this eventually check we MUST ensure we do not touch the ClusterExtension
- // after creating int the Namespace and ServiceAccount.
- t.Log("By eventually installing the package successfully")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- require.NotEmpty(ct, clusterExtension.Status.Install)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually reporting Progressing == True with Reason Success")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionRecoversFromExistingDeploymentWhenFailureFixed(t *testing.T) {
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("When the extension bundle format is registry+v1")
-
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
-
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
- },
- },
- },
- Namespace: clusterExtension.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: clusterExtension.Name,
- },
- }
-
- t.Log("By creating a new Deployment that can not be adopted")
- newDeployment := &appsv1.Deployment{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-operator",
- Namespace: clusterExtension.Name,
- },
- Spec: appsv1.DeploymentSpec{
- Replicas: ptr.To(int32(1)),
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"app": "test-operator"},
- },
- Template: corev1.PodTemplateSpec{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{"app": "test-operator"},
- },
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Command: []string{"sleep", "1000"},
- Image: "busybox",
- ImagePullPolicy: corev1.PullAlways,
- Name: "busybox",
- SecurityContext: &corev1.SecurityContext{
- RunAsNonRoot: ptr.To(true),
- RunAsUser: ptr.To(int64(1000)),
- AllowPrivilegeEscalation: ptr.To(false),
- Capabilities: &corev1.Capabilities{
- Drop: []corev1.Capability{
- "ALL",
- },
- },
- SeccompProfile: &corev1.SeccompProfile{
- Type: corev1.SeccompProfileTypeRuntimeDefault,
- },
- },
- },
- },
- },
- },
- },
- }
- require.NoError(t, c.Create(context.Background(), newDeployment))
-
- t.Log("It resolves the specified package with correct bundle path")
- t.Log("By creating the ClusterExtension resource")
- require.NoError(t, c.Create(context.Background(), clusterExtension))
-
- t.Log("By eventually reporting Progressing == True with Reason Retrying")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonRetrying, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually failing to install the package successfully due to no adoption support")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionFalse, cond.Status)
- // TODO: We probably _should_ be testing the reason here, but helm and boxcutter applier have different reasons.
- // Maybe we change helm to use "Absent" rather than "Failed" since the Progressing condition already captures
- // the failure?
- //require.Equal(ct, ocv1.ReasonFailed, cond.Reason)
- require.Contains(ct, cond.Message, "No bundle installed")
- }, pollDuration, pollInterval)
-
- t.Log("By deleting the new Deployment")
- require.NoError(t, c.Delete(context.Background(), newDeployment))
-
- // NOTE: In order to ensure predictable results we need to ensure we have a single
- // known failure with a singular fix operation. Additionally, due to the exponential
- // backoff of this eventually check we MUST ensure we do not touch the ClusterExtension
- // after deleting the Deployment.
- t.Log("By eventually installing the package successfully")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- require.NotEmpty(ct, clusterExtension.Status.Install)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually reporting Progressing == True with Reason Success")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-}
diff --git a/test/e2e/cluster_extension_revision_test.go b/test/e2e/cluster_extension_revision_test.go
deleted file mode 100644
index 5c21e66ab..000000000
--- a/test/e2e/cluster_extension_revision_test.go
+++ /dev/null
@@ -1,261 +0,0 @@
-package e2e
-
-import (
- "context"
- "fmt"
- "os"
- "slices"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- corev1 "k8s.io/api/core/v1"
- apimeta "k8s.io/apimachinery/pkg/api/meta"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/tools/remotecommand"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- ocv1 "github.com/operator-framework/operator-controller/api/v1"
- "github.com/operator-framework/operator-controller/internal/operator-controller/features"
- . "github.com/operator-framework/operator-controller/internal/shared/util/test"
- . "github.com/operator-framework/operator-controller/test/helpers"
-)
-
-func TestClusterExtensionRevision(t *testing.T) {
- SkipIfFeatureGateDisabled(t, string(features.BoxcutterRuntime))
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("When the extension bundle format is registry+v1")
-
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer CollectTestArtifacts(t, artifactName, c, cfg)
-
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- Version: "1.0.1",
- // we would also like to force upgrade to 1.0.2, which is not within the upgrade path
- UpgradeConstraintPolicy: ocv1.UpgradeConstraintPolicySelfCertified,
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
- },
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- t.Log("It resolves the specified package with correct bundle path")
- t.Log("By creating the ClusterExtension resource")
- require.NoError(t, c.Create(context.Background(), clusterExtension))
-
- t.Log("By eventually reporting a successful resolution and bundle path")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- }, pollDuration, pollInterval)
-
- t.Log("By revision-1 eventually reporting Progressing:True:Succeeded and Available:True:ProbesSucceeded conditions")
- var clusterExtensionRevision ocv1.ClusterExtensionRevision
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision))
- cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
-
- cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually reporting progressing as True")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually installing the package successfully")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- require.NotEmpty(ct, clusterExtension.Status.Install.Bundle)
- require.Len(ct, clusterExtension.Status.ActiveRevisions, 1)
- require.Equal(ct, clusterExtension.Status.ActiveRevisions[0].Name, clusterExtensionRevision.Name)
- require.Empty(ct, clusterExtension.Status.ActiveRevisions[0].Conditions)
- }, pollDuration, pollInterval)
-
- t.Log("Check Deployment Availability Probe")
- t.Log("By making the operator pod not ready")
- podName := getPodName(t, clusterExtension.Spec.Namespace, client.MatchingLabels{"app": "olme2etest"})
- podExec(t, clusterExtension.Spec.Namespace, podName, []string{"rm", "/var/www/ready"})
-
- t.Log("By revision-1 eventually reporting Progressing:True:Succeeded and Available:False:ProbeFailure conditions")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision))
- cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
-
- cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionFalse, cond.Status)
- require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbeFailure, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By propagating Available:False to ClusterExtension")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionFalse, cond.Status)
- }, pollDuration, pollInterval)
-
- t.Log("By making the operator pod ready")
- podName = getPodName(t, clusterExtension.Spec.Namespace, client.MatchingLabels{"app": "olme2etest"})
- podExec(t, clusterExtension.Spec.Namespace, podName, []string{"touch", "/var/www/ready"})
-
- t.Log("By revision-1 eventually reporting Progressing:True:Succeeded and Available:True:ProbesSucceeded conditions")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision))
- cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
-
- cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By propagating Available:True to ClusterExtension")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- }, pollDuration, pollInterval)
-
- t.Log("Check archiving")
- t.Log("By upgrading the cluster extension to v1.2.0")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- clusterExtension.Spec.Source.Catalog.Version = "1.2.0"
- require.NoError(t, c.Update(context.Background(), clusterExtension))
- }, pollDuration, pollInterval)
-
- t.Log("By revision-2 eventually reporting Progressing:True:Succeeded and Available:True:ProbesSucceeded conditions")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-2", clusterExtension.Name)}, &clusterExtensionRevision))
- cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
-
- cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually reporting progressing, available, and installed as True")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
-
- cond = apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- require.NotEmpty(ct, clusterExtension.Status.Install.Bundle)
- }, pollDuration, pollInterval)
-
- t.Log("By revision-1 eventually reporting Progressing:False:Archived and Available:Unknown:Archived conditions")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision))
- cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionFalse, cond.Status)
- require.Equal(ct, ocv1.ClusterExtensionRevisionReasonArchived, cond.Reason)
-
- cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionUnknown, cond.Status)
- require.Equal(ct, ocv1.ClusterExtensionRevisionReasonArchived, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By upgrading the cluster extension to v1.0.2 containing bad image reference")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- clusterExtension.Spec.Source.Catalog.Version = "1.0.2"
- require.NoError(t, c.Update(context.Background(), clusterExtension))
- }, pollDuration, pollInterval)
-
- t.Log("By revision-3 eventually reporting Progressing:True:Succeeded and Available:False:ProbeFailure conditions")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-3", clusterExtension.Name)}, &clusterExtensionRevision))
- cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
-
- cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionFalse, cond.Status)
- require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbeFailure, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By eventually reporting more than one active revision")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- require.Len(ct, clusterExtension.Status.ActiveRevisions, 2)
- require.Equal(ct, clusterExtension.Status.ActiveRevisions[0].Name, fmt.Sprintf("%s-2", clusterExtension.Name))
- require.Equal(ct, clusterExtension.Status.ActiveRevisions[1].Name, fmt.Sprintf("%s-3", clusterExtension.Name))
- require.Empty(ct, clusterExtension.Status.ActiveRevisions[0].Conditions)
- require.NotEmpty(ct, clusterExtension.Status.ActiveRevisions[1].Conditions)
- }, pollDuration, pollInterval)
-}
-
-func getPodName(t *testing.T, podNamespace string, matchingLabels client.MatchingLabels) string {
- var podList corev1.PodList
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.List(context.Background(), &podList, client.InNamespace(podNamespace), matchingLabels))
- podList.Items = slices.DeleteFunc(podList.Items, func(pod corev1.Pod) bool {
- // Ignore terminating pods
- return pod.DeletionTimestamp != nil
- })
- require.Len(ct, podList.Items, 1)
- }, pollDuration, pollInterval)
- return podList.Items[0].Name
-}
-
-func podExec(t *testing.T, podNamespace string, podName string, cmd []string) {
- req := cs.CoreV1().RESTClient().Post().Resource("pods").Name(podName).Namespace(podNamespace).SubResource("exec")
- req.VersionedParams(&corev1.PodExecOptions{
- Command: cmd,
- Stdout: true,
- }, scheme.ParameterCodec)
- exec, err := remotecommand.NewSPDYExecutor(ctrl.GetConfigOrDie(), "POST", req.URL())
- require.NoError(t, err)
- err = exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{Stdout: os.Stdout})
- require.NoError(t, err)
-}
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
deleted file mode 100644
index 847f5c753..000000000
--- a/test/e2e/e2e_suite_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package e2e
-
-import (
- "context"
- "fmt"
- "os"
- "testing"
-
- apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/rest"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- ocv1 "github.com/operator-framework/operator-controller/api/v1"
- "github.com/operator-framework/operator-controller/internal/operator-controller/scheme"
- testutil "github.com/operator-framework/operator-controller/internal/shared/util/test"
-)
-
-var (
- cfg *rest.Config
- c client.Client
- cs *kubernetes.Clientset
-)
-
-const (
- testSummaryOutputEnvVar = "E2E_SUMMARY_OUTPUT"
- latestImageTag = "latest"
-)
-
-func TestMain(m *testing.M) {
- cfg = ctrl.GetConfigOrDie()
-
- var err error
- utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme))
- c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
- utilruntime.Must(err)
-
- cs, err = kubernetes.NewForConfig(cfg)
- utilruntime.Must(err)
-
- res := m.Run()
- path := os.Getenv(testSummaryOutputEnvVar)
- if path == "" {
- fmt.Printf("Note: E2E_SUMMARY_OUTPUT is unset; skipping summary generation")
- } else {
- err = testutil.PrintSummary(path)
- if err != nil {
- // Fail the run if alerts are found
- fmt.Printf("%v", err)
- os.Exit(1)
- }
- }
- os.Exit(res)
-}
-
-// patchTestCatalog will patch the existing clusterCatalog on the test cluster, provided
-// the context, catalog name, and the image reference. It returns an error
-// if any errors occurred while updating the catalog.
-func patchTestCatalog(ctx context.Context, name string, newImageRef string) error {
- // Fetch the existing ClusterCatalog
- catalog := &ocv1.ClusterCatalog{}
- err := c.Get(ctx, client.ObjectKey{Name: name}, catalog)
- if err != nil {
- return err
- }
-
- // Update the ImageRef
- catalog.Spec.Source.Image.Ref = newImageRef
-
- // Patch the ClusterCatalog
- err = c.Update(ctx, catalog)
- if err != nil {
- return err
- }
-
- return err
-}
diff --git a/test/e2e/features/install.feature b/test/e2e/features/install.feature
new file mode 100644
index 000000000..ba59ffe7d
--- /dev/null
+++ b/test/e2e/features/install.feature
@@ -0,0 +1,299 @@
+Feature: Install ClusterExtension
+
+  As an OLM user I would like to install a cluster extension from a catalog,
+  or get appropriate information in case of an error.
+
+ Background:
+ Given OLM is available
+ And ClusterCatalog "test" serves bundles
+ And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE}
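+    # ${NAME} and ${TEST_NAMESPACE} are placeholders that the step
+    # implementations substitute at runtime for each scenario.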
+
+ Scenario: Install latest available version
+ When ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ Then ClusterExtension is rolled out
+ And ClusterExtension is available
+ And bundle "test-operator.1.2.0" is installed in version "1.2.0"
+ And resource "networkpolicy/test-operator-network-policy" is installed
+ And resource "configmap/test-configmap" is installed
+ And resource "deployment/test-operator" is installed
+
+ @mirrored-registry
+ Scenario Outline: Install latest available version from mirrored registry
+ When ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+          packageName: <package-name>
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ Then ClusterExtension is rolled out
+ And ClusterExtension is available
+    And bundle "<package-name>-operator.1.2.0" is installed in version "1.2.0"
+ And resource "networkpolicy/test-operator-network-policy" is installed
+ And resource "configmap/test-configmap" is installed
+ And resource "deployment/test-operator" is installed
+
+ Examples:
+ | package-name |
+ | test-mirrored |
+ | dynamic |
+
+
+  Scenario: Report that the bundle cannot be installed when it exists in multiple catalogs with the same priority
+ Given ClusterCatalog "extra" serves bundles
+ When ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ """
+ Then ClusterExtension reports Progressing as True with Reason Retrying and Message:
+ """
+ found bundles for package "test" in multiple catalogs with the same priority [extra-catalog test-catalog]
+ """
+
+ @SingleOwnNamespaceInstallSupport
+  Scenario: watchNamespace config is required for an extension supporting single-namespace install
+ Given ServiceAccount "olm-admin" in test namespace is cluster admin
+ And resource is applied
+ """
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: single-namespace-operator-target
+ """
+ And ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-admin
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: single-namespace-operator
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ And ClusterExtension reports Progressing as True with Reason Retrying and Message:
+ """
+ error for resolved bundle "single-namespace-operator.1.0.0" with version "1.0.0":
+ invalid ClusterExtension configuration: invalid configuration: required field "watchNamespace" is missing
+ """
+ When ClusterExtension is updated to set config.watchNamespace field
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-admin
+ config:
+ configType: Inline
+ inline:
+ watchNamespace: single-namespace-operator-target # added
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: single-namespace-operator
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ Then ClusterExtension reports Installed as True
+ And bundle "single-namespace-operator.1.0.0" is installed in version "1.0.0"
+ And operator "single-namespace-operator" target namespace is "single-namespace-operator-target"
+
+ @SingleOwnNamespaceInstallSupport
+  Scenario: watchNamespace config is required for an extension supporting own-namespace install
+ Given ServiceAccount "olm-admin" in test namespace is cluster admin
+ And ClusterExtension is applied without the watchNamespace configuration
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-admin
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: own-namespace-operator
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ And ClusterExtension reports Progressing as True with Reason Retrying and Message:
+ """
+ error for resolved bundle "own-namespace-operator.1.0.0" with version
+ "1.0.0": invalid ClusterExtension configuration: invalid configuration: required
+ field "watchNamespace" is missing
+ """
+ And ClusterExtension is updated to include the watchNamespace configuration
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-admin
+ config:
+ configType: Inline
+ inline:
+ watchNamespace: some-ns # added, but not own namespace
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: own-namespace-operator
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ And ClusterExtension reports Progressing as True with Reason Retrying and Message:
+ """
+ error for resolved bundle "own-namespace-operator.1.0.0" with version
+ "1.0.0": invalid ClusterExtension configuration: invalid configuration: 'some-ns'
+ is not valid ownNamespaceInstallMode: invalid value "some-ns": watchNamespace
+ must be "${TEST_NAMESPACE}" (the namespace where the operator is installed) because this
+ operator only supports OwnNamespace install mode
+ """
+ When ClusterExtension is updated to set watchNamespace to own namespace value
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-admin
+ config:
+ configType: Inline
+ inline:
+ watchNamespace: ${TEST_NAMESPACE} # own namespace
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: own-namespace-operator
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ Then ClusterExtension is rolled out
+ And ClusterExtension is available
+ And operator "own-namespace-operator" target namespace is "${TEST_NAMESPACE}"
+
+ @WebhookProviderCertManager
+ Scenario: Install operator having webhooks
+ Given ServiceAccount "olm-admin" in test namespace is cluster admin
+ When ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-admin
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: webhook-operator
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ Then ClusterExtension is rolled out
+ And ClusterExtension is available
+ And resource apply fails with error msg containing "Invalid value: false: Spec.Valid must be true"
+ """
+ apiVersion: webhook.operators.coreos.io/v1
+ kind: WebhookTest
+ metadata:
+ name: ${NAME}
+ namespace: ${TEST_NAMESPACE}
+ spec:
+ valid: false # webhook rejects it as invalid value
+ """
+ And resource is applied
+ """
+ apiVersion: webhook.operators.coreos.io/v1
+ kind: WebhookTest
+ metadata:
+ name: ${NAME}
+ namespace: ${TEST_NAMESPACE}
+ spec:
+ valid: true
+ """
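+    # The match below reads the resource as v2, exercising the conversion
+    # webhook; "mutate: true" is expected to be set by the mutating webhook.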
+ And resource "webhooktest/${NAME}" matches
+ """
+ apiVersion: webhook.operators.coreos.io/v2
+ kind: WebhookTest
+ metadata:
+ name: ${NAME}
+ namespace: ${TEST_NAMESPACE}
+ spec:
+ conversion:
+ valid: true
+ mutate: true
+ """
+ And resource "webhooktest.v1.webhook.operators.coreos.io/${NAME}" matches
+ """
+ apiVersion: webhook.operators.coreos.io/v1
+ kind: WebhookTest
+ metadata:
+ name: ${NAME}
+ namespace: ${TEST_NAMESPACE}
+ spec:
+ valid: true
+ mutate: true
+ """
diff --git a/test/e2e/features/metrics.feature b/test/e2e/features/metrics.feature
new file mode 100644
index 000000000..ccb719198
--- /dev/null
+++ b/test/e2e/features/metrics.feature
@@ -0,0 +1,15 @@
+Feature: Expose component metrics
+
+ Background:
+ Given OLM is available
+
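+  # <component> is replaced with each row of the Examples table below.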
+  Scenario Outline: <component> exposes metrics
+    Given ServiceAccount "metrics-reader" in test namespace has permissions to fetch "<component>" metrics
+    When ServiceAccount "metrics-reader" sends request to "/metrics" endpoint of "<component>" service
+ Then Prometheus metrics are returned in the response
+
+ Examples:
+ | component |
+ | operator-controller |
+ | catalogd |
+
\ No newline at end of file
diff --git a/test/e2e/features/recover.feature b/test/e2e/features/recover.feature
new file mode 100644
index 000000000..0438f2d1a
--- /dev/null
+++ b/test/e2e/features/recover.feature
@@ -0,0 +1,117 @@
+Feature: Recover cluster extension from errors that might occur during its lifetime
+
+ Background:
+ Given OLM is available
+ And ClusterCatalog "test" serves bundles
+
+
+ Scenario: Restore removed resource
+ Given ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE}
+ And ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ And ClusterExtension is available
+ And resource "configmap/test-configmap" exists
+ When resource "configmap/test-configmap" is removed
+ Then resource "configmap/test-configmap" is eventually restored
+
+ Scenario: Install ClusterExtension after target namespace becomes available
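+    # The ServiceAccount is created only in the When step below, so the
+    # initial installation is expected to retry until it becomes available.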
+ Given ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ And ClusterExtension reports Progressing as True with Reason Retrying
+ When ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE}
+ Then ClusterExtension is available
+ And ClusterExtension reports Progressing as True with Reason Succeeded
+
+ Scenario: Install ClusterExtension after conflicting resource is removed
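+    # The pre-applied Deployment below shares its name with the bundle's
+    # Deployment and blocks installation until it is removed.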
+ Given ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE}
+ And resource is applied
+ """
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: test-operator
+ namespace: ${TEST_NAMESPACE}
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: test-operator
+ template:
+ metadata:
+ labels:
+ app: test-operator
+ spec:
+ containers:
+ - command:
+ - "sleep"
+ args:
+ - "1000"
+ image: busybox:1.36
+ imagePullPolicy: IfNotPresent
+ name: busybox
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ seccompProfile:
+ type: RuntimeDefault
+ """
+ And ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ And ClusterExtension reports Progressing as True with Reason Retrying
+ And ClusterExtension reports Installed as False
+ When resource "deployment/test-operator" is removed
+ Then ClusterExtension is available
+ And ClusterExtension reports Progressing as True with Reason Succeeded
+ And ClusterExtension reports Installed as True
diff --git a/test/e2e/features/status.feature b/test/e2e/features/status.feature
new file mode 100644
index 000000000..5c8a3141d
--- /dev/null
+++ b/test/e2e/features/status.feature
@@ -0,0 +1,45 @@
+Feature: Report status of the managed ClusterExtension workload
+
+  As an OLM user, I would like the ClusterExtension to report availability
+  changes of its managed workload.
+
+ Background:
+ Given OLM is available
+ And ClusterCatalog "test" serves bundles
+ And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE}
+ And ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ version: 1.0.0
+ """
+ And ClusterExtension is rolled out
+ And ClusterExtension is available
+
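+  # Scenarios tagged @BoxcutterRuntime are expected to run only when the
+  # Boxcutter-based runtime (ClusterExtensionRevision support) is enabled.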
+ @BoxcutterRuntime
+ Scenario: Report availability change when managed workload is not ready
+ When resource "deployment/test-operator" reports as not ready
+ Then ClusterExtension reports Available as False with Reason ProbeFailure
+ And ClusterExtensionRevision "${NAME}-1" reports Available as False with Reason ProbeFailure
+
+ @BoxcutterRuntime
+ Scenario: Report availability change when managed workload restores its readiness
+ Given resource "deployment/test-operator" reports as not ready
+ And ClusterExtension reports Available as False with Reason ProbeFailure
+ And ClusterExtensionRevision "${NAME}-1" reports Available as False with Reason ProbeFailure
+ When resource "deployment/test-operator" reports as ready
+ Then ClusterExtension is available
+ And ClusterExtensionRevision "${NAME}-1" reports Available as True with Reason ProbesSucceeded
\ No newline at end of file
diff --git a/test/e2e/features/update.feature b/test/e2e/features/update.feature
new file mode 100644
index 000000000..dee45e32a
--- /dev/null
+++ b/test/e2e/features/update.feature
@@ -0,0 +1,244 @@
+Feature: Update ClusterExtension
+
+  As an OLM user I would like to update a ClusterExtension from a catalog,
+  or get appropriate information in case of an error.
+
+ Background:
+ Given OLM is available
+ And ClusterCatalog "test" serves bundles
+ And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE}
+
+ Scenario: Update to a successor version
+ Given ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ version: 1.0.0
+ """
+ And ClusterExtension is rolled out
+ And ClusterExtension is available
+ When ClusterExtension is updated to version "1.0.1"
+ Then ClusterExtension is rolled out
+ And ClusterExtension is available
+ And bundle "test-operator.1.0.1" is installed in version "1.0.1"
+
+  Scenario: Cannot update extension to a non-successor version
+ Given ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ version: 1.0.0
+ """
+ And ClusterExtension is rolled out
+ And ClusterExtension is available
+ When ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ version: 1.2.0
+ """
+ Then ClusterExtension reports Progressing as True with Reason Retrying and Message:
+ """
+ error upgrading from currently installed version "1.0.0": no bundles found for package "test" matching version "1.2.0"
+ """
+
+  Scenario: Force update to a non-successor version
+ Given ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ version: 1.0.0
+ """
+ And ClusterExtension is rolled out
+ And ClusterExtension is available
+ When ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ version: 1.2.0
+          upgradeConstraintPolicy: SelfCertified # added: opts out of upgrade-graph enforcement
+ """
+ Then ClusterExtension is rolled out
+ And ClusterExtension is available
+ And bundle "test-operator.1.2.0" is installed in version "1.2.0"
+
+ @catalog-updates
+ Scenario: Auto update when new version becomes available in the new catalog image ref
+ Given ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ And ClusterExtension is rolled out
+ And ClusterExtension is available
+ And bundle "test-operator.1.2.0" is installed in version "1.2.0"
+ When ClusterCatalog "test" is updated to version "v2"
+ Then bundle "test-operator.1.3.0" is installed in version "1.3.0"
+
+ Scenario: Auto update when new version becomes available in the same catalog image ref
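+    # Moving the "latest" tag to new content updates the catalog without
+    # changing the image reference in the ClusterCatalog spec.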
+ Given "test" catalog image version "v1" is also tagged as "latest"
+ And ClusterCatalog "test" is updated to version "latest"
+ And ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ """
+ And ClusterExtension is rolled out
+ And ClusterExtension is available
+ And bundle "test-operator.1.2.0" is installed in version "1.2.0"
+ When ClusterCatalog "test" image version "v2" is also tagged as "latest"
+ Then bundle "test-operator.1.3.0" is installed in version "1.3.0"
+
+ @BoxcutterRuntime
+ Scenario: Each update creates a new revision
+ Given ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ version: 1.0.0
+ upgradeConstraintPolicy: SelfCertified
+ """
+ And ClusterExtension is rolled out
+ And ClusterExtension is available
+ When ClusterExtension is updated to version "1.2.0"
+ Then bundle "test-operator.1.2.0" is installed in version "1.2.0"
+ And ClusterExtension is rolled out
+ And ClusterExtension is available
+ And ClusterExtension reports "${NAME}-2" as active revision
+ And ClusterExtensionRevision "${NAME}-2" reports Progressing as True with Reason Succeeded
+ And ClusterExtensionRevision "${NAME}-2" reports Available as True with Reason ProbesSucceeded
+ And ClusterExtensionRevision "${NAME}-1" is archived
+
+ @BoxcutterRuntime
+ Scenario: Report all active revisions on ClusterExtension
+ Given ClusterExtension is applied
+ """
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: ${NAME}
+ spec:
+ namespace: ${TEST_NAMESPACE}
+ serviceAccount:
+ name: olm-sa
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: test
+ selector:
+ matchLabels:
+ "olm.operatorframework.io/metadata.name": test-catalog
+ version: 1.0.0
+ upgradeConstraintPolicy: SelfCertified
+ """
+ And ClusterExtension is rolled out
+ And ClusterExtension is available
+ When ClusterExtension is updated to version "1.0.2"
+ Then ClusterExtension reports "${NAME}-1, ${NAME}-2" as active revisions
+ And ClusterExtensionRevision "${NAME}-2" reports Progressing as True with Reason Succeeded
+ And ClusterExtensionRevision "${NAME}-2" reports Available as False with Reason ProbeFailure
+
diff --git a/test/e2e/features_test.go b/test/e2e/features_test.go
new file mode 100644
index 000000000..706c822ef
--- /dev/null
+++ b/test/e2e/features_test.go
@@ -0,0 +1,75 @@
+package e2e
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "testing"
+
+ "github.com/cucumber/godog"
+ "github.com/cucumber/godog/colors"
+ "github.com/spf13/pflag"
+
+ testutil "github.com/operator-framework/operator-controller/internal/shared/util/test"
+ "github.com/operator-framework/operator-controller/test/e2e/steps"
+)
+
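+// opts holds the default godog configuration: load feature files from the
+// local "features" directory and print pretty, uncolored output with
+// scenarios running sequentially.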
+var opts = godog.Options{
+ Format: "pretty",
+ Paths: []string{"features"},
+ Output: colors.Colored(os.Stdout),
+ Concurrency: 1,
+ NoColors: true,
+}
+
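+// init registers "godog."-prefixed command line flags (e.g. --godog.tags,
+// --godog.format) so the defaults above can be overridden per run.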
+func init() {
+ godog.BindCommandLineFlags("godog.", &opts)
+}
+
+func TestMain(m *testing.M) {
+ // parse CLI arguments
+ pflag.Parse()
+ opts.Paths = pflag.Args()
+
+	// Run the godog feature suite; Run reports an exit status code.
+ sc := godog.TestSuite{
+ TestSuiteInitializer: InitializeSuite,
+ ScenarioInitializer: InitializeScenario,
+ Options: &opts,
+ }.Run()
+
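+
+	// Keep the worse (higher) of the two exit codes: the godog suite status
+	// and the status of any remaining standard Go tests.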
+ if st := m.Run(); st > sc {
+ sc = st
+ }
+ switch sc {
+ // 0 - success
+ case 0:
+
+ path := os.Getenv("E2E_SUMMARY_OUTPUT")
+ if path == "" {
+ fmt.Println("Note: E2E_SUMMARY_OUTPUT is unset; skipping summary generation")
+ } else {
+ if err := testutil.PrintSummary(path); err != nil {
+ // Fail the run if alerts are found
+ fmt.Printf("%v", err)
+ os.Exit(1)
+ }
+ }
+ return
+
+ // 1 - failed
+ // 2 - command line usage error
+ // 128 - or higher, os signal related error exit codes
+ default:
+ log.Fatalf("non-zero status returned (%d), failed to run feature tests", sc)
+ }
+}
+
+func InitializeSuite(tc *godog.TestSuiteContext) {
+ tc.BeforeSuite(steps.BeforeSuite)
+}
+
+func InitializeScenario(sc *godog.ScenarioContext) {
+ steps.RegisterSteps(sc)
+ steps.RegisterHooks(sc)
+}
diff --git a/test/e2e/metrics_test.go b/test/e2e/metrics_test.go
deleted file mode 100644
index e1fbb90f3..000000000
--- a/test/e2e/metrics_test.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Package e2e contains end-to-end tests to verify that the metrics endpoints
-// for both components. Metrics are exported and accessible by authorized users through
-// RBAC and ServiceAccount tokens.
-//
-// These tests perform the following steps:
-// 1. Create a ClusterRoleBinding to grant necessary permissions for accessing metrics.
-// 2. Generate a ServiceAccount token for authentication.
-// 3. Deploy a curl pod to interact with the metrics endpoint.
-// 4. Wait for the curl pod to become ready.
-// 5. Execute a curl command from the pod to validate the metrics endpoint.
-// 6. Clean up all resources created during the test, such as the ClusterRoleBinding and curl pod.
-//
-//nolint:gosec
-package e2e
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "os/exec"
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
- "k8s.io/apimachinery/pkg/util/rand"
-
- testutil "github.com/operator-framework/operator-controller/internal/shared/util/test"
-)
-
-// TestOperatorControllerMetricsExportedEndpoint verifies that the metrics endpoint for the operator controller
-func TestOperatorControllerMetricsExportedEndpoint(t *testing.T) {
- client := testutil.FindK8sClient(t)
- curlNamespace := createRandomNamespace(t, client)
- componentNamespace := getComponentNamespace(t, client, "control-plane=operator-controller-controller-manager")
-
- config := NewMetricsTestConfig(
- client,
- curlNamespace,
- componentNamespace,
- "operator-controller-metrics-reader",
- "operator-controller-metrics-binding",
- "operator-controller-metrics-reader",
- "oper-curl-metrics",
- "app.kubernetes.io/name=operator-controller",
- operatorControllerMetricsPort,
- )
-
- config.run(t)
-}
-
-// TestCatalogdMetricsExportedEndpoint verifies that the metrics endpoint for catalogd
-func TestCatalogdMetricsExportedEndpoint(t *testing.T) {
- client := testutil.FindK8sClient(t)
- curlNamespace := createRandomNamespace(t, client)
- componentNamespace := getComponentNamespace(t, client, "control-plane=catalogd-controller-manager")
-
- config := NewMetricsTestConfig(
- client,
- curlNamespace,
- componentNamespace,
- "catalogd-metrics-reader",
- "catalogd-metrics-binding",
- "catalogd-metrics-reader",
- "catalogd-curl-metrics",
- "app.kubernetes.io/name=catalogd",
- catalogdMetricsPort,
- )
-
- config.run(t)
-}
-
-// MetricsTestConfig holds the necessary configurations for testing metrics endpoints.
-type MetricsTestConfig struct {
- client string
- namespace string
- componentNamespace string
- clusterRole string
- clusterBinding string
- serviceAccount string
- curlPodName string
- componentSelector string
- metricsPort int
-}
-
-// NewMetricsTestConfig initializes a new MetricsTestConfig.
-func NewMetricsTestConfig(client, namespace, componentNamespace, clusterRole, clusterBinding, serviceAccount, curlPodName, componentSelector string, metricsPort int) *MetricsTestConfig {
- return &MetricsTestConfig{
- client: client,
- namespace: namespace,
- componentNamespace: componentNamespace,
- clusterRole: clusterRole,
- clusterBinding: clusterBinding,
- serviceAccount: serviceAccount,
- curlPodName: curlPodName,
- componentSelector: componentSelector,
- metricsPort: metricsPort,
- }
-}
-
-// run will execute all steps of those tests
-func (c *MetricsTestConfig) run(t *testing.T) {
- defer c.cleanup(t)
-
- c.createMetricsClusterRoleBinding(t)
- token := c.getServiceAccountToken(t)
- c.createCurlMetricsPod(t)
- c.validate(t, token)
-}
-
-// createMetricsClusterRoleBinding to binding and expose the metrics
-func (c *MetricsTestConfig) createMetricsClusterRoleBinding(t *testing.T) {
- t.Logf("Creating ClusterRoleBinding %s for %s in namespace %s", c.clusterBinding, c.serviceAccount, c.namespace)
- cmd := exec.Command(c.client, "create", "clusterrolebinding", c.clusterBinding,
- "--clusterrole="+c.clusterRole,
- "--serviceaccount="+c.namespace+":"+c.serviceAccount)
- output, err := cmd.CombinedOutput()
- require.NoError(t, err, "Error creating ClusterRoleBinding: %s", string(output))
-}
-
-// getServiceAccountToken return the token requires to have access to the metrics
-func (c *MetricsTestConfig) getServiceAccountToken(t *testing.T) string {
- t.Logf("Creating ServiceAccount %q in namespace %q", c.serviceAccount, c.namespace)
- output, err := exec.Command(c.client, "create", "serviceaccount", c.serviceAccount, "--namespace="+c.namespace).CombinedOutput()
- require.NoError(t, err, "Error creating service account: %v", string(output))
-
- t.Logf("Generating ServiceAccount token for %q in namespace %q", c.serviceAccount, c.namespace)
- cmd := exec.Command(c.client, "create", "token", c.serviceAccount, "--namespace", c.namespace)
- tokenOutput, tokenCombinedOutput, err := stdoutAndCombined(cmd)
- require.NoError(t, err, "Error creating token: %s", string(tokenCombinedOutput))
- return string(bytes.TrimSpace(tokenOutput))
-}
-
-// createCurlMetricsPod creates the Pod with curl image to allow check if the metrics are working
-func (c *MetricsTestConfig) createCurlMetricsPod(t *testing.T) {
- t.Logf("Creating curl pod (%s/%s) to validate the metrics endpoint", c.namespace, c.curlPodName)
- cmd := exec.Command(c.client, "run", c.curlPodName,
- "--image=quay.io/curl/curl:8.15.0",
- "--namespace", c.namespace,
- "--restart=Never",
- "--overrides", `{
- "spec": {
- "terminationGradePeriodSeconds": 0,
- "containers": [{
- "name": "curl",
- "image": "quay.io/curl/curl:8.15.0",
- "command": ["sh", "-c", "sleep 3600"],
- "securityContext": {
- "allowPrivilegeEscalation": false,
- "capabilities": {"drop": ["ALL"]},
- "runAsNonRoot": true,
- "runAsUser": 1000,
- "seccompProfile": {"type": "RuntimeDefault"}
- }
- }],
- "serviceAccountName": "`+c.serviceAccount+`"
- }
- }`)
- output, err := cmd.CombinedOutput()
- require.NoError(t, err, "Error creating curl pod: %s", string(output))
-}
-
-// validate verifies if is possible to access the metrics from all pods
-func (c *MetricsTestConfig) validate(t *testing.T, token string) {
- t.Log("Waiting for the curl pod to be ready")
- waitCmd := exec.Command(c.client, "wait", "--for=condition=Ready", "pod", c.curlPodName, "--namespace", c.namespace, "--timeout=60s")
- waitOutput, waitErr := waitCmd.CombinedOutput()
- require.NoError(t, waitErr, "Error waiting for curl pod to be ready: %s", string(waitOutput))
-
- // Get all pod IPs for the component
- podIPs := c.getComponentPodIPs(t)
- require.NotEmpty(t, podIPs, "No pod IPs found for component")
- t.Logf("Found %d pod(s) to scrape metrics from", len(podIPs))
-
- // Validate metrics endpoint for each pod
- for i, podIP := range podIPs {
-		// Build metrics URL with pod FQDN: <pod-ip-dashes>.<namespace>.pod.cluster.local
- // Convert IP dots to dashes (e.g., 10.244.0.11 -> 10-244-0-11)
- podIPDashes := strings.ReplaceAll(podIP, ".", "-")
- metricsURL := fmt.Sprintf("https://%s.%s.pod.cluster.local:%d/metrics", podIPDashes, c.componentNamespace, c.metricsPort)
- t.Logf("Validating metrics endpoint for pod %d/%d: %s", i+1, len(podIPs), metricsURL)
-
- curlCmd := exec.Command(c.client, "exec", c.curlPodName, "--namespace", c.namespace, "--",
- "curl", "-v", "-k", "-H", "Authorization: Bearer "+token, metricsURL)
- output, err := curlCmd.CombinedOutput()
- require.NoError(t, err, "Error calling metrics endpoint %s: %s", metricsURL, string(output))
- require.Contains(t, string(output), "200 OK", "Metrics endpoint %s did not return 200 OK", metricsURL)
- t.Logf("Successfully scraped metrics from pod %d/%d", i+1, len(podIPs))
- }
-}
-
-// cleanup removes the created resources. Uses a context with timeout to prevent hangs.
-func (c *MetricsTestConfig) cleanup(t *testing.T) {
- type objDesc struct {
- resourceName string
- name string
- namespace string
- }
- objects := []objDesc{
- {"clusterrolebinding", c.clusterBinding, ""},
- {"pod", c.curlPodName, c.namespace},
- {"serviceaccount", c.serviceAccount, c.namespace},
- {"namespace", c.namespace, ""},
- }
-
- t.Log("Cleaning up resources")
- for _, obj := range objects {
- args := []string{"delete", obj.resourceName, obj.name, "--ignore-not-found=true", "--force"}
- if obj.namespace != "" {
- args = append(args, "--namespace", obj.namespace)
- }
- output, err := exec.Command(c.client, args...).CombinedOutput()
- require.NoError(t, err, "Error deleting %q %q in namespace %q: %v", obj.resourceName, obj.name, obj.namespace, string(output))
- }
-
- // Create a context with a 60-second timeout.
- ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
- defer cancel()
-
- for _, obj := range objects {
- err := waitForDeletion(ctx, c.client, obj.resourceName, obj.name, obj.namespace)
- require.NoError(t, err, "Error deleting %q %q in namespace %q", obj.resourceName, obj.name, obj.namespace)
- t.Logf("Successfully deleted %q %q in namespace %q", obj.resourceName, obj.name, obj.namespace)
- }
-}
-
-// waitForDeletion uses "kubectl wait" to block until the specified resource is deleted
-// or until the 60-second timeout is reached.
-func waitForDeletion(ctx context.Context, client, resourceType, resourceName, resourceNamespace string) error {
- args := []string{"wait", "--for=delete", "--timeout=60s", resourceType, resourceName}
- if resourceNamespace != "" {
- args = append(args, "--namespace", resourceNamespace)
- }
- cmd := exec.CommandContext(ctx, client, args...)
- output, err := cmd.CombinedOutput()
- if err != nil {
- return fmt.Errorf("error waiting for deletion of %s %s: %v, output: %s", resourceType, resourceName, err, string(output))
- }
- return nil
-}
-
-// createRandomNamespace creates a random namespace
-func createRandomNamespace(t *testing.T, client string) string {
- nsName := fmt.Sprintf("testns-%s", rand.String(8))
-
- cmd := exec.Command(client, "create", "namespace", nsName)
- output, err := cmd.CombinedOutput()
- require.NoError(t, err, "Error creating namespace: %s", string(output))
-
- return nsName
-}
-
-// getComponentNamespace returns the namespace where operator-controller or catalogd is running
-func getComponentNamespace(t *testing.T, client, selector string) string {
- cmd := exec.Command(client, "get", "pods", "--all-namespaces", "--selector="+selector, "--output=jsonpath={.items[0].metadata.namespace}")
- output, err := cmd.CombinedOutput()
- require.NoError(t, err, "Error determining namespace: %s", string(output))
-
- namespace := string(bytes.TrimSpace(output))
- if namespace == "" {
- t.Fatal("No namespace found for selector " + selector)
- }
- return namespace
-}
-
-// getComponentPodIPs returns the IP addresses of all pods matching the component selector
-func (c *MetricsTestConfig) getComponentPodIPs(t *testing.T) []string {
- cmd := exec.Command(c.client, "get", "pods",
- "--namespace="+c.componentNamespace,
- "--selector="+c.componentSelector,
- "--output=jsonpath={.items[*].status.podIP}")
- output, err := cmd.CombinedOutput()
- require.NoError(t, err, "Error getting pod IPs: %s", string(output))
-
- podIPsStr := string(bytes.TrimSpace(output))
- if podIPsStr == "" {
- return []string{}
- }
-
- // Split space-separated IPs
- fields := bytes.Fields([]byte(podIPsStr))
- ips := make([]string, len(fields))
- for i, field := range fields {
- ips[i] = string(field)
- }
- return ips
-}
-
-func stdoutAndCombined(cmd *exec.Cmd) ([]byte, []byte, error) {
- var outOnly, outAndErr bytes.Buffer
- allWriter := io.MultiWriter(&outOnly, &outAndErr)
- cmd.Stdout = allWriter
- cmd.Stderr = &outAndErr
- err := cmd.Run()
- return outOnly.Bytes(), outAndErr.Bytes(), err
-}
diff --git a/test/e2e/network_policy_test.go b/test/e2e/network_policy_test.go
deleted file mode 100644
index 8e0465f41..000000000
--- a/test/e2e/network_policy_test.go
+++ /dev/null
@@ -1,379 +0,0 @@
-package e2e
-
-import (
- "context"
- "fmt"
- "strings"
- "testing"
-
- "github.com/stretchr/testify/require"
- corev1 "k8s.io/api/core/v1"
- networkingv1 "k8s.io/api/networking/v1"
- "k8s.io/apimachinery/pkg/api/equality"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/util/intstr"
- "k8s.io/utils/ptr"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- testutil "github.com/operator-framework/operator-controller/internal/shared/util/test"
-)
-
-const (
- minJustificationLength = 40
- catalogdManagerSelector = "control-plane=catalogd-controller-manager"
- operatorManagerSelector = "control-plane=operator-controller-controller-manager"
- catalogdMetricsPort = 7443
- catalogdWebhookPort = 9443
- catalogServerPort = 8443
- operatorControllerMetricsPort = 8443
-)
-
-type portWithJustification struct {
- port []networkingv1.NetworkPolicyPort
- justification string
-}
-
-// ingressRule defines a k8s IngressRule, along with a justification.
-type ingressRule struct {
- ports []portWithJustification
- from []networkingv1.NetworkPolicyPeer
-}
-
-// egressRule defines a k8s egressRule, along with a justification.
-type egressRule struct {
- ports []portWithJustification
- to []networkingv1.NetworkPolicyPeer
-}
-
-// AllowedPolicyDefinition defines the expected structure and justifications for a NetworkPolicy.
-type allowedPolicyDefinition struct {
- selector metav1.LabelSelector
- policyTypes []networkingv1.PolicyType
- ingressRule ingressRule
- egressRule egressRule
- denyAllIngressJustification string // Justification if Ingress is in PolicyTypes and IngressRules is empty
- denyAllEgressJustification string // Justification if Egress is in PolicyTypes and EgressRules is empty
-}
-
-var denyAllPolicySpec = allowedPolicyDefinition{
- selector: metav1.LabelSelector{}, // Empty selector, matches all pods
- policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress},
- // No IngressRules means deny all ingress if PolicyTypeIngress is present
- // No EgressRules means deny all egress if PolicyTypeEgress is present
- denyAllIngressJustification: "Denies all ingress traffic to pods selected by this policy by default, unless explicitly allowed by other policy rules, ensuring a baseline secure posture.",
- denyAllEgressJustification: "Denies all egress traffic from pods selected by this policy by default, unless explicitly allowed by other policy rules, minimizing potential exfiltration paths.",
-}
-
-var prometheuSpec = allowedPolicyDefinition{
- selector: metav1.LabelSelector{MatchLabels: map[string]string{"app.kubernetes.io/name": "prometheus"}},
- policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress},
- ingressRule: ingressRule{
- ports: []portWithJustification{
- {
- port: nil,
- justification: "Allows access to the prometheus pod",
- },
- },
- },
- egressRule: egressRule{
- ports: []portWithJustification{
- {
- port: nil,
- justification: "Allows prometheus to access other pods",
- },
- },
- },
-}
-
-// Ref: https://docs.google.com/document/d/1bHEEWzA65u-kjJFQRUY1iBuMIIM1HbPy4MeDLX4NI3o/edit?usp=sharing
-var allowedNetworkPolicies = map[string]allowedPolicyDefinition{
- "catalogd-controller-manager": {
- selector: metav1.LabelSelector{MatchLabels: map[string]string{"control-plane": "catalogd-controller-manager"}},
- policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress},
- ingressRule: ingressRule{
- ports: []portWithJustification{
- {
- port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: catalogdMetricsPort}}},
- justification: "Allows Prometheus to scrape metrics from catalogd, which is essential for monitoring its performance and health.",
- },
- {
- port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: catalogdWebhookPort}}},
- justification: "Permits Kubernetes API server to reach catalogd's mutating admission webhook, ensuring integrity of catalog resources.",
- },
- {
- port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: catalogServerPort}}},
- justification: "Enables clients (eg. operator-controller) to query catalog metadata from catalogd, which is a core function for bundle resolution and operator discovery.",
- },
- },
- },
- egressRule: egressRule{
- ports: []portWithJustification{
- {
- port: nil, // Empty Ports means allow all egress
- justification: "Permits catalogd to fetch catalog images from arbitrary container registries and communicate with the Kubernetes API server for its operational needs.",
- },
- },
- },
- },
- "operator-controller-controller-manager": {
- selector: metav1.LabelSelector{MatchLabels: map[string]string{"control-plane": "operator-controller-controller-manager"}},
- policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress},
- ingressRule: ingressRule{
- ports: []portWithJustification{
- {
- port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: operatorControllerMetricsPort}}},
- justification: "Allows Prometheus to scrape metrics from operator-controller, which is crucial for monitoring its activity, reconciliations, and overall health.",
- },
- },
- },
- egressRule: egressRule{
- ports: []portWithJustification{
- {
- port: nil, // Empty Ports means allow all egress
- justification: "Enables operator-controller to pull bundle images from arbitrary image registries, connect to catalogd's HTTPS server for metadata, and interact with the Kubernetes API server.",
- },
- },
- },
- },
-}
-
-func TestNetworkPolicyJustifications(t *testing.T) {
- ctx := context.Background()
-
- // Validate justifications have min length in the allowedNetworkPolicies definition
- for name, policyDef := range allowedNetworkPolicies {
- for i, pwj := range policyDef.ingressRule.ports {
- require.GreaterOrEqualf(t, len(pwj.justification), minJustificationLength,
- "Justification for ingress PortWithJustification entry %d in policy %q is too short: %q", i, name, pwj.justification)
- }
- for i, pwj := range policyDef.egressRule.ports { // Corrected variable name from 'rule' to 'pwj'
- require.GreaterOrEqualf(t, len(pwj.justification), minJustificationLength,
- "Justification for egress PortWithJustification entry %d in policy %q is too short: %q", i, name, pwj.justification)
- }
- if policyDef.denyAllIngressJustification != "" {
- require.GreaterOrEqualf(t, len(policyDef.denyAllIngressJustification), minJustificationLength,
- "DenyAllIngressJustification for policy %q is too short: %q", name, policyDef.denyAllIngressJustification)
- }
- if policyDef.denyAllEgressJustification != "" {
- require.GreaterOrEqualf(t, len(policyDef.denyAllEgressJustification), minJustificationLength,
- "DenyAllEgressJustification for policy %q is too short: %q", name, policyDef.denyAllEgressJustification)
- }
- }
-
- clientForComponent := testutil.FindK8sClient(t)
-
- operatorControllerNamespace := getComponentNamespace(t, clientForComponent, operatorManagerSelector)
- catalogDNamespace := getComponentNamespace(t, clientForComponent, catalogdManagerSelector)
-
- policies := &networkingv1.NetworkPolicyList{}
- err := c.List(ctx, policies, client.InNamespace(operatorControllerNamespace))
- require.NoError(t, err, "Failed to list NetworkPolicies in namespace %q", operatorControllerNamespace)
-
- clusterPolicies := policies.Items
-
- if operatorControllerNamespace != catalogDNamespace {
- policies := &networkingv1.NetworkPolicyList{}
- err := c.List(ctx, policies, client.InNamespace(catalogDNamespace))
- require.NoError(t, err, "Failed to list NetworkPolicies in namespace %q", catalogDNamespace)
- clusterPolicies = append(clusterPolicies, policies.Items...)
-
- t.Log("Detected dual-namespace configuration, expecting two prefixed 'default-deny-all-traffic' policies.")
- allowedNetworkPolicies["catalogd-default-deny-all-traffic"] = denyAllPolicySpec
- allowedNetworkPolicies["operator-controller-default-deny-all-traffic"] = denyAllPolicySpec
- } else {
- t.Log("Detected single-namespace configuration, expecting one 'default-deny-all-traffic' policy.")
- allowedNetworkPolicies["default-deny-all-traffic"] = denyAllPolicySpec
- t.Log("Detected single-namespace configuration, expecting 'prometheus' policy.")
- allowedNetworkPolicies["prometheus"] = prometheuSpec
- }
-
- validatedRegistryPolicies := make(map[string]bool)
-
- for _, policy := range clusterPolicies {
- t.Run(fmt.Sprintf("Policy_%s", strings.ReplaceAll(policy.Name, "-", "_")), func(t *testing.T) {
- expectedPolicy, found := allowedNetworkPolicies[policy.Name]
- require.Truef(t, found, "NetworkPolicy %q found in cluster but not in allowed registry. Namespace: %s", policy.Name, policy.Namespace)
- validatedRegistryPolicies[policy.Name] = true
-
- // 1. Compare PodSelector
- require.True(t, equality.Semantic.DeepEqual(expectedPolicy.selector, policy.Spec.PodSelector),
- "PodSelector mismatch for policy %q. Expected: %+v, Got: %+v", policy.Name, expectedPolicy.selector, policy.Spec.PodSelector)
-
- // 2. Compare PolicyTypes
- require.ElementsMatchf(t, expectedPolicy.policyTypes, policy.Spec.PolicyTypes,
- "PolicyTypes mismatch for policy %q.", policy.Name)
-
- // 3. Validate Ingress Rules
- hasIngressPolicyType := false
- for _, pt := range policy.Spec.PolicyTypes {
- if pt == networkingv1.PolicyTypeIngress {
- hasIngressPolicyType = true
- break
- }
- }
-
- if hasIngressPolicyType {
- switch len(policy.Spec.Ingress) {
- case 0:
- validateDenyAllIngress(t, policy.Name, expectedPolicy)
- case 1:
- validateSingleIngressRule(t, policy.Name, policy.Spec.Ingress[0], expectedPolicy)
- default:
- require.Failf(t, "Policy %q in cluster has %d ingress rules. Allowed definition supports at most 1 explicit ingress rule.", policy.Name, len(policy.Spec.Ingress))
- }
- } else {
- validateNoIngress(t, policy.Name, policy, expectedPolicy)
- }
-
- // 4. Validate Egress Rules
- hasEgressPolicyType := false
- for _, pt := range policy.Spec.PolicyTypes {
- if pt == networkingv1.PolicyTypeEgress {
- hasEgressPolicyType = true
- break
- }
- }
-
- if hasEgressPolicyType {
- switch len(policy.Spec.Egress) {
- case 0:
- validateDenyAllEgress(t, policy.Name, expectedPolicy)
- case 1:
- validateSingleEgressRule(t, policy.Name, policy.Spec.Egress[0], expectedPolicy)
- default:
- require.Failf(t, "Policy %q in cluster has %d egress rules. Allowed definition supports at most 1 explicit egress rule.", policy.Name, len(policy.Spec.Egress))
- }
- } else {
- validateNoEgress(t, policy, expectedPolicy)
- }
- })
- }
-
- // 5. Ensure all policies in the registry were found in the cluster
- require.Len(t, validatedRegistryPolicies, len(allowedNetworkPolicies),
- "Mismatch between number of expected policies in registry (%d) and number of policies found & validated in cluster (%d). Missing policies from registry: %v", len(allowedNetworkPolicies), len(validatedRegistryPolicies), missingPolicies(allowedNetworkPolicies, validatedRegistryPolicies))
-}
-
-func missingPolicies(expected map[string]allowedPolicyDefinition, actual map[string]bool) []string {
- missing := []string{}
- for k := range expected {
- if !actual[k] {
- missing = append(missing, k)
- }
- }
- return missing
-}
-
-// validateNoEgress confirms that a policy which does not have spec.PolicyType=Egress specified
-// has no corresponding egress rules or expectations defined.
-func validateNoEgress(t *testing.T, policy networkingv1.NetworkPolicy, expectedPolicy allowedPolicyDefinition) {
- // Policy is NOT expected to affect Egress traffic (no Egress in PolicyTypes)
- // Expected: Cluster has no egress rules; Registry has no DenyAllEgressJustification and empty EgressRule.
- require.Emptyf(t, policy.Spec.Egress,
- "Policy %q: Cluster does not have Egress PolicyType, but has Egress rules defined.", policy.Name)
- require.Emptyf(t, expectedPolicy.denyAllEgressJustification,
- "Policy %q: Cluster does not have Egress PolicyType. Registry's DenyAllEgressJustification is not empty.", policy.Name)
- require.Emptyf(t, expectedPolicy.egressRule.ports,
- "Policy %q: Cluster does not have Egress PolicyType. Registry's EgressRule.Ports is not empty.", policy.Name)
- require.Emptyf(t, expectedPolicy.egressRule.to,
- "Policy %q: Cluster does not have Egress PolicyType. Registry's EgressRule.To is not empty.", policy.Name)
-}
-
-// validateDenyAllEgress confirms that a policy with Egress PolicyType but no explicit rules
-// correctly corresponds to a "deny all" expectation.
-func validateDenyAllEgress(t *testing.T, policyName string, expectedPolicy allowedPolicyDefinition) {
- // Cluster: PolicyType Egress is present, but no explicit egress rules -> Deny All Egress by this policy.
- // Expected: DenyAllEgressJustification is set; EgressRule.Ports and .To are empty.
- require.NotEmptyf(t, expectedPolicy.denyAllEgressJustification,
- "Policy %q: Cluster has Egress PolicyType but no rules (deny all). Registry's DenyAllEgressJustification is empty.", policyName)
- require.Emptyf(t, expectedPolicy.egressRule.ports,
- "Policy %q: Cluster has Egress PolicyType but no rules (deny all). Registry's EgressRule.Ports is not empty.", policyName)
- require.Emptyf(t, expectedPolicy.egressRule.to,
- "Policy %q: Cluster has Egress PolicyType but no rules (deny all). Registry's EgressRule.To is not empty.", policyName)
-}
-
-// validateSingleEgressRule validates a policy that has exactly one explicit egress rule,
-// distinguishing between "allow-all" and more specific rules.
-func validateSingleEgressRule(t *testing.T, policyName string, clusterEgressRule networkingv1.NetworkPolicyEgressRule, expectedPolicy allowedPolicyDefinition) {
- // Cluster: PolicyType Egress is present, and there's one explicit egress rule.
- // Expected: DenyAllEgressJustification is empty; EgressRule matches the cluster's rule.
- expectedEgressRule := expectedPolicy.egressRule
-
- require.Emptyf(t, expectedPolicy.denyAllEgressJustification,
- "Policy %q: Cluster has a specific Egress rule. Registry's DenyAllEgressJustification should be empty.", policyName)
-
- isClusterRuleAllowAllPorts := len(clusterEgressRule.Ports) == 0
- isClusterRuleAllowAllPeers := len(clusterEgressRule.To) == 0
-
- if isClusterRuleAllowAllPorts && isClusterRuleAllowAllPeers { // Handles egress: [{}] - allow all ports to all peers
- require.Lenf(t, expectedEgressRule.ports, 1,
- "Policy %q (allow-all egress): Expected EgressRule.Ports to have 1 justification entry, got %d", policyName, len(expectedEgressRule.ports))
- if len(expectedEgressRule.ports) == 1 { // Guard against panic
- require.Nilf(t, expectedEgressRule.ports[0].port,
- "Policy %q (allow-all egress): Expected EgressRule.Ports[0].Port to be nil, got %+v", policyName, expectedEgressRule.ports[0].port)
- }
- require.Conditionf(t, func() bool { return len(expectedEgressRule.to) == 0 },
- "Policy %q (allow-all egress): Expected EgressRule.To to be empty for allow-all peers, got %+v", policyName, expectedEgressRule.to)
- } else {
- // Specific egress rule (not the simple allow-all ports and allow-all peers)
- require.True(t, equality.Semantic.DeepEqual(expectedEgressRule.to, clusterEgressRule.To),
- "Policy %q, Egress Rule: 'To' mismatch.\nExpected: %+v\nGot: %+v", policyName, expectedEgressRule.to, clusterEgressRule.To)
-
- var allExpectedPortsFromPwJ []networkingv1.NetworkPolicyPort
- for _, pwj := range expectedEgressRule.ports {
- allExpectedPortsFromPwJ = append(allExpectedPortsFromPwJ, pwj.port...)
- }
- require.ElementsMatchf(t, allExpectedPortsFromPwJ, clusterEgressRule.Ports,
- "Policy %q, Egress Rule: 'Ports' mismatch (aggregated from PortWithJustification). Expected: %+v, Got: %+v", policyName, allExpectedPortsFromPwJ, clusterEgressRule.Ports)
- }
-}
-
-// validateNoIngress confirms that a policy which does not have the Ingress PolicyType
-// has no corresponding ingress rules or expectations defined.
-func validateNoIngress(t *testing.T, policyName string, clusterPolicy networkingv1.NetworkPolicy, expectedPolicy allowedPolicyDefinition) {
- // Policy is NOT expected to affect Ingress traffic (no Ingress in PolicyTypes)
- // Expected: Cluster has no ingress rules; Registry has no DenyAllIngressJustification and empty IngressRule.
- require.Emptyf(t, clusterPolicy.Spec.Ingress,
- "Policy %q: Cluster does not have Ingress PolicyType, but has Ingress rules defined.", policyName)
- require.Emptyf(t, expectedPolicy.denyAllIngressJustification,
- "Policy %q: Cluster does not have Ingress PolicyType. Registry's DenyAllIngressJustification is not empty.", policyName)
- require.Emptyf(t, expectedPolicy.ingressRule.ports,
- "Policy %q: Cluster does not have Ingress PolicyType. Registry's IngressRule.Ports is not empty.", policyName)
- require.Emptyf(t, expectedPolicy.ingressRule.from,
- "Policy %q: Cluster does not have Ingress PolicyType. Registry's IngressRule.From is not empty.", policyName)
-}
-
-// validateDenyAllIngress confirms that a policy with Ingress PolicyType but no explicit rules
-// correctly corresponds to a "deny all" expectation.
-func validateDenyAllIngress(t *testing.T, policyName string, expectedPolicy allowedPolicyDefinition) {
- // Cluster: PolicyType Ingress is present, but no explicit ingress rules -> Deny All Ingress by this policy.
- // Expected: DenyAllIngressJustification is set; IngressRule.Ports and .From are empty.
- require.NotEmptyf(t, expectedPolicy.denyAllIngressJustification,
- "Policy %q: Cluster has Ingress PolicyType but no rules (deny all). Registry's DenyAllIngressJustification is empty.", policyName)
- require.Emptyf(t, expectedPolicy.ingressRule.ports,
- "Policy %q: Cluster has Ingress PolicyType but no rules (deny all). Registry's IngressRule.Ports is not empty.", policyName)
- require.Emptyf(t, expectedPolicy.ingressRule.from,
- "Policy %q: Cluster has Ingress PolicyType but no rules (deny all). Registry's IngressRule.From is not empty.", policyName)
-}
-
-// validateSingleIngressRule validates a policy that has exactly one explicit ingress rule.
-func validateSingleIngressRule(t *testing.T, policyName string, clusterIngressRule networkingv1.NetworkPolicyIngressRule, expectedPolicy allowedPolicyDefinition) {
- // Cluster: PolicyType Ingress is present, and there's one explicit ingress rule.
- // Expected: DenyAllIngressJustification is empty; IngressRule matches the cluster's rule.
- expectedIngressRule := expectedPolicy.ingressRule
-
- require.Emptyf(t, expectedPolicy.denyAllIngressJustification,
- "Policy %q: Cluster has a specific Ingress rule. Registry's DenyAllIngressJustification should be empty.", policyName)
-
- // Compare 'From'
- require.True(t, equality.Semantic.DeepEqual(expectedIngressRule.from, clusterIngressRule.From),
- "Policy %q, Ingress Rule: 'From' mismatch.\nExpected: %+v\nGot: %+v", policyName, expectedIngressRule.from, clusterIngressRule.From)
-
- // Compare 'Ports' by aggregating the ports from our justified structure
- var allExpectedPortsFromPwJ []networkingv1.NetworkPolicyPort
- for _, pwj := range expectedIngressRule.ports {
- allExpectedPortsFromPwJ = append(allExpectedPortsFromPwJ, pwj.port...)
- }
- require.ElementsMatchf(t, allExpectedPortsFromPwJ, clusterIngressRule.Ports,
- "Policy %q, Ingress Rule: 'Ports' mismatch (aggregated from PortWithJustification). Expected: %+v, Got: %+v", policyName, allExpectedPortsFromPwJ, clusterIngressRule.Ports)
-}
diff --git a/test/e2e/single_namespace_support_test.go b/test/e2e/single_namespace_support_test.go
deleted file mode 100644
index 190e786ba..000000000
--- a/test/e2e/single_namespace_support_test.go
+++ /dev/null
@@ -1,412 +0,0 @@
-package e2e
-
-import (
- "context"
- "fmt"
- "os"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- rbacv1 "k8s.io/api/rbac/v1"
- apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
- apimeta "k8s.io/apimachinery/pkg/api/meta"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/utils/ptr"
-
- ocv1 "github.com/operator-framework/operator-controller/api/v1"
- testutil "github.com/operator-framework/operator-controller/internal/shared/util/test"
- . "github.com/operator-framework/operator-controller/test/helpers"
-)
-
-const (
- soNsFlag = "SingleOwnNamespaceInstallSupport"
-)
-
-func TestClusterExtensionSingleNamespaceSupport(t *testing.T) {
- SkipIfFeatureGateDisabled(t, soNsFlag)
- t.Log("Test support for cluster extension config")
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- t.Log("By creating install namespace, watch namespace and necessary rbac resources")
- namespace := corev1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: "single-namespace-operator",
- },
- }
- require.NoError(t, c.Create(t.Context(), &namespace))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), &namespace))
- })
-
- watchNamespace := corev1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: "single-namespace-operator-target",
- },
- }
- require.NoError(t, c.Create(t.Context(), &watchNamespace))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), &watchNamespace))
- })
-
- serviceAccount := corev1.ServiceAccount{
- ObjectMeta: metav1.ObjectMeta{
- Name: "single-namespace-operator-installer",
- Namespace: namespace.GetName(),
- },
- }
- require.NoError(t, c.Create(t.Context(), &serviceAccount))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), &serviceAccount))
- })
-
- clusterRoleBinding := &rbacv1.ClusterRoleBinding{
- ObjectMeta: metav1.ObjectMeta{
- Name: "single-namespace-operator-installer",
- },
- Subjects: []rbacv1.Subject{
- {
- Kind: "ServiceAccount",
- APIGroup: corev1.GroupName,
- Name: serviceAccount.GetName(),
- Namespace: serviceAccount.GetNamespace(),
- },
- },
- RoleRef: rbacv1.RoleRef{
- APIGroup: rbacv1.GroupName,
- Kind: "ClusterRole",
- Name: "cluster-admin",
- },
- }
- require.NoError(t, c.Create(t.Context(), clusterRoleBinding))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), clusterRoleBinding))
- })
-
- t.Log("By creating the test-catalog ClusterCatalog")
- extensionCatalog := &ocv1.ClusterCatalog{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-catalog",
- },
- Spec: ocv1.ClusterCatalogSpec{
- Source: ocv1.CatalogSource{
- Type: ocv1.SourceTypeImage,
- Image: &ocv1.ImageSource{
- Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")),
- PollIntervalMinutes: ptr.To(1),
- },
- },
- },
- }
- require.NoError(t, c.Create(t.Context(), extensionCatalog))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), extensionCatalog))
- })
-
- t.Log("By waiting for the catalog to serve its metadata")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog))
- cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonAvailable, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By attempting to install the single-namespace-operator ClusterExtension without any configuration")
- clusterExtension := &ocv1.ClusterExtension{
- ObjectMeta: metav1.ObjectMeta{
- Name: "single-namespace-operator-extension",
- },
- Spec: ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "single-namespace-operator",
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
- },
- },
- },
- Namespace: namespace.GetName(),
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: serviceAccount.GetName(),
- },
- },
- }
- require.NoError(t, c.Create(t.Context(), clusterExtension))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), clusterExtension))
- })
-
- t.Log("By waiting for single-namespace-operator extension installation to fail")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonRetrying, cond.Reason)
- require.Contains(ct, cond.Message, `required field "watchNamespace" is missing`)
- }, pollDuration, pollInterval)
-
- t.Log("By updating the ClusterExtension configuration with a watchNamespace")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension))
- clusterExtension.Spec.Config = &ocv1.ClusterExtensionConfig{
- ConfigType: ocv1.ClusterExtensionConfigTypeInline,
- Inline: &apiextensionsv1.JSON{
- Raw: []byte(fmt.Sprintf(`{"watchNamespace": "%s"}`, watchNamespace.GetName())),
- },
- }
- require.NoError(t, c.Update(t.Context(), clusterExtension))
- }, pollDuration, pollInterval)
-
- t.Log("By waiting for single-namespace-operator extension to be installed successfully")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- require.NotNil(ct, clusterExtension.Status.Install)
- require.NotEmpty(ct, clusterExtension.Status.Install.Bundle)
- }, pollDuration, pollInterval)
-
- t.Log("By ensuring the single-namespace-operator deployment is correctly configured to watch the watch namespace")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- deployment := &appsv1.Deployment{}
- require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "single-namespace-operator"}, deployment))
- require.NotNil(ct, deployment.Spec.Template.GetAnnotations())
- require.Equal(ct, watchNamespace.GetName(), deployment.Spec.Template.GetAnnotations()["olm.targetNamespaces"])
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionOwnNamespaceSupport(t *testing.T) {
- SkipIfFeatureGateDisabled(t, soNsFlag)
- t.Log("Test support for cluster extension with OwnNamespace install mode support")
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- t.Log("By creating install namespace, watch namespace and necessary rbac resources")
- namespace := corev1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: "own-namespace-operator",
- },
- }
- require.NoError(t, c.Create(t.Context(), &namespace))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), &namespace))
- })
-
- serviceAccount := corev1.ServiceAccount{
- ObjectMeta: metav1.ObjectMeta{
- Name: "own-namespace-operator-installer",
- Namespace: namespace.GetName(),
- },
- }
- require.NoError(t, c.Create(t.Context(), &serviceAccount))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), &serviceAccount))
- })
-
- clusterRoleBinding := &rbacv1.ClusterRoleBinding{
- ObjectMeta: metav1.ObjectMeta{
- Name: "own-namespace-operator-installer",
- },
- Subjects: []rbacv1.Subject{
- {
- Kind: "ServiceAccount",
- APIGroup: corev1.GroupName,
- Name: serviceAccount.GetName(),
- Namespace: serviceAccount.GetNamespace(),
- },
- },
- RoleRef: rbacv1.RoleRef{
- APIGroup: rbacv1.GroupName,
- Kind: "ClusterRole",
- Name: "cluster-admin",
- },
- }
- require.NoError(t, c.Create(t.Context(), clusterRoleBinding))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), clusterRoleBinding))
- })
-
- t.Log("By creating the test-catalog ClusterCatalog")
- extensionCatalog := &ocv1.ClusterCatalog{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-catalog",
- },
- Spec: ocv1.ClusterCatalogSpec{
- Source: ocv1.CatalogSource{
- Type: ocv1.SourceTypeImage,
- Image: &ocv1.ImageSource{
- Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")),
- PollIntervalMinutes: ptr.To(1),
- },
- },
- },
- }
- require.NoError(t, c.Create(t.Context(), extensionCatalog))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), extensionCatalog))
- })
-
- t.Log("By waiting for the catalog to serve its metadata")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog))
- cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonAvailable, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By attempting to install the own-namespace-operator ClusterExtension without any configuration")
- clusterExtension := &ocv1.ClusterExtension{
- ObjectMeta: metav1.ObjectMeta{
- Name: "own-namespace-operator-extension",
- },
- Spec: ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "own-namespace-operator",
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
- },
- },
- },
- Namespace: namespace.GetName(),
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: serviceAccount.GetName(),
- },
- },
- }
- require.NoError(t, c.Create(t.Context(), clusterExtension))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), clusterExtension))
- })
-
- t.Log("By waiting for own-namespace-operator extension installation to fail")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonRetrying, cond.Reason)
- require.Contains(ct, cond.Message, `required field "watchNamespace" is missing`)
- }, pollDuration, pollInterval)
-
- t.Log("By updating the ClusterExtension configuration with a watchNamespace other than the install namespace")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension))
- clusterExtension.Spec.Config = &ocv1.ClusterExtensionConfig{
- ConfigType: ocv1.ClusterExtensionConfigTypeInline,
- Inline: &apiextensionsv1.JSON{
- Raw: []byte(`{"watchNamespace": "some-namespace"}`),
- },
- }
- require.NoError(t, c.Update(t.Context(), clusterExtension))
- }, pollDuration, pollInterval)
-
- t.Log("By waiting for own-namespace-operator extension installation to fail")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonRetrying, cond.Reason)
- require.Contains(ct, cond.Message, "invalid ClusterExtension configuration")
- require.Contains(ct, cond.Message, fmt.Sprintf("watchNamespace must be \"%s\"", clusterExtension.Spec.Namespace))
- require.Contains(ct, cond.Message, "OwnNamespace install mode")
- }, pollDuration, pollInterval)
-
- t.Log("By updating the ClusterExtension configuration with a watchNamespace = install namespace")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension))
- clusterExtension.Spec.Config = &ocv1.ClusterExtensionConfig{
- ConfigType: ocv1.ClusterExtensionConfigTypeInline,
- Inline: &apiextensionsv1.JSON{
- Raw: []byte(fmt.Sprintf(`{"watchNamespace": "%s"}`, clusterExtension.Spec.Namespace)),
- },
- }
- require.NoError(t, c.Update(t.Context(), clusterExtension))
- }, pollDuration, pollInterval)
-
- t.Log("By waiting for own-namespace-operator extension to be installed successfully")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- require.NotNil(ct, clusterExtension.Status.Install)
- require.NotEmpty(ct, clusterExtension.Status.Install.Bundle)
- }, pollDuration, pollInterval)
-
- t.Log("By ensuring the own-namespace-operator deployment is correctly configured to watch the watch namespace")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- deployment := &appsv1.Deployment{}
- require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "own-namespace-operator"}, deployment))
- require.NotNil(ct, deployment.Spec.Template.GetAnnotations())
- require.Equal(ct, clusterExtension.Spec.Namespace, deployment.Spec.Template.GetAnnotations()["olm.targetNamespaces"])
- }, pollDuration, pollInterval)
-}
-
-func TestClusterExtensionVersionUpdate(t *testing.T) {
- SkipIfFeatureGateDisabled(t, soNsFlag)
- t.Log("When a cluster extension is installed from a catalog")
- t.Log("When resolving upgrade edges")
-
- clusterExtension, extensionCatalog, sa, ns := TestInit(t)
- defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns)
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- t.Log("By creating an ClusterExtension at a specified version")
- clusterExtension.Spec = ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "test",
- Version: "1.0.0",
- },
- },
- Namespace: ns.Name,
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: sa.Name,
- },
- }
- require.NoError(t, c.Create(context.Background(), clusterExtension))
- t.Log("By eventually reporting a successful resolution")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("It allows to upgrade the ClusterExtension to a non-successor version")
- t.Log("By forcing update of ClusterExtension resource to a non-successor version")
- // 1.2.0 does not replace/skip/skipRange 1.0.0.
- clusterExtension.Spec.Source.Catalog.Version = "1.2.0"
- clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified
- require.NoError(t, c.Update(context.Background(), clusterExtension))
- t.Log("By eventually reporting a satisfiable resolution")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- }, pollDuration, pollInterval)
- t.Log("We should have two ClusterExtensionRevision resources")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- cerList := &ocv1.ClusterExtensionRevisionList{}
- require.NoError(ct, c.List(context.Background(), cerList))
- require.Len(ct, cerList.Items, 2)
- }, pollDuration, pollInterval)
-}
diff --git a/test/e2e/steps/hooks.go b/test/e2e/steps/hooks.go
new file mode 100644
index 000000000..ad47d5102
--- /dev/null
+++ b/test/e2e/steps/hooks.go
@@ -0,0 +1,166 @@
+package steps
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strconv"
+
+ "github.com/cucumber/godog"
+ "github.com/go-logr/logr"
+ "github.com/spf13/pflag"
+ "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/component-base/featuregate"
+ "k8s.io/klog/v2/textlogger"
+
+ "github.com/operator-framework/operator-controller/internal/operator-controller/features"
+)
+
+type resource struct {
+ name string
+ kind string
+}
+
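+// scenarioContext carries per-scenario state: an isolated namespace and ClusterExtension
+// name derived from the scenario id, resources deleted mid-scenario (for restore checks),
+// background processes to reap, and captured metrics responses.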
+type scenarioContext struct {
+ id string
+ namespace string
+ clusterExtensionName string
+ removedResources []unstructured.Unstructured
+ backGroundCmds []*exec.Cmd
+ metricsResponse map[string]string
+}
+
+type contextKey string
+
+const (
+ scenarioContextKey contextKey = "scenario-context"
+)
+
+var (
+ devMode = false
+ featureGates = map[featuregate.Feature]bool{
+ features.WebhookProviderCertManager: true,
+ features.PreflightPermissions: false,
+ features.SingleOwnNamespaceInstallSupport: false,
+ features.SyntheticPermissions: false,
+ features.WebhookProviderOpenshiftServiceCA: false,
+ features.HelmChartSupport: false,
+ features.BoxcutterRuntime: false,
+ }
+ logger logr.Logger
+)
+
+func init() {
+ flagSet := pflag.CommandLine
+ flagSet.BoolVar(&devMode, "log.debug", false, "print debug log level")
+}
+
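+// RegisterHooks installs the per-scenario hooks: feature-gate tag checking and
+// scenario-context creation before each scenario, and cleanup after it.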
+func RegisterHooks(sc *godog.ScenarioContext) {
+ sc.Before(CheckFeatureTags)
+ sc.Before(CreateScenarioContext)
+
+ sc.After(ScenarioCleanup)
+}
+
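+// BeforeSuite locates the OLM controller deployment, records its namespace, and parses the
+// manager container's --feature-gates arguments so tagged scenarios can be skipped when a
+// gate is disabled.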
+func BeforeSuite() {
+ if devMode {
+ logger = textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(1)))
+ } else {
+ logger = textlogger.NewLogger(textlogger.NewConfig())
+ }
+
+ raw, err := k8sClient("get", "deployments", "-A", "-l", "app.kubernetes.io/part-of=olm", "-o", "jsonpath={.items}")
+ if err != nil {
+ panic(fmt.Errorf("failed to get OLM deployments: %v", err))
+ }
+ dl := []appsv1.Deployment{}
+ if err := json.Unmarshal([]byte(raw), &dl); err != nil {
+ panic(fmt.Errorf("failed to unmarshal OLM deployments: %v", err))
+ }
+ var olm *appsv1.Deployment
+
+ for _, d := range dl {
+ if d.Name == olmDeploymentName {
+ olm = &d
+ olmNamespace = d.Namespace
+ break
+ }
+ }
+ if olm == nil {
+ panic(fmt.Errorf("deployment %q not found; cannot detect enabled feature gates", olmDeploymentName))
+ }
+
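+ // Note: this matches one gate per --feature-gates flag occurrence; a comma-separated list
+ // such as --feature-gates=A=true,B=false would only have its first entry recognized.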
+ featureGatePattern := regexp.MustCompile(`--feature-gates=([[:alnum:]]+)=(true|false)`)
+ for _, c := range olm.Spec.Template.Spec.Containers {
+ if c.Name == "manager" {
+ for _, arg := range c.Args {
+ if matches := featureGatePattern.FindStringSubmatch(arg); matches != nil {
+ v, err := strconv.ParseBool(matches[2])
+ if err != nil {
+ panic(fmt.Errorf("failed to parse feature gate %q=%q: %v", matches[1], matches[2], err))
+ }
+ featureGates[featuregate.Feature(matches[1])] = v
+ }
+ }
+ }
+ }
+ logger.Info(fmt.Sprintf("Enabled feature gates: %v", featureGates))
+}
+
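+// CheckFeatureTags skips the scenario when any of its @tags names a feature gate that is
+// known but currently disabled.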
+func CheckFeatureTags(ctx context.Context, sc *godog.Scenario) (context.Context, error) {
+ for _, tag := range sc.Tags {
+ if enabled, found := featureGates[featuregate.Feature(tag.Name[1:])]; found && !enabled {
+ logger.Info(fmt.Sprintf("Skipping scenario %q because feature gate %q is disabled", sc.Name, tag.Name[1:]))
+ return ctx, godog.ErrSkip
+ }
+ }
+ return ctx, nil
+}
+
+func CreateScenarioContext(ctx context.Context, sc *godog.Scenario) (context.Context, error) {
+ scCtx := &scenarioContext{
+ id: sc.Id,
+ namespace: fmt.Sprintf("ns-%s", sc.Id),
+ clusterExtensionName: fmt.Sprintf("ce-%s", sc.Id),
+ }
+ return context.WithValue(ctx, scenarioContextKey, scCtx), nil
+}
+
+func scenarioCtx(ctx context.Context) *scenarioContext {
+ return ctx.Value(scenarioContextKey).(*scenarioContext)
+}
+
+func stderrOutput(err error) string {
+ var exitErr *exec.ExitError
+ if errors.As(err, &exitErr) && exitErr != nil {
+ return string(exitErr.Stderr)
+ }
+ return ""
+}
+
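+// ScenarioCleanup reaps any background processes and, for passing scenarios only, deletes the
+// scenario's ClusterExtension and namespace in the background; failed scenarios keep their
+// resources around for debugging.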
+func ScenarioCleanup(ctx context.Context, _ *godog.Scenario, err error) (context.Context, error) {
+ sc := scenarioCtx(ctx)
+ for _, bgCmd := range sc.backGroundCmds {
+ if p := bgCmd.Process; p != nil {
+ _ = p.Kill()
+ }
+ }
+ if err != nil {
+ return ctx, err
+ }
+
+ forDeletion := []resource{}
+ if sc.clusterExtensionName != "" {
+ forDeletion = append(forDeletion, resource{name: sc.clusterExtensionName, kind: "clusterextension"})
+ }
+ forDeletion = append(forDeletion, resource{name: sc.namespace, kind: "namespace"})
+ go func() {
+ for _, r := range forDeletion {
+ if _, err := k8sClient("delete", r.kind, r.name, "--ignore-not-found=true"); err != nil {
+ logger.Info("Error deleting resource", "name", r.name, "namespace", sc.namespace, "stderr", stderrOutput(err))
+ }
+ }
+ }()
+ return ctx, nil
+}
diff --git a/test/e2e/steps/steps.go b/test/e2e/steps/steps.go
new file mode 100644
index 000000000..7beb17752
--- /dev/null
+++ b/test/e2e/steps/steps.go
@@ -0,0 +1,728 @@
+package steps
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/cucumber/godog"
+ jsonpatch "github.com/evanphx/json-patch"
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-containerregistry/pkg/crane"
+ "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/common/model"
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/require"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/utils/ptr"
+ "sigs.k8s.io/yaml"
+
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
+)
+
+const (
+ olmDeploymentName = "operator-controller-controller-manager"
+ timeout = 5 * time.Minute
+ tick = 1 * time.Second
+)
+
+var (
+ olmNamespace = "olmv1-system"
+ kubeconfigPath string
+ k8sCli string
+)
+
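+// RegisterSteps maps the Gherkin step expressions used by the feature files to their Go implementations.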
+func RegisterSteps(sc *godog.ScenarioContext) {
+ sc.Step(`^OLM is available$`, OLMisAvailable)
+ sc.Step(`^(?i)bundle "([^"]+)" is installed in version "([^"]+)"$`, BundleInstalled)
+
+ sc.Step(`^(?i)ClusterExtension is applied(?:\s+.*)?$`, ResourceIsApplied)
+ sc.Step(`^(?i)ClusterExtension is updated to version "([^"]+)"$`, ClusterExtensionVersionUpdate)
+ sc.Step(`^(?i)ClusterExtension is updated(?:\s+.*)?$`, ResourceIsApplied)
+ sc.Step(`^(?i)ClusterExtension is available$`, ClusterExtensionIsAvailable)
+ sc.Step(`^(?i)ClusterExtension is rolled out$`, ClusterExtensionIsRolledOut)
+ sc.Step(`^(?i)ClusterExtension reports "([^"]+)" as active revision(s?)$`, ClusterExtensionReportsActiveRevisions)
+ sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+) and Message:$`, ClusterExtensionReportsCondition)
+ sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+)$`, ClusterExtensionReportsConditionWithoutMsg)
+ sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+)$`, ClusterExtensionReportsConditionWithoutReason)
+ sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+)$`, ClusterExtensionRevisionReportsConditionWithoutMsg)
+ sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" is archived$`, ClusterExtensionRevisionIsArchived)
+
+ sc.Step(`^(?i)resource "([^"]+)" is installed$`, ResourceAvailable)
+ sc.Step(`^(?i)resource "([^"]+)" is available$`, ResourceAvailable)
+ sc.Step(`^(?i)resource "([^"]+)" is removed$`, ResourceRemoved)
+ sc.Step(`^(?i)resource "([^"]+)" exists$`, ResourceAvailable)
+ sc.Step(`^(?i)resource is applied$`, ResourceIsApplied)
+ sc.Step(`^(?i)resource "deployment/test-operator" reports as (not ready|ready)$`, MarkTestOperatorNotReady)
+
+ sc.Step(`^(?i)resource apply fails with error msg containing "([^"]+)"$`, ResourceApplyFails)
+ sc.Step(`^(?i)resource "([^"]+)" is eventually restored$`, ResourceRestored)
+ sc.Step(`^(?i)resource "([^"]+)" matches$`, ResourceMatches)
+
+ sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in test namespace$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace)
+ sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in \${TEST_NAMESPACE}$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace)
+ sc.Step(`^(?i)ServiceAccount "([^"]*)" in test namespace is cluster admin$`, ServiceAccountWithClusterAdminPermissionsIsAvailableInNamespace)
+ sc.Step(`^(?i)ServiceAccount "([^"]+)" in test namespace has permissions to fetch "([^"]+)" metrics$`, ServiceAccountWithFetchMetricsPermissions)
+ sc.Step(`^(?i)ServiceAccount "([^"]+)" sends request to "([^"]+)" endpoint of "([^"]+)" service$`, SendMetricsRequest)
+
+ sc.Step(`^"([^"]+)" catalog is updated to version "([^"]+)"$`, CatalogIsUpdatedToVersion)
+ sc.Step(`^(?i)ClusterCatalog "([^"]+)" is updated to version "([^"]+)"$`, CatalogIsUpdatedToVersion)
+ sc.Step(`^"([^"]+)" catalog serves bundles$`, CatalogServesBundles)
+ sc.Step(`^(?i)ClusterCatalog "([^"]+)" serves bundles$`, CatalogServesBundles)
+ sc.Step(`^"([^"]+)" catalog image version "([^"]+)" is also tagged as "([^"]+)"$`, TagCatalogImage)
+ sc.Step(`^(?i)ClusterCatalog "([^"]+)" image version "([^"]+)" is also tagged as "([^"]+)"$`, TagCatalogImage)
+
+ sc.Step(`^(?i)operator "([^"]+)" target namespace is "([^"]+)"$`, OperatorTargetNamespace)
+ sc.Step(`^(?i)Prometheus metrics are returned in the response$`, PrometheusMetricsAreReturned)
+}
+
+func init() {
+ flagSet := pflag.CommandLine
+ flagSet.StringVar(&k8sCli, "k8s.cli", "kubectl", "Path to k8s cli")
+ // Register the kubeconfig flag unconditionally so it is available whether or not KUBECONFIG is set.
+ defaultKubeconfig := os.Getenv("KUBECONFIG")
+ if defaultKubeconfig == "" {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ panic(fmt.Sprintf("cannot determine user home directory: %v", err))
+ }
+ defaultKubeconfig = filepath.Join(home, ".kube", "config")
+ }
+ flagSet.StringVar(&kubeconfigPath, "kubeconfig", defaultKubeconfig, "Path to a kubeconfig. Only required if out-of-cluster.")
+}
+
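+// k8sClient runs the configured CLI (kubectl by default) with KUBECONFIG set and returns its
+// stdout; on failure, stderr is available via the returned *exec.ExitError.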
+func k8sClient(args ...string) (string, error) {
+ cmd := exec.Command(k8sCli, args...)
+ logger.V(1).Info("Running", "command", strings.Join(cmd.Args, " "))
+ cmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath))
+ b, err := cmd.Output()
+ if err != nil {
+ logger.V(1).Info("Failed to run", "command", strings.Join(cmd.Args, " "), "stderr", stderrOutput(err), "error", err)
+ }
+ output := string(b)
+ logger.V(1).Info("Output", "command", strings.Join(cmd.Args, " "), "output", output)
+ return output, err
+}
+
+func k8scliWithInput(yaml string, args ...string) (string, error) {
+ cmd := exec.Command(k8sCli, args...)
+ cmd.Stdin = bytes.NewBufferString(yaml)
+ cmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath))
+ b, err := cmd.Output()
+ return string(b), err
+}
+
+func OLMisAvailable(ctx context.Context) error {
+ require.Eventually(godog.T(ctx), func() bool {
+ v, err := k8sClient("get", "deployment", "-n", olmNamespace, olmDeploymentName, "-o", "jsonpath='{.status.conditions[?(@.type==\"Available\")].status}'")
+ if err != nil {
+ return false
+ }
+ return v == "'True'"
+ }, timeout, tick)
+ return nil
+}
+
+func BundleInstalled(ctx context.Context, name, version string) error {
+ sc := scenarioCtx(ctx)
+ waitFor(ctx, func() bool {
+ v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.install.bundle}")
+ if err != nil {
+ return false
+ }
+ var bundle map[string]interface{}
+ if err := json.Unmarshal([]byte(v), &bundle); err != nil {
+ return false
+ }
+ return bundle["name"] == name && bundle["version"] == version
+ })
+ return nil
+}
+
+func toUnstructured(yamlContent string) (*unstructured.Unstructured, error) {
+ var u map[string]any
+ if err := yaml.Unmarshal([]byte(yamlContent), &u); err != nil {
+ return nil, err
+ }
+ return &unstructured.Unstructured{Object: u}, nil
+}
+
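+// substituteScenarioVars expands ${TEST_NAMESPACE}, ${NAME} and ${CATALOG_IMG} placeholders
+// with per-scenario values; unknown variables expand to the empty string.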
+func substituteScenarioVars(content string, sc *scenarioContext) string {
+ vars := map[string]string{
+ "TEST_NAMESPACE": sc.namespace,
+ "NAME": sc.clusterExtensionName,
+ "CATALOG_IMG": "docker-registry.operator-controller-e2e.svc.cluster.local:5000/e2e/test-catalog:v1",
+ }
+ if v, found := os.LookupEnv("CATALOG_IMG"); found {
+ vars["CATALOG_IMG"] = v
+ }
+ m := func(k string) string {
+ if v, found := vars[k]; found {
+ return v
+ }
+ return ""
+ }
+ return os.Expand(content, m)
+}
+
+func ResourceApplyFails(ctx context.Context, errMsg string, yamlTemplate *godog.DocString) error {
+ sc := scenarioCtx(ctx)
+ yamlContent := substituteScenarioVars(yamlTemplate.Content, sc)
+ _, err := toUnstructured(yamlContent)
+ if err != nil {
+ return fmt.Errorf("failed to parse resource yaml: %v", err)
+ }
+ waitFor(ctx, func() bool {
+ _, err := k8scliWithInput(yamlContent, "apply", "-f", "-")
+ if err == nil {
+ return false
+ }
+ if stdErr := stderrOutput(err); !strings.Contains(stdErr, errMsg) {
+ return false
+ }
+ return true
+ })
+ return nil
+}
+
+func ClusterExtensionVersionUpdate(ctx context.Context, version string) error {
+ sc := scenarioCtx(ctx)
+ patch := map[string]any{
+ "spec": map[string]any{
+ "source": map[string]any{
+ "catalog": map[string]any{
+ "version": version,
+ },
+ },
+ },
+ }
+ pb, err := json.Marshal(patch)
+ if err != nil {
+ return err
+ }
+ _, err = k8sClient("patch", "clusterextension", sc.clusterExtensionName, "--type", "merge", "-p", string(pb))
+ return err
+}
+
+func ResourceIsApplied(ctx context.Context, yamlTemplate *godog.DocString) error {
+ sc := scenarioCtx(ctx)
+ yamlContent := substituteScenarioVars(yamlTemplate.Content, sc)
+ res, err := toUnstructured(yamlContent)
+ if err != nil {
+ return fmt.Errorf("failed to parse resource yaml: %v", err)
+ }
+ out, err := k8scliWithInput(yamlContent, "apply", "-f", "-")
+ if err != nil {
+ return fmt.Errorf("failed to apply resource %v %w", out, err)
+ }
+ if res.GetKind() == "ClusterExtension" {
+ sc.clusterExtensionName = res.GetName()
+ }
+ return nil
+}
+
+func ClusterExtensionIsAvailable(ctx context.Context) error {
+ sc := scenarioCtx(ctx)
+ require.Eventually(godog.T(ctx), func() bool {
+ v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.conditions[?(@.type==\"Installed\")].status}")
+ if err != nil {
+ return false
+ }
+ return v == "True"
+ }, timeout, tick)
+ return nil
+}
+
+func ClusterExtensionIsRolledOut(ctx context.Context) error {
+ sc := scenarioCtx(ctx)
+ require.Eventually(godog.T(ctx), func() bool {
+ v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.conditions[?(@.type==\"Progressing\")]}")
+ if err != nil {
+ return false
+ }
+
+ var condition map[string]interface{}
+ if err := json.Unmarshal([]byte(v), &condition); err != nil {
+ return false
+ }
+ return condition["status"] == "True" && condition["reason"] == "Succeeded" && condition["type"] == "Progressing"
+ }, timeout, tick)
+ return nil
+}
+
+func waitFor(ctx context.Context, conditionFn func() bool) {
+ require.Eventually(godog.T(ctx), conditionFn, timeout, tick)
+}
+
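+// waitForCondition polls the named resource until the status condition of the given type
+// matches the wanted status and, when provided, reason and message; it fails the scenario on timeout.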
+func waitForCondition(ctx context.Context, resourceType, resourceName, conditionType, conditionStatus string, conditionReason *string, msg *string) error {
+ require.Eventually(godog.T(ctx), func() bool {
+ v, err := k8sClient("get", resourceType, resourceName, "-o", fmt.Sprintf("jsonpath={.status.conditions[?(@.type==\"%s\")]}", conditionType))
+ if err != nil {
+ return false
+ }
+
+ var condition map[string]interface{}
+ if err := json.Unmarshal([]byte(v), &condition); err != nil {
+ return false
+ }
+ if condition["status"] != conditionStatus {
+ return false
+ }
+ if conditionReason != nil && condition["reason"] != *conditionReason {
+ return false
+ }
+ if msg != nil && condition["message"] != *msg {
+ return false
+ }
+
+ return true
+ }, timeout, tick)
+ return nil
+}
+
+func waitForExtensionCondition(ctx context.Context, conditionType, conditionStatus string, conditionReason *string, msg *string) error {
+ sc := scenarioCtx(ctx)
+ return waitForCondition(ctx, "clusterextension", sc.clusterExtensionName, conditionType, conditionStatus, conditionReason, msg)
+}
+
+func ClusterExtensionReportsCondition(ctx context.Context, conditionType, conditionStatus, conditionReason string, msg *godog.DocString) error {
+ var conditionMsg *string
+ if msg != nil {
+ conditionMsg = ptr.To(substituteScenarioVars(strings.Join(strings.Fields(msg.Content), " "), scenarioCtx(ctx)))
+ }
+ return waitForExtensionCondition(ctx, conditionType, conditionStatus, &conditionReason, conditionMsg)
+}
+
+func ClusterExtensionReportsConditionWithoutMsg(ctx context.Context, conditionType, conditionStatus, conditionReason string) error {
+ return ClusterExtensionReportsCondition(ctx, conditionType, conditionStatus, conditionReason, nil)
+}
+
+func ClusterExtensionReportsConditionWithoutReason(ctx context.Context, conditionType, conditionStatus string) error {
+ return waitForExtensionCondition(ctx, conditionType, conditionStatus, nil, nil)
+}
+
+func ClusterExtensionReportsActiveRevisions(ctx context.Context, rawRevisionNames string) error {
+ sc := scenarioCtx(ctx)
+ expectedRevisionNames := sets.New[string]()
+ for _, rev := range strings.Split(rawRevisionNames, ",") {
+ expectedRevisionNames.Insert(substituteScenarioVars(strings.TrimSpace(rev), sc))
+ }
+
+ waitFor(ctx, func() bool {
+ v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.activeRevisions}")
+ if err != nil {
+ return false
+ }
+ var activeRevisions []ocv1.RevisionStatus
+ if err := json.Unmarshal([]byte(v), &activeRevisions); err != nil {
+ return false
+ }
+ activeRevisionsNames := sets.New[string]()
+ for _, rev := range activeRevisions {
+ activeRevisionsNames.Insert(rev.Name)
+ }
+ return activeRevisionsNames.Equal(expectedRevisionNames)
+ })
+ return nil
+}
+
+func ClusterExtensionRevisionReportsConditionWithoutMsg(ctx context.Context, revisionName, conditionType, conditionStatus, conditionReason string) error {
+ return waitForCondition(ctx, "clusterextensionrevision", substituteScenarioVars(revisionName, scenarioCtx(ctx)), conditionType, conditionStatus, &conditionReason, nil)
+}
+
+func ClusterExtensionRevisionIsArchived(ctx context.Context, revisionName string) error {
+ return waitForCondition(ctx, "clusterextensionrevision", substituteScenarioVars(revisionName, scenarioCtx(ctx)), "Progressing", "False", ptr.To("Archived"), nil)
+}
+
+func ResourceAvailable(ctx context.Context, resource string) error {
+ sc := scenarioCtx(ctx)
+ resource = substituteScenarioVars(resource, sc)
+ rtype, name, found := strings.Cut(resource, "/")
+ if !found {
+ return fmt.Errorf("resource %s is not in the format /", resource)
+ }
+ waitFor(ctx, func() bool {
+ _, err := k8sClient("get", rtype, name, "-n", sc.namespace)
+ return err == nil
+ })
+ return nil
+}
+
+func ResourceRemoved(ctx context.Context, resource string) error {
+ sc := scenarioCtx(ctx)
+ rtype, name, found := strings.Cut(resource, "/")
+ if !found {
+ return fmt.Errorf("resource %s is not in the format /", resource)
+ }
+ yaml, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "yaml")
+ if err != nil {
+ return err
+ }
+ obj, err := toUnstructured(yaml)
+ if err != nil {
+ return err
+ }
+ sc.removedResources = append(sc.removedResources, *obj)
+ _, err = k8sClient("delete", rtype, name, "-n", sc.namespace)
+ return err
+}
+
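+// ResourceMatches applies the required content to the live object as a JSON merge patch and
+// succeeds once the patched object equals the live one, i.e. the required content is already
+// a subset of the resource.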
+func ResourceMatches(ctx context.Context, resource string, requiredContentTemplate *godog.DocString) error {
+ sc := scenarioCtx(ctx)
+ resource = substituteScenarioVars(resource, sc)
+ rtype, name, found := strings.Cut(resource, "/")
+ if !found {
+ return fmt.Errorf("resource %s is not in the format /", resource)
+ }
+ requiredContent, err := toUnstructured(substituteScenarioVars(requiredContentTemplate.Content, sc))
+ if err != nil {
+ return fmt.Errorf("failed to parse required resource yaml: %v", err)
+ }
+ waitFor(ctx, func() bool {
+ objJson, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "json")
+ if err != nil {
+ return false
+ }
+ obj, err := toUnstructured(objJson)
+ if err != nil {
+ return false
+ }
+ patch, err := json.Marshal(requiredContent.Object)
+ if err != nil {
+ return false
+ }
+ updJson, err := jsonpatch.MergePatch([]byte(objJson), patch)
+ if err != nil {
+ return false
+ }
+ upd, err := toUnstructured(string(updJson))
+ if err != nil {
+ return false
+ }
+
+ return len(cmp.Diff(upd.Object, obj.Object)) == 0
+ })
+ return nil
+}
+
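+// ResourceRestored waits until a resource removed earlier in the scenario reappears with a
+// newer creationTimestamp and matching data (configmaps) or spec (everything else), then
+// forgets it.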
+func ResourceRestored(ctx context.Context, resource string) error {
+ sc := scenarioCtx(ctx)
+ rtype, name, found := strings.Cut(resource, "/")
+ if !found {
+ return fmt.Errorf("resource %s is not in the format /", resource)
+ }
+ waitFor(ctx, func() bool {
+ yaml, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "yaml")
+ if err != nil {
+ return false
+ }
+ obj, err := toUnstructured(yaml)
+ if err != nil {
+ return false
+ }
+ ct := obj.GetCreationTimestamp()
+
+ for i, removed := range sc.removedResources {
+ rct := removed.GetCreationTimestamp()
+ if removed.GetName() == obj.GetName() && removed.GetKind() == obj.GetKind() && rct.Before(&ct) {
+ switch rtype {
+ case "configmap":
+ if !reflect.DeepEqual(removed.Object["data"], obj.Object["data"]) {
+ return false
+ }
+ default:
+ if !reflect.DeepEqual(removed.Object["spec"], obj.Object["spec"]) {
+ return false
+ }
+ }
+ sc.removedResources = append(sc.removedResources[:i], sc.removedResources[i+1:]...)
+ return true
+ }
+ }
+ return false
+ })
+ return nil
+}
+
+func applyPermissionsToServiceAccount(ctx context.Context, serviceAccount, rbacTemplate string, keyValue ...string) error {
+ sc := scenarioCtx(ctx)
+ yamlContent, err := os.ReadFile(filepath.Join("steps", "testdata", rbacTemplate))
+ if err != nil {
+ return fmt.Errorf("failed to read RBAC template yaml: %v", err)
+ }
+
+ vars := map[string]string{
+ "TEST_NAMESPACE": sc.namespace,
+ "SERVICE_ACCOUNT_NAME": serviceAccount,
+ "SERVICEACCOUNT_NAME": serviceAccount,
+ "CLUSTER_EXTENSION_NAME": sc.clusterExtensionName,
+ "CLUSTEREXTENSION_NAME": sc.clusterExtensionName,
+ }
+ if len(keyValue) > 0 {
+ for i := 0; i < len(keyValue); i += 2 {
+ vars[keyValue[i]] = keyValue[i+1]
+ }
+ }
+ m := func(k string) string {
+ if v, found := vars[k]; found {
+ return v
+ }
+ return ""
+ }
+
+ // Replace template variables
+ yaml := os.Expand(string(yamlContent), m)
+
+ // Apply the RBAC configuration
+ _, err = k8scliWithInput(yaml, "apply", "-f", "-")
+ if err != nil {
+ return fmt.Errorf("failed to apply RBAC configuration: %v: %s", err, stderrOutput(err))
+ }
+
+ return nil
+}
+
+func ServiceAccountWithNeededPermissionsIsAvailableInNamespace(ctx context.Context, serviceAccount string) error {
+ return applyPermissionsToServiceAccount(ctx, serviceAccount, "rbac-template.yaml")
+}
+
+func ServiceAccountWithClusterAdminPermissionsIsAvailableInNamespace(ctx context.Context, serviceAccount string) error {
+ return applyPermissionsToServiceAccount(ctx, serviceAccount, "cluster-admin-rbac-template.yaml")
+}
+
+func ServiceAccountWithFetchMetricsPermissions(ctx context.Context, serviceAccount string, controllerName string) error {
+ return applyPermissionsToServiceAccount(ctx, serviceAccount, "metrics-reader-rbac-template.yaml", "CONTROLLER_NAME", controllerName)
+}
+
+func httpGet(url string, token string) (*http.Response, error) {
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // we don't care about the certificate
+ }
+ client := &http.Client{Transport: tr}
+
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Authorization", "Bearer "+token)
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
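+// randomAvailablePort asks the kernel for a free TCP port by binding to :0 and releasing it;
+// there is a small window in which another process could grab the port before kubectl
+// port-forward binds it.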
+func randomAvailablePort() (int, error) {
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return 0, err
+ }
+ defer l.Close()
+ return l.Addr().(*net.TCPAddr).Port, nil
+}
+
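+// SendMetricsRequest port-forwards to every pod backing the "<controllerName>-service" Service
+// and records the body of an authenticated GET against the given endpoint in the scenario context.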
+func SendMetricsRequest(ctx context.Context, serviceAccount string, endpoint string, controllerName string) error {
+ sc := scenarioCtx(ctx)
+ v, err := k8sClient("get", "service", "-n", olmNamespace, fmt.Sprintf("%s-service", controllerName), "-o", "json")
+ if err != nil {
+ return err
+ }
+ var service corev1.Service
+ if err := json.Unmarshal([]byte(v), &service); err != nil {
+ return err
+ }
+ podNameCmd := []string{"get", "pod", "-n", olmNamespace, "-o", "jsonpath={.items}"}
+ for k, v := range service.Spec.Selector {
+ podNameCmd = append(podNameCmd, fmt.Sprintf("--selector=%s=%s", k, v))
+ }
+ v, err = k8sClient(podNameCmd...)
+ if err != nil {
+ return err
+ }
+
+ var pods []corev1.Pod
+ if err := json.Unmarshal([]byte(v), &pods); err != nil {
+ return err
+ }
+ token, err := k8sClient("create", "token", serviceAccount, "-n", sc.namespace)
+ if err != nil {
+ return err
+ }
+ var metricsPort int32
+ for _, p := range service.Spec.Ports {
+ if p.Name == "metrics" {
+ metricsPort = p.Port
+ break
+ }
+ }
+ sc.metricsResponse = make(map[string]string)
+ for _, p := range pods {
+ port, err := randomAvailablePort()
+ if err != nil {
+ return err
+ }
+ portForwardCmd := exec.Command(k8sCli, "port-forward", "-n", p.Namespace, fmt.Sprintf("pod/%s", p.Name), fmt.Sprintf("%d:%d", port, metricsPort)) //nolint:gosec // perfectly safe to start port-forwarder for provided controller name
+ logger.V(1).Info("starting port-forward", "command", strings.Join(portForwardCmd.Args, " "))
+ if err := portForwardCmd.Start(); err != nil {
+ logger.Error(err, fmt.Sprintf("failed to start port-forward for pod %s", p.Name))
+ return err
+ }
+ waitFor(ctx, func() bool {
+ resp, err := httpGet(fmt.Sprintf("https://localhost:%d%s", port, endpoint), token)
+ if err != nil {
+ return false
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusOK {
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return false
+ }
+ sc.metricsResponse[p.Name] = string(b)
+ return true
+ }
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return false
+ }
+ logger.V(1).Info("failed to get metrics", "pod", p.Name, "response", string(b))
+ return false
+ })
+ if err := portForwardCmd.Process.Kill(); err != nil {
+ return err
+ }
+ if _, err := portForwardCmd.Process.Wait(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
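+// CatalogIsUpdatedToVersion rewrites the tag of the catalog's image reference to the given
+// version; it assumes a tag-based reference (everything after the last ':'), not a
+// digest-pinned one.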
+func CatalogIsUpdatedToVersion(name, version string) error {
+ ref, err := k8sClient("get", "clustercatalog", fmt.Sprintf("%s-catalog", name), "-o", "jsonpath={.spec.source.image.ref}")
+ if err != nil {
+ return err
+ }
+ i := strings.LastIndexByte(ref, ':')
+ if i == -1 {
+ return fmt.Errorf("failed to find tag in image reference %s", ref)
+ }
+ base := ref[:i]
+ patch := map[string]any{
+ "spec": map[string]any{
+ "source": map[string]any{
+ "image": map[string]any{
+ "ref": fmt.Sprintf("%s:%s", base, version),
+ },
+ },
+ },
+ }
+ pb, err := json.Marshal(patch)
+ if err != nil {
+ return err
+ }
+ _, err = k8sClient("patch", "clustercatalog", fmt.Sprintf("%s-catalog", name), "--type", "merge", "-p", string(pb))
+ return err
+}
+
+func CatalogServesBundles(ctx context.Context, catalogName string) error {
+ yamlContent, err := os.ReadFile(filepath.Join("steps", "testdata", fmt.Sprintf("%s-catalog-template.yaml", catalogName)))
+ if err != nil {
+ return fmt.Errorf("failed to read catalog yaml: %v", err)
+ }
+
+ _, err = k8scliWithInput(substituteScenarioVars(string(yamlContent), scenarioCtx(ctx)), "apply", "-f", "-")
+ if err != nil {
+ return fmt.Errorf("failed to apply catalog: %v", err)
+ }
+
+ return nil
+}
+
+func TagCatalogImage(name, oldTag, newTag string) error {
+ imageRef := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), fmt.Sprintf("e2e/%s-catalog:%s", name, oldTag))
+ return crane.Tag(imageRef, newTag, crane.Insecure)
+}
+
+func PrometheusMetricsAreReturned(ctx context.Context) error {
+ sc := scenarioCtx(ctx)
+ for podName, mr := range sc.metricsResponse {
+ if mr == "" {
+ return fmt.Errorf("metrics response is empty for pod %s", podName)
+ }
+ parser := expfmt.NewTextParser(model.UTF8Validation)
+ metricsFamilies, err := parser.TextToMetricFamilies(strings.NewReader(mr))
+ if err != nil {
+ return fmt.Errorf("failed to parse metrics response for pod %s: %v", podName, err)
+ }
+ if len(metricsFamilies) == 0 {
+ return fmt.Errorf("metrics response does not contain any metrics for pod %s", podName)
+ }
+ }
+ return nil
+}
+
+func OperatorTargetNamespace(ctx context.Context, operator, namespace string) error {
+ sc := scenarioCtx(ctx)
+ namespace = substituteScenarioVars(namespace, sc)
+ raw, err := k8sClient("get", "deployment", "-n", sc.namespace, operator, "-o", "json")
+ if err != nil {
+ return err
+ }
+ d := &appsv1.Deployment{}
+ if err := json.Unmarshal([]byte(raw), d); err != nil {
+ return err
+ }
+
+ if tns := d.Spec.Template.Annotations["olm.targetNamespaces"]; tns != namespace {
+ return fmt.Errorf("expected target namespace %s, got %s", namespace, tns)
+ }
+ return nil
+}
+
+func MarkTestOperatorNotReady(ctx context.Context, state string) error {
+ sc := scenarioCtx(ctx)
+ v, err := k8sClient("get", "deployment", "-n", sc.namespace, "test-operator", "-o", "jsonpath={.spec.selector.matchLabels}")
+ if err != nil {
+ return err
+ }
+ var labels map[string]string
+ if err := json.Unmarshal([]byte(v), &labels); err != nil {
+ return err
+ }
+ podNameCmd := []string{"get", "pod", "-n", sc.namespace, "-o", "jsonpath={.items[0].metadata.name}"}
+ for k, v := range labels {
+ podNameCmd = append(podNameCmd, fmt.Sprintf("--selector=%s=%s", k, v))
+ }
+ podName, err := k8sClient(podNameCmd...)
+ if err != nil {
+ return err
+ }
+ var op string
+ switch state {
+ case "not ready":
+ op = "rm"
+ case "ready":
+ op = "touch"
+ default:
+ return fmt.Errorf("invalid state %s", state)
+ }
+ _, err = k8sClient("exec", podName, "-n", sc.namespace, "--", op, "/var/www/ready")
+ return err
+}
diff --git a/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml b/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml
new file mode 100644
index 000000000..c020c7ca5
--- /dev/null
+++ b/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml
@@ -0,0 +1,24 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: ${TEST_NAMESPACE}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ${SERVICEACCOUNT_NAME}
+ namespace: ${TEST_NAMESPACE}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-cluster-admin-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: ${SERVICEACCOUNT_NAME}
+ namespace: ${TEST_NAMESPACE}
diff --git a/test/e2e/steps/testdata/extra-catalog-template.yaml b/test/e2e/steps/testdata/extra-catalog-template.yaml
new file mode 100644
index 000000000..a43d9b324
--- /dev/null
+++ b/test/e2e/steps/testdata/extra-catalog-template.yaml
@@ -0,0 +1,11 @@
+apiVersion: olm.operatorframework.io/v1
+kind: ClusterCatalog
+metadata:
+ name: extra-catalog
+spec:
+ priority: 0
+ source:
+ type: Image
+ image:
+ pollIntervalMinutes: 1
+ ref: ${CATALOG_IMG}
diff --git a/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml b/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml
new file mode 100644
index 000000000..4001f8681
--- /dev/null
+++ b/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml
@@ -0,0 +1,24 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: ${TEST_NAMESPACE}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ${SERVICEACCOUNT_NAME}
+ namespace: ${TEST_NAMESPACE}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ${CONTROLLER_NAME}-metrics-reader-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ${CONTROLLER_NAME}-metrics-reader
+subjects:
+ - kind: ServiceAccount
+ name: ${SERVICEACCOUNT_NAME}
+ namespace: ${TEST_NAMESPACE}
diff --git a/test/e2e/steps/testdata/rbac-template.yaml b/test/e2e/steps/testdata/rbac-template.yaml
new file mode 100644
index 000000000..d975d7698
--- /dev/null
+++ b/test/e2e/steps/testdata/rbac-template.yaml
@@ -0,0 +1,77 @@
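+# Placeholders such as ${TEST_NAMESPACE}, ${SERVICEACCOUNT_NAME} and ${CLUSTEREXTENSION_NAME}
+# are expanded by applyPermissionsToServiceAccount (via os.Expand) before this manifest is applied.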
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: ${TEST_NAMESPACE}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ${SERVICEACCOUNT_NAME}
+ namespace: ${TEST_NAMESPACE}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-olm-admin-clusterrole
+rules:
+ - apiGroups: [olm.operatorframework.io]
+ resources: [clusterextensions, clusterextensions/finalizers]
+ resourceNames: ["${CLUSTEREXTENSION_NAME}"]
+ verbs: [update]
+ # Allow ClusterExtensionRevisions to set blockOwnerDeletion ownerReferences
+ - apiGroups: [olm.operatorframework.io]
+ resources: [clusterextensionrevisions, clusterextensionrevisions/finalizers]
+ verbs: [update, create, list, watch, get, delete, patch]
+
+ - apiGroups: [apiextensions.k8s.io]
+ resources: [customresourcedefinitions]
+ verbs: [update, create, list, watch, get, delete, patch]
+ - apiGroups: [""]
+ resources:
+ - configmaps
+ - secrets
+ - services
+ - serviceaccounts
+ - events
+ - namespaces
+ verbs: [update, create, list, watch, get, delete, patch]
+ - apiGroups: ["apps"]
+ resources:
+ - deployments
+ verbs: [ update, create, list, watch, get, delete, patch ]
+ - apiGroups: ["networking.k8s.io"]
+ resources:
+ - networkpolicies
+ verbs: [ update, create, list, watch, get, delete, patch ]
+ - apiGroups: ["rbac.authorization.k8s.io"]
+ resources:
+ - clusterroles
+ - roles
+ - clusterrolebindings
+ - rolebindings
+ verbs: [ update, create, list, watch, get, delete, patch ]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: [ update, create, list, watch, get, delete, patch ]
+ - apiGroups: ["authorization.k8s.io"]
+ resources: ["subjectaccessreviews"]
+ verbs: [create]
+ - apiGroups: ["authentication.k8s.io"]
+ resources: ["tokenreviews"]
+ verbs: [create]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-install-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-olm-admin-clusterrole
+subjects:
+ - kind: ServiceAccount
+ name: ${SERVICEACCOUNT_NAME}
+ namespace: ${TEST_NAMESPACE}
diff --git a/test/e2e/steps/testdata/test-catalog-template.yaml b/test/e2e/steps/testdata/test-catalog-template.yaml
new file mode 100644
index 000000000..7e46872f3
--- /dev/null
+++ b/test/e2e/steps/testdata/test-catalog-template.yaml
@@ -0,0 +1,11 @@
+apiVersion: olm.operatorframework.io/v1
+kind: ClusterCatalog
+metadata:
+ name: test-catalog
+spec:
+ priority: 0
+ source:
+ type: Image
+ image:
+ pollIntervalMinutes: 1
+ ref: ${CATALOG_IMG}
diff --git a/test/e2e/webhook_support_test.go b/test/e2e/webhook_support_test.go
deleted file mode 100644
index 9fd05184a..000000000
--- a/test/e2e/webhook_support_test.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package e2e
-
-import (
- "context"
- "fmt"
- "os"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- rbacv1 "k8s.io/api/rbac/v1"
- apimeta "k8s.io/apimachinery/pkg/api/meta"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/dynamic"
- "k8s.io/utils/ptr"
-
- ocv1 "github.com/operator-framework/operator-controller/api/v1"
- testutil "github.com/operator-framework/operator-controller/internal/shared/util/test"
- . "github.com/operator-framework/operator-controller/test/helpers"
-)
-
-var dynamicClient dynamic.Interface
-
-func TestWebhookSupport(t *testing.T) {
- SkipIfFeatureGateDisabled(t, "WebhookProviderCertManager")
- t.Log("Test support for bundles with webhooks")
- defer testutil.CollectTestArtifacts(t, artifactName, c, cfg)
-
- if dynamicClient == nil {
- var err error
- dynamicClient, err = dynamic.NewForConfig(cfg)
- require.NoError(t, err)
- }
-
- t.Log("By creating install namespace, and necessary rbac resources")
- namespace := corev1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: "webhook-operator",
- },
- }
- require.NoError(t, c.Create(t.Context(), &namespace))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), &namespace))
- })
-
- serviceAccount := corev1.ServiceAccount{
- ObjectMeta: metav1.ObjectMeta{
- Name: "webhook-operator-installer",
- Namespace: namespace.GetName(),
- },
- }
- require.NoError(t, c.Create(t.Context(), &serviceAccount))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), &serviceAccount))
- })
-
- clusterRoleBinding := &rbacv1.ClusterRoleBinding{
- ObjectMeta: metav1.ObjectMeta{
- Name: "webhook-operator-installer",
- },
- Subjects: []rbacv1.Subject{
- {
- Kind: "ServiceAccount",
- APIGroup: corev1.GroupName,
- Name: serviceAccount.GetName(),
- Namespace: serviceAccount.GetNamespace(),
- },
- },
- RoleRef: rbacv1.RoleRef{
- APIGroup: rbacv1.GroupName,
- Kind: "ClusterRole",
- Name: "cluster-admin",
- },
- }
- require.NoError(t, c.Create(t.Context(), clusterRoleBinding))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), clusterRoleBinding))
- })
-
- t.Log("By creating the webhook-operator ClusterCatalog")
- extensionCatalog := &ocv1.ClusterCatalog{
- ObjectMeta: metav1.ObjectMeta{
- Name: "webhook-operator-catalog",
- },
- Spec: ocv1.ClusterCatalogSpec{
- Source: ocv1.CatalogSource{
- Type: ocv1.SourceTypeImage,
- Image: &ocv1.ImageSource{
- Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")),
- PollIntervalMinutes: ptr.To(1),
- },
- },
- },
- }
- require.NoError(t, c.Create(t.Context(), extensionCatalog))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), extensionCatalog))
- })
-
- t.Log("By waiting for the catalog to serve its metadata")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog))
- cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonAvailable, cond.Reason)
- }, pollDuration, pollInterval)
-
- t.Log("By installing the webhook-operator ClusterExtension")
- clusterExtension := &ocv1.ClusterExtension{
- ObjectMeta: metav1.ObjectMeta{
- Name: "webhook-operator-extension",
- },
- Spec: ocv1.ClusterExtensionSpec{
- Source: ocv1.SourceConfig{
- SourceType: "Catalog",
- Catalog: &ocv1.CatalogFilter{
- PackageName: "webhook-operator",
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
- },
- },
- },
- Namespace: namespace.GetName(),
- ServiceAccount: ocv1.ServiceAccountReference{
- Name: serviceAccount.GetName(),
- },
- },
- }
- require.NoError(t, c.Create(t.Context(), clusterExtension))
- t.Cleanup(func() {
- require.NoError(t, c.Delete(context.Background(), clusterExtension))
- })
-
- t.Log("By waiting for webhook-operator extension to be installed successfully")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
- require.NotNil(ct, cond)
- require.Equal(ct, metav1.ConditionTrue, cond.Status)
- require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
- require.Contains(ct, cond.Message, "Installed bundle")
- require.NotNil(ct, clusterExtension.Status.Install)
- require.NotEmpty(ct, clusterExtension.Status.Install.Bundle)
- }, pollDuration, pollInterval)
-
- t.Log("By waiting for webhook-operator deployment to be available")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- deployment := &appsv1.Deployment{}
- require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "webhook-operator-controller-manager"}, deployment))
- available := false
- for _, cond := range deployment.Status.Conditions {
- if cond.Type == appsv1.DeploymentAvailable {
- available = cond.Status == corev1.ConditionTrue
- }
- }
- require.True(ct, available)
- }, pollDuration, pollInterval)
-
- v1Gvr := schema.GroupVersionResource{
- Group: "webhook.operators.coreos.io",
- Version: "v1",
- Resource: "webhooktests",
- }
- v1Client := dynamicClient.Resource(v1Gvr).Namespace(namespace.GetName())
-
- t.Log("By eventually seeing that invalid CR creation is rejected by the validating webhook")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- obj := getWebhookOperatorResource("invalid-test-cr", namespace.GetName(), false)
- _, err := v1Client.Create(t.Context(), obj, metav1.CreateOptions{})
- require.Error(ct, err)
- require.Contains(ct, err.Error(), "Invalid value: false: Spec.Valid must be true")
- }, pollDuration, pollInterval)
-
- var (
- res *unstructured.Unstructured
- err error
- obj = getWebhookOperatorResource("valid-test-cr", namespace.GetName(), true)
- )
-
- t.Log("By eventually creating a valid CR")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- res, err = v1Client.Create(t.Context(), obj, metav1.CreateOptions{})
- require.NoError(ct, err)
- }, pollDuration, pollInterval)
- t.Cleanup(func() {
- require.NoError(t, v1Client.Delete(context.Background(), obj.GetName(), metav1.DeleteOptions{}))
- })
-
- require.Equal(t, map[string]interface{}{
- "valid": true,
- "mutate": true,
- }, res.Object["spec"])
-
- t.Log("By checking a valid CR is converted to v2 by the conversion webhook")
- v2Gvr := schema.GroupVersionResource{
- Group: "webhook.operators.coreos.io",
- Version: "v2",
- Resource: "webhooktests",
- }
- v2Client := dynamicClient.Resource(v2Gvr).Namespace(namespace.GetName())
-
- t.Log("By eventually getting the valid CR with a v2 client")
- require.EventuallyWithT(t, func(ct *assert.CollectT) {
- res, err = v2Client.Get(t.Context(), obj.GetName(), metav1.GetOptions{})
- require.NoError(ct, err)
- }, pollDuration, pollInterval)
-
- t.Log("and verifying that the CR is correctly converted")
- require.Equal(t, map[string]interface{}{
- "conversion": map[string]interface{}{
- "valid": true,
- "mutate": true,
- },
- }, res.Object["spec"])
-}
-
-func getWebhookOperatorResource(name string, namespace string, valid bool) *unstructured.Unstructured {
- return &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "webhook.operators.coreos.io/v1",
- "kind": "webhooktests",
- "metadata": map[string]interface{}{
- "name": name,
- "namespace": namespace,
- },
- "spec": map[string]interface{}{
- "valid": valid,
- },
- },
- }
-}
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/.gitignore b/vendor/github.com/cucumber/gherkin/go/v26/.gitignore
new file mode 100644
index 000000000..7b0ee7aeb
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/.gitignore
@@ -0,0 +1,17 @@
+.built
+.compared
+.deps
+.dist
+.dist-compressed
+.go-get
+.gofmt
+.linted
+.tested*
+acceptance/
+bin/
+dist/
+dist_compressed/
+*.bin
+*.iml
+# upx dist/cucumber-gherkin-openbsd-386 fails with a core dump
+core.*.!usr!bin!upx-ucl
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/LICENSE b/vendor/github.com/cucumber/gherkin/go/v26/LICENSE
new file mode 100644
index 000000000..29e136102
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) Cucumber Ltd, Gaspar Nagy, Björn Rasmusson, Peter Sergeant
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/Makefile b/vendor/github.com/cucumber/gherkin/go/v26/Makefile
new file mode 100644
index 000000000..d7d420854
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/Makefile
@@ -0,0 +1,83 @@
+SHELL := /usr/bin/env bash
+
+GHERKIN_LANGUAGES_JSON = dialects_builtin.go
+GHERKIN_PARSER = parser.go
+GHERKIN_RAZOR = parser.go.razor
+SOURCE_FILES = $(shell find . -name "*.go" | grep -v $(GHERKIN_PARSER))
+
+GHERKIN = bin/gherkin
+GHERKIN_GENERATE_TOKENS = bin/gherkin-generate-tokens
+
+GOOD_FEATURE_FILES = $(shell find ../testdata/good -name "*.feature")
+BAD_FEATURE_FILES = $(shell find ../testdata/bad -name "*.feature")
+
+TOKENS = $(patsubst ../testdata/%,acceptance/testdata/%.tokens,$(GOOD_FEATURE_FILES))
+ASTS = $(patsubst ../testdata/%,acceptance/testdata/%.ast.ndjson,$(GOOD_FEATURE_FILES))
+PICKLES = $(patsubst ../testdata/%,acceptance/testdata/%.pickles.ndjson,$(GOOD_FEATURE_FILES))
+SOURCES = $(patsubst ../testdata/%,acceptance/testdata/%.source.ndjson,$(GOOD_FEATURE_FILES))
+ERRORS = $(patsubst ../testdata/%,acceptance/testdata/%.errors.ndjson,$(BAD_FEATURE_FILES))
+
+.DEFAULT_GOAL = help
+
+help: ## Show this help
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make <target>\n\nWhere <target> is one of:\n"} /^[$$()% a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+generate: $(GHERKIN_PARSER) ## Generate gherkin parser files
+
+clean-generate: ## Remove generated Gherkin parser files
+ rm -f $(GHERKIN_PARSER)
+
+copy-gherkin-languages: $(GHERKIN_LANGUAGES_JSON) ## Copy gherkin-languages.json and/or generate derived files
+
+clean-gherkin-languages: ## Remove gherkin-languages.json and any derived files
+ rm -f $(GHERKIN_LANGUAGES_JSON)
+
+clean: ## Remove all build artifacts and files generated by the acceptance tests
+ rm -rf .built
+ rm -rf acceptance
+ rm -rf bin
+
+.DELETE_ON_ERROR:
+
+acceptance: .built $(TOKENS) $(ASTS) $(PICKLES) $(ERRORS) $(SOURCES) ## Build acceptance test dir and compare results with reference
+
+.built: bin/gherkin-generate-tokens bin/gherkin
+ touch $@
+
+bin/gherkin-generate-tokens:
+ go build -o $@ ./gherkin-generate-tokens
+
+bin/gherkin:
+ go build -o $@ -a ./main
+
+dialects_builtin.go: ../gherkin-languages.json dialects_builtin.go.jq
+ cat $< | jq --sort-keys --from-file dialects_builtin.go.jq --raw-output --compact-output > $@
+
+$(GHERKIN_PARSER): $(GHERKIN_RAZOR) ../gherkin.berp
+ berp -g ../gherkin.berp -t $< -o $@ --noBOM
+ gofmt -w $@
+
+acceptance/testdata/%.tokens: ../testdata/% ../testdata/%.tokens
+ mkdir -p $(@D)
+ $(GHERKIN_GENERATE_TOKENS) $< > $@
+ diff --unified $<.tokens $@
+
+acceptance/testdata/%.ast.ndjson: ../testdata/% ../testdata/%.ast.ndjson
+ mkdir -p $(@D)
+ $(GHERKIN) --no-source --no-pickles --predictable-ids $< | jq --sort-keys --compact-output "." > $@
+ diff --unified <(jq "." $<.ast.ndjson) <(jq "." $@)
+
+acceptance/testdata/%.pickles.ndjson: ../testdata/% ../testdata/%.pickles.ndjson
+ mkdir -p $(@D)
+ $(GHERKIN) --no-source --no-ast --predictable-ids $< | jq --sort-keys --compact-output "." > $@
+ diff --unified <(jq "." $<.pickles.ndjson) <(jq "." $@)
+
+acceptance/testdata/%.source.ndjson: ../testdata/% ../testdata/%.source.ndjson
+ mkdir -p $(@D)
+ $(GHERKIN) --no-ast --no-pickles --predictable-ids $< | jq --sort-keys --compact-output "." > $@
+ diff --unified <(jq "." $<.source.ndjson) <(jq "." $@)
+
+acceptance/testdata/%.errors.ndjson: ../testdata/% ../testdata/%.errors.ndjson
+ mkdir -p $(@D)
+ $(GHERKIN) --no-source --predictable-ids $< | jq --sort-keys --compact-output "." > $@
+ diff --unified <(jq "." $<.errors.ndjson) <(jq "." $@)
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/README.md b/vendor/github.com/cucumber/gherkin/go/v26/README.md
new file mode 100644
index 000000000..cdaba03c9
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/README.md
@@ -0,0 +1,37 @@
+# Gherkin for Go
+
+[GoDoc](http://godoc.org/github.com/cucumber/gherkin/go)
+
+Gherkin parser/compiler for Go. Please see [Gherkin](https://github.com/cucumber/gherkin) for details.
+
+## Building
+
+You need Go installed (obviously). You also need to make sure your `PATH`
+points to where Go installs packages:
+
+```bash
+# Add go bin to path
+export PATH=$(go env GOPATH)/bin:${PATH}
+```
+
+Now build it:
+
+```
+make .dist
+```
+
+You should have cross-compiled binaries in `./dist/`.
+
+## Compress binaries
+
+You need [upx](https://upx.github.io/) installed.
+
+```
+make .dist
+make .dist-compressed
+```
+
+Your `./dist_compressed/` directory should now have compressed binaries.
+Compression fails for some binaries, so you likely won't have a full set.
+
+The build copies the successfully compressed binaries back to `./dist/`.
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/astbuilder.go b/vendor/github.com/cucumber/gherkin/go/v26/astbuilder.go
new file mode 100644
index 000000000..54853d02b
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/astbuilder.go
@@ -0,0 +1,453 @@
+package gherkin
+
+import (
+ "github.com/cucumber/messages/go/v21"
+ "strings"
+)
+
+type AstBuilder interface {
+ Builder
+ GetGherkinDocument() *messages.GherkinDocument
+}
+
+type astBuilder struct {
+ stack []*astNode
+ comments []*messages.Comment
+ newId func() string
+}
+
+func (t *astBuilder) Reset() {
+ t.comments = []*messages.Comment{}
+ t.stack = []*astNode{}
+ t.push(newAstNode(RuleTypeNone))
+}
+
+func (t *astBuilder) GetGherkinDocument() *messages.GherkinDocument {
+ res := t.currentNode().getSingle(RuleTypeGherkinDocument, nil)
+ if val, ok := res.(*messages.GherkinDocument); ok {
+ return val
+ }
+ return nil
+}
+
+type astNode struct {
+ ruleType RuleType
+ subNodes map[RuleType][]interface{}
+}
+
+func (a *astNode) add(rt RuleType, obj interface{}) {
+ a.subNodes[rt] = append(a.subNodes[rt], obj)
+}
+
+func (a *astNode) getSingle(rt RuleType, defaultValue interface{}) interface{} {
+ if val, ok := a.subNodes[rt]; ok {
+ for i := range val {
+ return val[i]
+ }
+ }
+ return defaultValue
+}
+
+func (a *astNode) getItems(rt RuleType) []interface{} {
+ var res []interface{}
+ if val, ok := a.subNodes[rt]; ok {
+ for i := range val {
+ res = append(res, val[i])
+ }
+ }
+ return res
+}
+
+func (a *astNode) getToken(tt TokenType) *Token {
+ if val, ok := a.getSingle(tt.RuleType(), nil).(*Token); ok {
+ return val
+ }
+ return nil
+}
+
+func (a *astNode) getTokens(tt TokenType) []*Token {
+ var items = a.getItems(tt.RuleType())
+ var tokens []*Token
+ for i := range items {
+ if val, ok := items[i].(*Token); ok {
+ tokens = append(tokens, val)
+ }
+ }
+ return tokens
+}
+
+func (t *astBuilder) currentNode() *astNode {
+ if len(t.stack) > 0 {
+ return t.stack[len(t.stack)-1]
+ }
+ return nil
+}
+
+func newAstNode(rt RuleType) *astNode {
+ return &astNode{
+ ruleType: rt,
+ subNodes: make(map[RuleType][]interface{}),
+ }
+}
+
+func NewAstBuilder(newId func() string) AstBuilder {
+ builder := new(astBuilder)
+ builder.newId = newId
+ builder.comments = []*messages.Comment{}
+ builder.push(newAstNode(RuleTypeNone))
+ return builder
+}
+
+func (t *astBuilder) push(n *astNode) {
+ t.stack = append(t.stack, n)
+}
+
+func (t *astBuilder) pop() *astNode {
+ x := t.stack[len(t.stack)-1]
+ t.stack = t.stack[:len(t.stack)-1]
+ return x
+}
+
+func (t *astBuilder) Build(tok *Token) (bool, error) {
+ if tok.Type == TokenTypeComment {
+ comment := &messages.Comment{
+ Location: astLocation(tok),
+ Text: tok.Text,
+ }
+ t.comments = append(t.comments, comment)
+ } else {
+ t.currentNode().add(tok.Type.RuleType(), tok)
+ }
+ return true, nil
+}
+
+func (t *astBuilder) StartRule(r RuleType) (bool, error) {
+ t.push(newAstNode(r))
+ return true, nil
+}
+
+func (t *astBuilder) EndRule(r RuleType) (bool, error) {
+ node := t.pop()
+ transformedNode, err := t.transformNode(node)
+ t.currentNode().add(node.ruleType, transformedNode)
+ return true, err
+}
+
+func (t *astBuilder) transformNode(node *astNode) (interface{}, error) {
+ switch node.ruleType {
+
+ case RuleTypeStep:
+ stepLine := node.getToken(TokenTypeStepLine)
+
+ step := &messages.Step{
+ Location: astLocation(stepLine),
+ Keyword: stepLine.Keyword,
+ KeywordType: stepLine.KeywordType,
+ Text: stepLine.Text,
+ Id: t.newId(),
+ }
+ dataTable := node.getSingle(RuleTypeDataTable, nil)
+ if dataTable != nil {
+ step.DataTable = dataTable.(*messages.DataTable)
+ } else {
+ docString := node.getSingle(RuleTypeDocString, nil)
+ if docString != nil {
+ step.DocString = docString.(*messages.DocString)
+ }
+ }
+
+ return step, nil
+
+ case RuleTypeDocString:
+ separatorToken := node.getToken(TokenTypeDocStringSeparator)
+ lineTokens := node.getTokens(TokenTypeOther)
+ var text string
+ for i := range lineTokens {
+ if i > 0 {
+ text += "\n"
+ }
+ text += lineTokens[i].Text
+ }
+ docString := &messages.DocString{
+ Location: astLocation(separatorToken),
+ Content: text,
+ Delimiter: separatorToken.Keyword,
+ }
+ if len(separatorToken.Text) > 0 {
+ docString.MediaType = separatorToken.Text
+ }
+
+ return docString, nil
+
+ case RuleTypeDataTable:
+ rows, err := astTableRows(node, t.newId)
+ dt := &messages.DataTable{
+ Location: rows[0].Location,
+ Rows: rows,
+ }
+ return dt, err
+
+ case RuleTypeBackground:
+ backgroundLine := node.getToken(TokenTypeBackgroundLine)
+ bg := &messages.Background{
+ Id: t.newId(),
+ Location: astLocation(backgroundLine),
+ Keyword: backgroundLine.Keyword,
+ Name: backgroundLine.Text,
+ Description: getDescription(node),
+ Steps: astSteps(node),
+ }
+ return bg, nil
+
+ case RuleTypeScenarioDefinition:
+ scenarioNode := node.getSingle(RuleTypeScenario, nil).(*astNode)
+ scenarioLine := scenarioNode.getToken(TokenTypeScenarioLine)
+ tags := astTags(node, t.newId)
+ sc := &messages.Scenario{
+ Id: t.newId(),
+ Tags: tags,
+ Location: astLocation(scenarioLine),
+ Keyword: scenarioLine.Keyword,
+ Name: scenarioLine.Text,
+ Description: getDescription(scenarioNode),
+ Steps: astSteps(scenarioNode),
+ Examples: astExamples(scenarioNode),
+ }
+
+ return sc, nil
+
+ case RuleTypeExamplesDefinition:
+ tags := astTags(node, t.newId)
+ examplesNode := node.getSingle(RuleTypeExamples, nil).(*astNode)
+ examplesLine := examplesNode.getToken(TokenTypeExamplesLine)
+ examplesTable := examplesNode.getSingle(RuleTypeExamplesTable, make([]*messages.TableRow, 0)).([]*messages.TableRow)
+
+ var tableHeader *messages.TableRow
+ var tableBody []*messages.TableRow
+
+ if len(examplesTable) > 0 {
+ tableHeader = examplesTable[0]
+ tableBody = examplesTable[1:]
+ } else {
+ tableHeader = nil
+ tableBody = examplesTable
+ }
+
+ ex := &messages.Examples{
+ Id: t.newId(),
+ Tags: tags,
+ Location: astLocation(examplesLine),
+ Keyword: examplesLine.Keyword,
+ Name: examplesLine.Text,
+ Description: getDescription(examplesNode),
+ TableHeader: tableHeader,
+ TableBody: tableBody,
+ }
+ return ex, nil
+
+ case RuleTypeExamplesTable:
+ allRows, err := astTableRows(node, t.newId)
+ return allRows, err
+
+ case RuleTypeDescription:
+ lineTokens := node.getTokens(TokenTypeOther)
+ // Trim trailing empty lines
+ end := len(lineTokens)
+ for end > 0 && strings.TrimSpace(lineTokens[end-1].Text) == "" {
+ end--
+ }
+ var desc []string
+ for i := range lineTokens[0:end] {
+ desc = append(desc, lineTokens[i].Text)
+ }
+ return strings.Join(desc, "\n"), nil
+
+ case RuleTypeFeature:
+ header := node.getSingle(RuleTypeFeatureHeader, nil).(*astNode)
+ tags := astTags(header, t.newId)
+ featureLine := header.getToken(TokenTypeFeatureLine)
+ if featureLine == nil {
+ return nil, nil
+ }
+
+ children := make([]*messages.FeatureChild, 0)
+ background, _ := node.getSingle(RuleTypeBackground, nil).(*messages.Background)
+ if background != nil {
+ children = append(children, &messages.FeatureChild{
+ Background: background,
+ })
+ }
+ scenarios := node.getItems(RuleTypeScenarioDefinition)
+ for i := range scenarios {
+ scenario := scenarios[i].(*messages.Scenario)
+ children = append(children, &messages.FeatureChild{
+ Scenario: scenario,
+ })
+ }
+ rules := node.getItems(RuleTypeRule)
+ for i := range rules {
+ rule := rules[i].(*messages.Rule)
+ children = append(children, &messages.FeatureChild{
+ Rule: rule,
+ })
+ }
+
+ feature := &messages.Feature{
+ Tags: tags,
+ Location: astLocation(featureLine),
+ Language: featureLine.GherkinDialect,
+ Keyword: featureLine.Keyword,
+ Name: featureLine.Text,
+ Description: getDescription(header),
+ Children: children,
+ }
+ return feature, nil
+
+ case RuleTypeRule:
+ header := node.getSingle(RuleTypeRuleHeader, nil).(*astNode)
+ ruleLine := header.getToken(TokenTypeRuleLine)
+ if ruleLine == nil {
+ return nil, nil
+ }
+
+ tags := astTags(header, t.newId)
+ var children []*messages.RuleChild
+ background, _ := node.getSingle(RuleTypeBackground, nil).(*messages.Background)
+
+ if background != nil {
+ children = append(children, &messages.RuleChild{
+ Background: background,
+ })
+ }
+ scenarios := node.getItems(RuleTypeScenarioDefinition)
+ for i := range scenarios {
+ scenario := scenarios[i].(*messages.Scenario)
+ children = append(children, &messages.RuleChild{
+ Scenario: scenario,
+ })
+ }
+
+ rule := &messages.Rule{
+ Id: t.newId(),
+ Location: astLocation(ruleLine),
+ Keyword: ruleLine.Keyword,
+ Name: ruleLine.Text,
+ Description: getDescription(header),
+ Children: children,
+ Tags: tags,
+ }
+ return rule, nil
+
+ case RuleTypeGherkinDocument:
+ feature, _ := node.getSingle(RuleTypeFeature, nil).(*messages.Feature)
+
+ doc := &messages.GherkinDocument{}
+ if feature != nil {
+ doc.Feature = feature
+ }
+ doc.Comments = t.comments
+ return doc, nil
+ }
+ return node, nil
+}
+
+func getDescription(node *astNode) string {
+ return node.getSingle(RuleTypeDescription, "").(string)
+}
+
+func astLocation(t *Token) *messages.Location {
+ return &messages.Location{
+ Line: int64(t.Location.Line),
+ Column: int64(t.Location.Column),
+ }
+}
+
+func astTableRows(t *astNode, newId func() string) (rows []*messages.TableRow, err error) {
+ rows = []*messages.TableRow{}
+ tokens := t.getTokens(TokenTypeTableRow)
+ for i := range tokens {
+ row := &messages.TableRow{
+ Id: newId(),
+ Location: astLocation(tokens[i]),
+ Cells: astTableCells(tokens[i]),
+ }
+ rows = append(rows, row)
+ }
+ err = ensureCellCount(rows)
+ return
+}
+
+func ensureCellCount(rows []*messages.TableRow) error {
+ if len(rows) <= 1 {
+ return nil
+ }
+ cellCount := len(rows[0].Cells)
+ for i := range rows {
+ if cellCount != len(rows[i].Cells) {
+ return &parseError{"inconsistent cell count within the table", &Location{
+ Line: int(rows[i].Location.Line),
+ Column: int(rows[i].Location.Column),
+ }}
+ }
+ }
+ return nil
+}
+
+func astTableCells(t *Token) (cells []*messages.TableCell) {
+ cells = []*messages.TableCell{}
+ for i := range t.Items {
+ item := t.Items[i]
+ cell := &messages.TableCell{}
+ cell.Location = &messages.Location{
+ Line: int64(t.Location.Line),
+ Column: int64(item.Column),
+ }
+ cell.Value = item.Text
+ cells = append(cells, cell)
+ }
+ return
+}
+
+func astSteps(t *astNode) (steps []*messages.Step) {
+ steps = []*messages.Step{}
+ tokens := t.getItems(RuleTypeStep)
+ for i := range tokens {
+ step, _ := tokens[i].(*messages.Step)
+ steps = append(steps, step)
+ }
+ return
+}
+
+func astExamples(t *astNode) (examples []*messages.Examples) {
+ examples = []*messages.Examples{}
+ tokens := t.getItems(RuleTypeExamplesDefinition)
+ for i := range tokens {
+ example, _ := tokens[i].(*messages.Examples)
+ examples = append(examples, example)
+ }
+ return
+}
+
+func astTags(node *astNode, newId func() string) (tags []*messages.Tag) {
+ tags = []*messages.Tag{}
+ tagsNode, ok := node.getSingle(RuleTypeTags, nil).(*astNode)
+ if !ok {
+ return
+ }
+ tokens := tagsNode.getTokens(TokenTypeTagLine)
+ for i := range tokens {
+ token := tokens[i]
+ for k := range token.Items {
+ item := token.Items[k]
+ tag := &messages.Tag{}
+ tag.Location = &messages.Location{
+ Line: int64(token.Location.Line),
+ Column: int64(item.Column),
+ }
+ tag.Name = item.Text
+ tag.Id = newId()
+ tags = append(tags, tag)
+ }
+ }
+ return
+}
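For orientation, here is a minimal sketch of exercising the builder above through the package's top-level entry point. It assumes `ParseGherkinDocument` from the library's `gherkin.go` (not part of this diff) with the signature `(io.Reader, func() string) (*messages.GherkinDocument, error)`; the id callback is the same `newId` hook that `NewAstBuilder` receives.

```go
package main

import (
	"fmt"
	"strings"

	gherkin "github.com/cucumber/gherkin/go/v26"
)

func main() {
	src := "Feature: Minimal\n\n  Scenario: Parse\n    Given a feature file\n"

	// Deterministic id generator; the AST builder calls newId() once per node.
	n := 0
	newId := func() string { n++; return fmt.Sprint(n) }

	doc, err := gherkin.ParseGherkinDocument(strings.NewReader(src), newId)
	if err != nil {
		panic(err)
	}
	fmt.Println(doc.Feature.Name) // "Minimal"
}
```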
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/dialect.go b/vendor/github.com/cucumber/gherkin/go/v26/dialect.go
new file mode 100644
index 000000000..212df62b2
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/dialect.go
@@ -0,0 +1,58 @@
+package gherkin
+
+import messages "github.com/cucumber/messages/go/v21"
+
+type Dialect struct {
+ Language string
+ Name string
+ Native string
+ Keywords map[string][]string
+ KeywordTypes map[string]messages.StepKeywordType
+}
+
+func (g *Dialect) FeatureKeywords() []string {
+ return g.Keywords["feature"]
+}
+
+func (g *Dialect) RuleKeywords() []string {
+ return g.Keywords["rule"]
+}
+
+func (g *Dialect) ScenarioKeywords() []string {
+ return g.Keywords["scenario"]
+}
+
+func (g *Dialect) StepKeywords() []string {
+ result := g.Keywords["given"]
+ result = append(result, g.Keywords["when"]...)
+ result = append(result, g.Keywords["then"]...)
+ result = append(result, g.Keywords["and"]...)
+ result = append(result, g.Keywords["but"]...)
+ return result
+}
+
+func (g *Dialect) BackgroundKeywords() []string {
+ return g.Keywords["background"]
+}
+
+func (g *Dialect) ScenarioOutlineKeywords() []string {
+ return g.Keywords["scenarioOutline"]
+}
+
+func (g *Dialect) ExamplesKeywords() []string {
+ return g.Keywords["examples"]
+}
+
+func (g *Dialect) StepKeywordType(keyword string) messages.StepKeywordType {
+ return g.KeywordTypes[keyword]
+}
+
+type DialectProvider interface {
+ GetDialect(language string) *Dialect
+}
+
+type gherkinDialectMap map[string]*Dialect
+
+func (g gherkinDialectMap) GetDialect(language string) *Dialect {
+ return g[language]
+}
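A short sketch of the `DialectProvider` API defined above, using the `DialectsBuiltin()` accessor from the next file in this diff. Everything referenced here (`GetDialect`, the `Native` field, `StepKeywords()`) appears in the vendored sources.

```go
package main

import (
	"fmt"

	gherkin "github.com/cucumber/gherkin/go/v26"
)

func main() {
	// Look up the German dialect by its language code; GetDialect
	// returns nil for an unknown language, so real callers should check.
	d := gherkin.DialectsBuiltin().GetDialect("de")
	if d == nil {
		panic("unknown dialect")
	}
	fmt.Println(d.Native)         // "Deutsch"
	fmt.Println(d.StepKeywords()) // given/when/then/and/but keywords, including "* "
}
```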
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go
new file mode 100644
index 000000000..2362612d8
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go
@@ -0,0 +1,5229 @@
+package gherkin
+
+import messages "github.com/cucumber/messages/go/v21"
+
+// Builtin dialects for af (Afrikaans), am (Armenian), an (Aragonese), ar (Arabic), ast (Asturian), az (Azerbaijani), be (Belarusian), bg (Bulgarian), bm (Malay), bs (Bosnian), ca (Catalan), cs (Czech), cy-GB (Welsh), da (Danish), de (German), el (Greek), em (Emoji), en (English), en-Scouse (Scouse), en-au (Australian), en-lol (LOLCAT), en-old (Old English), en-pirate (Pirate), en-tx (Texas), eo (Esperanto), es (Spanish), et (Estonian), fa (Persian), fi (Finnish), fr (French), ga (Irish), gj (Gujarati), gl (Galician), he (Hebrew), hi (Hindi), hr (Croatian), ht (Creole), hu (Hungarian), id (Indonesian), is (Icelandic), it (Italian), ja (Japanese), jv (Javanese), ka (Georgian), kn (Kannada), ko (Korean), lt (Lithuanian), lu (Luxemburgish), lv (Latvian), mk-Cyrl (Macedonian), mk-Latn (Macedonian (Latin)), mn (Mongolian), ne (Nepali), nl (Dutch), no (Norwegian), pa (Panjabi), pl (Polish), pt (Portuguese), ro (Romanian), ru (Russian), sk (Slovak), sl (Slovenian), sr-Cyrl (Serbian), sr-Latn (Serbian (Latin)), sv (Swedish), ta (Tamil), th (Thai), te (Telugu), tlh (Klingon), tr (Turkish), tt (Tatar), uk (Ukrainian), ur (Urdu), uz (Uzbek), vi (Vietnamese), zh-CN (Chinese simplified), zh-TW (Chinese traditional), mr (Marathi), amh (Amharic)
+func DialectsBuiltin() DialectProvider {
+ return builtinDialects
+}
+
+const (
+ feature = "feature"
+ rule = "rule"
+ background = "background"
+ scenario = "scenario"
+ scenarioOutline = "scenarioOutline"
+ examples = "examples"
+ given = "given"
+ when = "when"
+ then = "then"
+ and = "and"
+ but = "but"
+)
+
+var builtinDialects = gherkinDialectMap{
+ "af": &Dialect{
+ "af", "Afrikaans", "Afrikaans", map[string][]string{
+ feature: {
+ "Funksie",
+ "Besigheid Behoefte",
+ "Vermoë",
+ },
+ rule: {
+ "Regel",
+ },
+ background: {
+ "Agtergrond",
+ },
+ scenario: {
+ "Voorbeeld",
+ "Situasie",
+ },
+ scenarioOutline: {
+ "Situasie Uiteensetting",
+ },
+ examples: {
+ "Voorbeelde",
+ },
+ given: {
+ "* ",
+ "Gegewe ",
+ },
+ when: {
+ "* ",
+ "Wanneer ",
+ },
+ then: {
+ "* ",
+ "Dan ",
+ },
+ and: {
+ "* ",
+ "En ",
+ },
+ but: {
+ "* ",
+ "Maar ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Gegewe ": messages.StepKeywordType_CONTEXT,
+
+ "Wanneer ": messages.StepKeywordType_ACTION,
+
+ "Dan ": messages.StepKeywordType_OUTCOME,
+
+ "En ": messages.StepKeywordType_CONJUNCTION,
+
+ "Maar ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "am": &Dialect{
+ "am", "Armenian", "հայերեն", map[string][]string{
+ feature: {
+ "Ֆունկցիոնալություն",
+ "Հատկություն",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Կոնտեքստ",
+ },
+ scenario: {
+ "Օրինակ",
+ "Սցենար",
+ },
+ scenarioOutline: {
+ "Սցենարի կառուցվացքը",
+ },
+ examples: {
+ "Օրինակներ",
+ },
+ given: {
+ "* ",
+ "Դիցուք ",
+ },
+ when: {
+ "* ",
+ "Եթե ",
+ "Երբ ",
+ },
+ then: {
+ "* ",
+ "Ապա ",
+ },
+ and: {
+ "* ",
+ "Եվ ",
+ },
+ but: {
+ "* ",
+ "Բայց ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Դիցուք ": messages.StepKeywordType_CONTEXT,
+
+ "Եթե ": messages.StepKeywordType_ACTION,
+
+ "Երբ ": messages.StepKeywordType_ACTION,
+
+ "Ապա ": messages.StepKeywordType_OUTCOME,
+
+ "Եվ ": messages.StepKeywordType_CONJUNCTION,
+
+ "Բայց ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "an": &Dialect{
+ "an", "Aragonese", "Aragonés", map[string][]string{
+ feature: {
+ "Caracteristica",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Antecedents",
+ },
+ scenario: {
+ "Eixemplo",
+ "Caso",
+ },
+ scenarioOutline: {
+ "Esquema del caso",
+ },
+ examples: {
+ "Eixemplos",
+ },
+ given: {
+ "* ",
+ "Dau ",
+ "Dada ",
+ "Daus ",
+ "Dadas ",
+ },
+ when: {
+ "* ",
+ "Cuan ",
+ },
+ then: {
+ "* ",
+ "Alavez ",
+ "Allora ",
+ "Antonces ",
+ },
+ and: {
+ "* ",
+ "Y ",
+ "E ",
+ },
+ but: {
+ "* ",
+ "Pero ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Dau ": messages.StepKeywordType_CONTEXT,
+
+ "Dada ": messages.StepKeywordType_CONTEXT,
+
+ "Daus ": messages.StepKeywordType_CONTEXT,
+
+ "Dadas ": messages.StepKeywordType_CONTEXT,
+
+ "Cuan ": messages.StepKeywordType_ACTION,
+
+ "Alavez ": messages.StepKeywordType_OUTCOME,
+
+ "Allora ": messages.StepKeywordType_OUTCOME,
+
+ "Antonces ": messages.StepKeywordType_OUTCOME,
+
+ "Y ": messages.StepKeywordType_CONJUNCTION,
+
+ "E ": messages.StepKeywordType_CONJUNCTION,
+
+ "Pero ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ar": &Dialect{
+ "ar", "Arabic", "العربية", map[string][]string{
+ feature: {
+ "خاصية",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "الخلفية",
+ },
+ scenario: {
+ "مثال",
+ "سيناريو",
+ },
+ scenarioOutline: {
+ "سيناريو مخطط",
+ },
+ examples: {
+ "امثلة",
+ },
+ given: {
+ "* ",
+ "بفرض ",
+ },
+ when: {
+ "* ",
+ "متى ",
+ "عندما ",
+ },
+ then: {
+ "* ",
+ "اذاً ",
+ "ثم ",
+ },
+ and: {
+ "* ",
+ "و ",
+ },
+ but: {
+ "* ",
+ "لكن ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "بفرض ": messages.StepKeywordType_CONTEXT,
+
+ "متى ": messages.StepKeywordType_ACTION,
+
+ "عندما ": messages.StepKeywordType_ACTION,
+
+ "اذاً ": messages.StepKeywordType_OUTCOME,
+
+ "ثم ": messages.StepKeywordType_OUTCOME,
+
+ "و ": messages.StepKeywordType_CONJUNCTION,
+
+ "لكن ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ast": &Dialect{
+ "ast", "Asturian", "asturianu", map[string][]string{
+ feature: {
+ "Carauterística",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Antecedentes",
+ },
+ scenario: {
+ "Exemplo",
+ "Casu",
+ },
+ scenarioOutline: {
+ "Esbozu del casu",
+ },
+ examples: {
+ "Exemplos",
+ },
+ given: {
+ "* ",
+ "Dáu ",
+ "Dada ",
+ "Daos ",
+ "Daes ",
+ },
+ when: {
+ "* ",
+ "Cuando ",
+ },
+ then: {
+ "* ",
+ "Entós ",
+ },
+ and: {
+ "* ",
+ "Y ",
+ "Ya ",
+ },
+ but: {
+ "* ",
+ "Peru ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Dáu ": messages.StepKeywordType_CONTEXT,
+
+ "Dada ": messages.StepKeywordType_CONTEXT,
+
+ "Daos ": messages.StepKeywordType_CONTEXT,
+
+ "Daes ": messages.StepKeywordType_CONTEXT,
+
+ "Cuando ": messages.StepKeywordType_ACTION,
+
+ "Entós ": messages.StepKeywordType_OUTCOME,
+
+ "Y ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ya ": messages.StepKeywordType_CONJUNCTION,
+
+ "Peru ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "az": &Dialect{
+ "az", "Azerbaijani", "Azərbaycanca", map[string][]string{
+ feature: {
+ "Özəllik",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Keçmiş",
+ "Kontekst",
+ },
+ scenario: {
+ "Nümunə",
+ "Ssenari",
+ },
+ scenarioOutline: {
+ "Ssenarinin strukturu",
+ },
+ examples: {
+ "Nümunələr",
+ },
+ given: {
+ "* ",
+ "Tutaq ki ",
+ "Verilir ",
+ },
+ when: {
+ "* ",
+ "Əgər ",
+ "Nə vaxt ki ",
+ },
+ then: {
+ "* ",
+ "O halda ",
+ },
+ and: {
+ "* ",
+ "Və ",
+ "Həm ",
+ },
+ but: {
+ "* ",
+ "Amma ",
+ "Ancaq ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Tutaq ki ": messages.StepKeywordType_CONTEXT,
+
+ "Verilir ": messages.StepKeywordType_CONTEXT,
+
+ "Əgər ": messages.StepKeywordType_ACTION,
+
+ "Nə vaxt ki ": messages.StepKeywordType_ACTION,
+
+ "O halda ": messages.StepKeywordType_OUTCOME,
+
+ "Və ": messages.StepKeywordType_CONJUNCTION,
+
+ "Həm ": messages.StepKeywordType_CONJUNCTION,
+
+ "Amma ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ancaq ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "be": &Dialect{
+ "be", "Belarusian", "Беларуская", map[string][]string{
+ feature: {
+ "Функцыянальнасць",
+ "Фіча",
+ },
+ rule: {
+ "Правілы",
+ },
+ background: {
+ "Кантэкст",
+ },
+ scenario: {
+ "Сцэнарый",
+ "Cцэнар",
+ },
+ scenarioOutline: {
+ "Шаблон сцэнарыя",
+ "Узор сцэнара",
+ },
+ examples: {
+ "Прыклады",
+ },
+ given: {
+ "* ",
+ "Няхай ",
+ "Дадзена ",
+ },
+ when: {
+ "* ",
+ "Калі ",
+ },
+ then: {
+ "* ",
+ "Тады ",
+ },
+ and: {
+ "* ",
+ "I ",
+ "Ды ",
+ "Таксама ",
+ },
+ but: {
+ "* ",
+ "Але ",
+ "Інакш ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Няхай ": messages.StepKeywordType_CONTEXT,
+
+ "Дадзена ": messages.StepKeywordType_CONTEXT,
+
+ "Калі ": messages.StepKeywordType_ACTION,
+
+ "Тады ": messages.StepKeywordType_OUTCOME,
+
+ "I ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ды ": messages.StepKeywordType_CONJUNCTION,
+
+ "Таксама ": messages.StepKeywordType_CONJUNCTION,
+
+ "Але ": messages.StepKeywordType_CONJUNCTION,
+
+ "Інакш ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "bg": &Dialect{
+ "bg", "Bulgarian", "български", map[string][]string{
+ feature: {
+ "Функционалност",
+ },
+ rule: {
+ "Правило",
+ },
+ background: {
+ "Предистория",
+ },
+ scenario: {
+ "Пример",
+ "Сценарий",
+ },
+ scenarioOutline: {
+ "Рамка на сценарий",
+ },
+ examples: {
+ "Примери",
+ },
+ given: {
+ "* ",
+ "Дадено ",
+ },
+ when: {
+ "* ",
+ "Когато ",
+ },
+ then: {
+ "* ",
+ "То ",
+ },
+ and: {
+ "* ",
+ "И ",
+ },
+ but: {
+ "* ",
+ "Но ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Дадено ": messages.StepKeywordType_CONTEXT,
+
+ "Когато ": messages.StepKeywordType_ACTION,
+
+ "То ": messages.StepKeywordType_OUTCOME,
+
+ "И ": messages.StepKeywordType_CONJUNCTION,
+
+ "Но ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "bm": &Dialect{
+ "bm", "Malay", "Bahasa Melayu", map[string][]string{
+ feature: {
+ "Fungsi",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Latar Belakang",
+ },
+ scenario: {
+ "Senario",
+ "Situasi",
+ "Keadaan",
+ },
+ scenarioOutline: {
+ "Kerangka Senario",
+ "Kerangka Situasi",
+ "Kerangka Keadaan",
+ "Garis Panduan Senario",
+ },
+ examples: {
+ "Contoh",
+ },
+ given: {
+ "* ",
+ "Diberi ",
+ "Bagi ",
+ },
+ when: {
+ "* ",
+ "Apabila ",
+ },
+ then: {
+ "* ",
+ "Maka ",
+ "Kemudian ",
+ },
+ and: {
+ "* ",
+ "Dan ",
+ },
+ but: {
+ "* ",
+ "Tetapi ",
+ "Tapi ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Diberi ": messages.StepKeywordType_CONTEXT,
+
+ "Bagi ": messages.StepKeywordType_CONTEXT,
+
+ "Apabila ": messages.StepKeywordType_ACTION,
+
+ "Maka ": messages.StepKeywordType_OUTCOME,
+
+ "Kemudian ": messages.StepKeywordType_OUTCOME,
+
+ "Dan ": messages.StepKeywordType_CONJUNCTION,
+
+ "Tetapi ": messages.StepKeywordType_CONJUNCTION,
+
+ "Tapi ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "bs": &Dialect{
+ "bs", "Bosnian", "Bosanski", map[string][]string{
+ feature: {
+ "Karakteristika",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Pozadina",
+ },
+ scenario: {
+ "Primjer",
+ "Scenariju",
+ "Scenario",
+ },
+ scenarioOutline: {
+ "Scenariju-obris",
+ "Scenario-outline",
+ },
+ examples: {
+ "Primjeri",
+ },
+ given: {
+ "* ",
+ "Dato ",
+ },
+ when: {
+ "* ",
+ "Kada ",
+ },
+ then: {
+ "* ",
+ "Zatim ",
+ },
+ and: {
+ "* ",
+ "I ",
+ "A ",
+ },
+ but: {
+ "* ",
+ "Ali ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Dato ": messages.StepKeywordType_CONTEXT,
+
+ "Kada ": messages.StepKeywordType_ACTION,
+
+ "Zatim ": messages.StepKeywordType_OUTCOME,
+
+ "I ": messages.StepKeywordType_CONJUNCTION,
+
+ "A ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ali ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ca": &Dialect{
+ "ca", "Catalan", "català", map[string][]string{
+ feature: {
+ "Característica",
+ "Funcionalitat",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Rerefons",
+ "Antecedents",
+ },
+ scenario: {
+ "Exemple",
+ "Escenari",
+ },
+ scenarioOutline: {
+ "Esquema de l'escenari",
+ },
+ examples: {
+ "Exemples",
+ },
+ given: {
+ "* ",
+ "Donat ",
+ "Donada ",
+ "Atès ",
+ "Atesa ",
+ },
+ when: {
+ "* ",
+ "Quan ",
+ },
+ then: {
+ "* ",
+ "Aleshores ",
+ "Cal ",
+ },
+ and: {
+ "* ",
+ "I ",
+ },
+ but: {
+ "* ",
+ "Però ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Donat ": messages.StepKeywordType_CONTEXT,
+
+ "Donada ": messages.StepKeywordType_CONTEXT,
+
+ "Atès ": messages.StepKeywordType_CONTEXT,
+
+ "Atesa ": messages.StepKeywordType_CONTEXT,
+
+ "Quan ": messages.StepKeywordType_ACTION,
+
+ "Aleshores ": messages.StepKeywordType_OUTCOME,
+
+ "Cal ": messages.StepKeywordType_OUTCOME,
+
+ "I ": messages.StepKeywordType_CONJUNCTION,
+
+ "Però ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "cs": &Dialect{
+ "cs", "Czech", "Česky", map[string][]string{
+ feature: {
+ "Požadavek",
+ },
+ rule: {
+ "Pravidlo",
+ },
+ background: {
+ "Pozadí",
+ "Kontext",
+ },
+ scenario: {
+ "Příklad",
+ "Scénář",
+ },
+ scenarioOutline: {
+ "Náčrt Scénáře",
+ "Osnova scénáře",
+ },
+ examples: {
+ "Příklady",
+ },
+ given: {
+ "* ",
+ "Pokud ",
+ "Za předpokladu ",
+ },
+ when: {
+ "* ",
+ "Když ",
+ },
+ then: {
+ "* ",
+ "Pak ",
+ },
+ and: {
+ "* ",
+ "A také ",
+ "A ",
+ },
+ but: {
+ "* ",
+ "Ale ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Pokud ": messages.StepKeywordType_CONTEXT,
+
+ "Za předpokladu ": messages.StepKeywordType_CONTEXT,
+
+ "Když ": messages.StepKeywordType_ACTION,
+
+ "Pak ": messages.StepKeywordType_OUTCOME,
+
+ "A také ": messages.StepKeywordType_CONJUNCTION,
+
+ "A ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ale ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "cy-GB": &Dialect{
+ "cy-GB", "Welsh", "Cymraeg", map[string][]string{
+ feature: {
+ "Arwedd",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Cefndir",
+ },
+ scenario: {
+ "Enghraifft",
+ "Scenario",
+ },
+ scenarioOutline: {
+ "Scenario Amlinellol",
+ },
+ examples: {
+ "Enghreifftiau",
+ },
+ given: {
+ "* ",
+ "Anrhegedig a ",
+ },
+ when: {
+ "* ",
+ "Pryd ",
+ },
+ then: {
+ "* ",
+ "Yna ",
+ },
+ and: {
+ "* ",
+ "A ",
+ },
+ but: {
+ "* ",
+ "Ond ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Anrhegedig a ": messages.StepKeywordType_CONTEXT,
+
+ "Pryd ": messages.StepKeywordType_ACTION,
+
+ "Yna ": messages.StepKeywordType_OUTCOME,
+
+ "A ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ond ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "da": &Dialect{
+ "da", "Danish", "dansk", map[string][]string{
+ feature: {
+ "Egenskab",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Baggrund",
+ },
+ scenario: {
+ "Eksempel",
+ "Scenarie",
+ },
+ scenarioOutline: {
+ "Abstrakt Scenario",
+ },
+ examples: {
+ "Eksempler",
+ },
+ given: {
+ "* ",
+ "Givet ",
+ },
+ when: {
+ "* ",
+ "Når ",
+ },
+ then: {
+ "* ",
+ "Så ",
+ },
+ and: {
+ "* ",
+ "Og ",
+ },
+ but: {
+ "* ",
+ "Men ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Givet ": messages.StepKeywordType_CONTEXT,
+
+ "Når ": messages.StepKeywordType_ACTION,
+
+ "Så ": messages.StepKeywordType_OUTCOME,
+
+ "Og ": messages.StepKeywordType_CONJUNCTION,
+
+ "Men ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "de": &Dialect{
+ "de", "German", "Deutsch", map[string][]string{
+ feature: {
+ "Funktionalität",
+ "Funktion",
+ },
+ rule: {
+ "Rule",
+ "Regel",
+ },
+ background: {
+ "Grundlage",
+ "Hintergrund",
+ "Voraussetzungen",
+ "Vorbedingungen",
+ },
+ scenario: {
+ "Beispiel",
+ "Szenario",
+ },
+ scenarioOutline: {
+ "Szenariogrundriss",
+ "Szenarien",
+ },
+ examples: {
+ "Beispiele",
+ },
+ given: {
+ "* ",
+ "Angenommen ",
+ "Gegeben sei ",
+ "Gegeben seien ",
+ },
+ when: {
+ "* ",
+ "Wenn ",
+ },
+ then: {
+ "* ",
+ "Dann ",
+ },
+ and: {
+ "* ",
+ "Und ",
+ },
+ but: {
+ "* ",
+ "Aber ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Angenommen ": messages.StepKeywordType_CONTEXT,
+
+ "Gegeben sei ": messages.StepKeywordType_CONTEXT,
+
+ "Gegeben seien ": messages.StepKeywordType_CONTEXT,
+
+ "Wenn ": messages.StepKeywordType_ACTION,
+
+ "Dann ": messages.StepKeywordType_OUTCOME,
+
+ "Und ": messages.StepKeywordType_CONJUNCTION,
+
+ "Aber ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "el": &Dialect{
+ "el", "Greek", "Ελληνικά", map[string][]string{
+ feature: {
+ "Δυνατότητα",
+ "Λειτουργία",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Υπόβαθρο",
+ },
+ scenario: {
+ "Παράδειγμα",
+ "Σενάριο",
+ },
+ scenarioOutline: {
+ "Περιγραφή Σεναρίου",
+ "Περίγραμμα Σεναρίου",
+ },
+ examples: {
+ "Παραδείγματα",
+ "Σενάρια",
+ },
+ given: {
+ "* ",
+ "Δεδομένου ",
+ },
+ when: {
+ "* ",
+ "Όταν ",
+ },
+ then: {
+ "* ",
+ "Τότε ",
+ },
+ and: {
+ "* ",
+ "Και ",
+ },
+ but: {
+ "* ",
+ "Αλλά ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Δεδομένου ": messages.StepKeywordType_CONTEXT,
+
+ "Όταν ": messages.StepKeywordType_ACTION,
+
+ "Τότε ": messages.StepKeywordType_OUTCOME,
+
+ "Και ": messages.StepKeywordType_CONJUNCTION,
+
+ "Αλλά ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "em": &Dialect{
+ "em", "Emoji", "😀", map[string][]string{
+ feature: {
+ "📚",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "💤",
+ },
+ scenario: {
+ "🥒",
+ "📕",
+ },
+ scenarioOutline: {
+ "📖",
+ },
+ examples: {
+ "📓",
+ },
+ given: {
+ "* ",
+ "😐",
+ },
+ when: {
+ "* ",
+ "🎬",
+ },
+ then: {
+ "* ",
+ "🙏",
+ },
+ and: {
+ "* ",
+ "😂",
+ },
+ but: {
+ "* ",
+ "😔",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "😐": messages.StepKeywordType_CONTEXT,
+
+ "🎬": messages.StepKeywordType_ACTION,
+
+ "🙏": messages.StepKeywordType_OUTCOME,
+
+ "😂": messages.StepKeywordType_CONJUNCTION,
+
+ "😔": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "en": &Dialect{
+ "en", "English", "English", map[string][]string{
+ feature: {
+ "Feature",
+ "Business Need",
+ "Ability",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Background",
+ },
+ scenario: {
+ "Example",
+ "Scenario",
+ },
+ scenarioOutline: {
+ "Scenario Outline",
+ "Scenario Template",
+ },
+ examples: {
+ "Examples",
+ "Scenarios",
+ },
+ given: {
+ "* ",
+ "Given ",
+ },
+ when: {
+ "* ",
+ "When ",
+ },
+ then: {
+ "* ",
+ "Then ",
+ },
+ and: {
+ "* ",
+ "And ",
+ },
+ but: {
+ "* ",
+ "But ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Given ": messages.StepKeywordType_CONTEXT,
+
+ "When ": messages.StepKeywordType_ACTION,
+
+ "Then ": messages.StepKeywordType_OUTCOME,
+
+ "And ": messages.StepKeywordType_CONJUNCTION,
+
+ "But ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "en-Scouse": &Dialect{
+ "en-Scouse", "Scouse", "Scouse", map[string][]string{
+ feature: {
+ "Feature",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Dis is what went down",
+ },
+ scenario: {
+ "The thing of it is",
+ },
+ scenarioOutline: {
+ "Wharrimean is",
+ },
+ examples: {
+ "Examples",
+ },
+ given: {
+ "* ",
+ "Givun ",
+ "Youse know when youse got ",
+ },
+ when: {
+ "* ",
+ "Wun ",
+ "Youse know like when ",
+ },
+ then: {
+ "* ",
+ "Dun ",
+ "Den youse gotta ",
+ },
+ and: {
+ "* ",
+ "An ",
+ },
+ but: {
+ "* ",
+ "Buh ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Givun ": messages.StepKeywordType_CONTEXT,
+
+ "Youse know when youse got ": messages.StepKeywordType_CONTEXT,
+
+ "Wun ": messages.StepKeywordType_ACTION,
+
+ "Youse know like when ": messages.StepKeywordType_ACTION,
+
+ "Dun ": messages.StepKeywordType_OUTCOME,
+
+ "Den youse gotta ": messages.StepKeywordType_OUTCOME,
+
+ "An ": messages.StepKeywordType_CONJUNCTION,
+
+ "Buh ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "en-au": &Dialect{
+ "en-au", "Australian", "Australian", map[string][]string{
+ feature: {
+ "Pretty much",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "First off",
+ },
+ scenario: {
+ "Awww, look mate",
+ },
+ scenarioOutline: {
+ "Reckon it's like",
+ },
+ examples: {
+ "You'll wanna",
+ },
+ given: {
+ "* ",
+ "Y'know ",
+ },
+ when: {
+ "* ",
+ "It's just unbelievable ",
+ },
+ then: {
+ "* ",
+ "But at the end of the day I reckon ",
+ },
+ and: {
+ "* ",
+ "Too right ",
+ },
+ but: {
+ "* ",
+ "Yeah nah ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Y'know ": messages.StepKeywordType_CONTEXT,
+
+ "It's just unbelievable ": messages.StepKeywordType_ACTION,
+
+ "But at the end of the day I reckon ": messages.StepKeywordType_OUTCOME,
+
+ "Too right ": messages.StepKeywordType_CONJUNCTION,
+
+ "Yeah nah ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "en-lol": &Dialect{
+ "en-lol", "LOLCAT", "LOLCAT", map[string][]string{
+ feature: {
+ "OH HAI",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "B4",
+ },
+ scenario: {
+ "MISHUN",
+ },
+ scenarioOutline: {
+ "MISHUN SRSLY",
+ },
+ examples: {
+ "EXAMPLZ",
+ },
+ given: {
+ "* ",
+ "I CAN HAZ ",
+ },
+ when: {
+ "* ",
+ "WEN ",
+ },
+ then: {
+ "* ",
+ "DEN ",
+ },
+ and: {
+ "* ",
+ "AN ",
+ },
+ but: {
+ "* ",
+ "BUT ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "I CAN HAZ ": messages.StepKeywordType_CONTEXT,
+
+ "WEN ": messages.StepKeywordType_ACTION,
+
+ "DEN ": messages.StepKeywordType_OUTCOME,
+
+ "AN ": messages.StepKeywordType_CONJUNCTION,
+
+ "BUT ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "en-old": &Dialect{
+ "en-old", "Old English", "Englisc", map[string][]string{
+ feature: {
+ "Hwaet",
+ "Hwæt",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Aer",
+ "Ær",
+ },
+ scenario: {
+ "Swa",
+ },
+ scenarioOutline: {
+ "Swa hwaer swa",
+ "Swa hwær swa",
+ },
+ examples: {
+ "Se the",
+ "Se þe",
+ "Se ðe",
+ },
+ given: {
+ "* ",
+ "Thurh ",
+ "Þurh ",
+ "Ðurh ",
+ },
+ when: {
+ "* ",
+ "Bæþsealf ",
+ "Bæþsealfa ",
+ "Bæþsealfe ",
+ "Ciricæw ",
+ "Ciricæwe ",
+ "Ciricæwa ",
+ },
+ then: {
+ "* ",
+ "Tha ",
+ "Þa ",
+ "Ða ",
+ "Tha the ",
+ "Þa þe ",
+ "Ða ðe ",
+ },
+ and: {
+ "* ",
+ "Ond ",
+ "7 ",
+ },
+ but: {
+ "* ",
+ "Ac ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Thurh ": messages.StepKeywordType_CONTEXT,
+
+ "Þurh ": messages.StepKeywordType_CONTEXT,
+
+ "Ðurh ": messages.StepKeywordType_CONTEXT,
+
+ "Bæþsealf ": messages.StepKeywordType_ACTION,
+
+ "Bæþsealfa ": messages.StepKeywordType_ACTION,
+
+ "Bæþsealfe ": messages.StepKeywordType_ACTION,
+
+ "Ciricæw ": messages.StepKeywordType_ACTION,
+
+ "Ciricæwe ": messages.StepKeywordType_ACTION,
+
+ "Ciricæwa ": messages.StepKeywordType_ACTION,
+
+ "Tha ": messages.StepKeywordType_OUTCOME,
+
+ "Þa ": messages.StepKeywordType_OUTCOME,
+
+ "Ða ": messages.StepKeywordType_OUTCOME,
+
+ "Tha the ": messages.StepKeywordType_OUTCOME,
+
+ "Þa þe ": messages.StepKeywordType_OUTCOME,
+
+ "Ða ðe ": messages.StepKeywordType_OUTCOME,
+
+ "Ond ": messages.StepKeywordType_CONJUNCTION,
+
+ "7 ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ac ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "en-pirate": &Dialect{
+ "en-pirate", "Pirate", "Pirate", map[string][]string{
+ feature: {
+ "Ahoy matey!",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Yo-ho-ho",
+ },
+ scenario: {
+ "Heave to",
+ },
+ scenarioOutline: {
+ "Shiver me timbers",
+ },
+ examples: {
+ "Dead men tell no tales",
+ },
+ given: {
+ "* ",
+ "Gangway! ",
+ },
+ when: {
+ "* ",
+ "Blimey! ",
+ },
+ then: {
+ "* ",
+ "Let go and haul ",
+ },
+ and: {
+ "* ",
+ "Aye ",
+ },
+ but: {
+ "* ",
+ "Avast! ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Gangway! ": messages.StepKeywordType_CONTEXT,
+
+ "Blimey! ": messages.StepKeywordType_ACTION,
+
+ "Let go and haul ": messages.StepKeywordType_OUTCOME,
+
+ "Aye ": messages.StepKeywordType_CONJUNCTION,
+
+ "Avast! ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "en-tx": &Dialect{
+ "en-tx", "Texas", "Texas", map[string][]string{
+ feature: {
+ "This ain’t my first rodeo",
+ "All gussied up",
+ },
+ rule: {
+ "Rule ",
+ },
+ background: {
+ "Lemme tell y'all a story",
+ },
+ scenario: {
+ "All hat and no cattle",
+ },
+ scenarioOutline: {
+ "Serious as a snake bite",
+ "Busy as a hound in flea season",
+ },
+ examples: {
+ "Now that's a story longer than a cattle drive in July",
+ },
+ given: {
+ "Fixin' to ",
+ "All git out ",
+ },
+ when: {
+ "Quick out of the chute ",
+ },
+ then: {
+ "There’s no tree but bears some fruit ",
+ },
+ and: {
+ "Come hell or high water ",
+ },
+ but: {
+ "Well now hold on, I'll you what ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Fixin' to ": messages.StepKeywordType_CONTEXT,
+
+ "All git out ": messages.StepKeywordType_CONTEXT,
+
+ "Quick out of the chute ": messages.StepKeywordType_ACTION,
+
+ "There’s no tree but bears some fruit ": messages.StepKeywordType_OUTCOME,
+
+ "Come hell or high water ": messages.StepKeywordType_CONJUNCTION,
+
+ "Well now hold on, I'll you what ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "eo": &Dialect{
+ "eo", "Esperanto", "Esperanto", map[string][]string{
+ feature: {
+ "Trajto",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Fono",
+ },
+ scenario: {
+ "Ekzemplo",
+ "Scenaro",
+ "Kazo",
+ },
+ scenarioOutline: {
+ "Konturo de la scenaro",
+ "Skizo",
+ "Kazo-skizo",
+ },
+ examples: {
+ "Ekzemploj",
+ },
+ given: {
+ "* ",
+ "Donitaĵo ",
+ "Komence ",
+ },
+ when: {
+ "* ",
+ "Se ",
+ },
+ then: {
+ "* ",
+ "Do ",
+ },
+ and: {
+ "* ",
+ "Kaj ",
+ },
+ but: {
+ "* ",
+ "Sed ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Donitaĵo ": messages.StepKeywordType_CONTEXT,
+
+ "Komence ": messages.StepKeywordType_CONTEXT,
+
+ "Se ": messages.StepKeywordType_ACTION,
+
+ "Do ": messages.StepKeywordType_OUTCOME,
+
+ "Kaj ": messages.StepKeywordType_CONJUNCTION,
+
+ "Sed ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "es": &Dialect{
+ "es", "Spanish", "español", map[string][]string{
+ feature: {
+ "Característica",
+ "Necesidad del negocio",
+ "Requisito",
+ },
+ rule: {
+ "Regla",
+ "Regla de negocio",
+ },
+ background: {
+ "Antecedentes",
+ },
+ scenario: {
+ "Ejemplo",
+ "Escenario",
+ },
+ scenarioOutline: {
+ "Esquema del escenario",
+ },
+ examples: {
+ "Ejemplos",
+ },
+ given: {
+ "* ",
+ "Dado ",
+ "Dada ",
+ "Dados ",
+ "Dadas ",
+ },
+ when: {
+ "* ",
+ "Cuando ",
+ },
+ then: {
+ "* ",
+ "Entonces ",
+ },
+ and: {
+ "* ",
+ "Y ",
+ "E ",
+ },
+ but: {
+ "* ",
+ "Pero ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Dado ": messages.StepKeywordType_CONTEXT,
+
+ "Dada ": messages.StepKeywordType_CONTEXT,
+
+ "Dados ": messages.StepKeywordType_CONTEXT,
+
+ "Dadas ": messages.StepKeywordType_CONTEXT,
+
+ "Cuando ": messages.StepKeywordType_ACTION,
+
+ "Entonces ": messages.StepKeywordType_OUTCOME,
+
+ "Y ": messages.StepKeywordType_CONJUNCTION,
+
+ "E ": messages.StepKeywordType_CONJUNCTION,
+
+ "Pero ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "et": &Dialect{
+ "et", "Estonian", "eesti keel", map[string][]string{
+ feature: {
+ "Omadus",
+ },
+ rule: {
+ "Reegel",
+ },
+ background: {
+ "Taust",
+ },
+ scenario: {
+ "Juhtum",
+ "Stsenaarium",
+ },
+ scenarioOutline: {
+ "Raamjuhtum",
+ "Raamstsenaarium",
+ },
+ examples: {
+ "Juhtumid",
+ },
+ given: {
+ "* ",
+ "Eeldades ",
+ },
+ when: {
+ "* ",
+ "Kui ",
+ },
+ then: {
+ "* ",
+ "Siis ",
+ },
+ and: {
+ "* ",
+ "Ja ",
+ },
+ but: {
+ "* ",
+ "Kuid ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Eeldades ": messages.StepKeywordType_CONTEXT,
+
+ "Kui ": messages.StepKeywordType_ACTION,
+
+ "Siis ": messages.StepKeywordType_OUTCOME,
+
+ "Ja ": messages.StepKeywordType_CONJUNCTION,
+
+ "Kuid ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "fa": &Dialect{
+ "fa", "Persian", "فارسی", map[string][]string{
+ feature: {
+ "وِیژگی",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "زمینه",
+ },
+ scenario: {
+ "مثال",
+ "سناریو",
+ },
+ scenarioOutline: {
+ "الگوی سناریو",
+ },
+ examples: {
+ "نمونه ها",
+ },
+ given: {
+ "* ",
+ "با فرض ",
+ },
+ when: {
+ "* ",
+ "هنگامی ",
+ },
+ then: {
+ "* ",
+ "آنگاه ",
+ },
+ and: {
+ "* ",
+ "و ",
+ },
+ but: {
+ "* ",
+ "اما ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "با فرض ": messages.StepKeywordType_CONTEXT,
+
+ "هنگامی ": messages.StepKeywordType_ACTION,
+
+ "آنگاه ": messages.StepKeywordType_OUTCOME,
+
+ "و ": messages.StepKeywordType_CONJUNCTION,
+
+ "اما ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "fi": &Dialect{
+ "fi", "Finnish", "suomi", map[string][]string{
+ feature: {
+ "Ominaisuus",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Tausta",
+ },
+ scenario: {
+ "Tapaus",
+ },
+ scenarioOutline: {
+ "Tapausaihio",
+ },
+ examples: {
+ "Tapaukset",
+ },
+ given: {
+ "* ",
+ "Oletetaan ",
+ },
+ when: {
+ "* ",
+ "Kun ",
+ },
+ then: {
+ "* ",
+ "Niin ",
+ },
+ and: {
+ "* ",
+ "Ja ",
+ },
+ but: {
+ "* ",
+ "Mutta ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Oletetaan ": messages.StepKeywordType_CONTEXT,
+
+ "Kun ": messages.StepKeywordType_ACTION,
+
+ "Niin ": messages.StepKeywordType_OUTCOME,
+
+ "Ja ": messages.StepKeywordType_CONJUNCTION,
+
+ "Mutta ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "fr": &Dialect{
+ "fr", "French", "français", map[string][]string{
+ feature: {
+ "Fonctionnalité",
+ },
+ rule: {
+ "Règle",
+ },
+ background: {
+ "Contexte",
+ },
+ scenario: {
+ "Exemple",
+ "Scénario",
+ },
+ scenarioOutline: {
+ "Plan du scénario",
+ "Plan du Scénario",
+ },
+ examples: {
+ "Exemples",
+ },
+ given: {
+ "* ",
+ "Soit ",
+ "Sachant que ",
+ "Sachant qu'",
+ "Sachant ",
+ "Etant donné que ",
+ "Etant donné qu'",
+ "Etant donné ",
+ "Etant donnée ",
+ "Etant donnés ",
+ "Etant données ",
+ "Étant donné que ",
+ "Étant donné qu'",
+ "Étant donné ",
+ "Étant donnée ",
+ "Étant donnés ",
+ "Étant données ",
+ },
+ when: {
+ "* ",
+ "Quand ",
+ "Lorsque ",
+ "Lorsqu'",
+ },
+ then: {
+ "* ",
+ "Alors ",
+ "Donc ",
+ },
+ and: {
+ "* ",
+ "Et que ",
+ "Et qu'",
+ "Et ",
+ },
+ but: {
+ "* ",
+ "Mais que ",
+ "Mais qu'",
+ "Mais ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Soit ": messages.StepKeywordType_CONTEXT,
+
+ "Sachant que ": messages.StepKeywordType_CONTEXT,
+
+ "Sachant qu'": messages.StepKeywordType_CONTEXT,
+
+ "Sachant ": messages.StepKeywordType_CONTEXT,
+
+ "Etant donné que ": messages.StepKeywordType_CONTEXT,
+
+ "Etant donné qu'": messages.StepKeywordType_CONTEXT,
+
+ "Etant donné ": messages.StepKeywordType_CONTEXT,
+
+ "Etant donnée ": messages.StepKeywordType_CONTEXT,
+
+ "Etant donnés ": messages.StepKeywordType_CONTEXT,
+
+ "Etant données ": messages.StepKeywordType_CONTEXT,
+
+ "Étant donné que ": messages.StepKeywordType_CONTEXT,
+
+ "Étant donné qu'": messages.StepKeywordType_CONTEXT,
+
+ "Étant donné ": messages.StepKeywordType_CONTEXT,
+
+ "Étant donnée ": messages.StepKeywordType_CONTEXT,
+
+ "Étant donnés ": messages.StepKeywordType_CONTEXT,
+
+ "Étant données ": messages.StepKeywordType_CONTEXT,
+
+ "Quand ": messages.StepKeywordType_ACTION,
+
+ "Lorsque ": messages.StepKeywordType_ACTION,
+
+ "Lorsqu'": messages.StepKeywordType_ACTION,
+
+ "Alors ": messages.StepKeywordType_OUTCOME,
+
+ "Donc ": messages.StepKeywordType_OUTCOME,
+
+ "Et que ": messages.StepKeywordType_CONJUNCTION,
+
+ "Et qu'": messages.StepKeywordType_CONJUNCTION,
+
+ "Et ": messages.StepKeywordType_CONJUNCTION,
+
+ "Mais que ": messages.StepKeywordType_CONJUNCTION,
+
+ "Mais qu'": messages.StepKeywordType_CONJUNCTION,
+
+ "Mais ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ga": &Dialect{
+ "ga", "Irish", "Gaeilge", map[string][]string{
+ feature: {
+ "Gné",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Cúlra",
+ },
+ scenario: {
+ "Sampla",
+ "Cás",
+ },
+ scenarioOutline: {
+ "Cás Achomair",
+ },
+ examples: {
+ "Samplaí",
+ },
+ given: {
+ "* ",
+ "Cuir i gcás go",
+ "Cuir i gcás nach",
+ "Cuir i gcás gur",
+ "Cuir i gcás nár",
+ },
+ when: {
+ "* ",
+ "Nuair a",
+ "Nuair nach",
+ "Nuair ba",
+ "Nuair nár",
+ },
+ then: {
+ "* ",
+ "Ansin",
+ },
+ and: {
+ "* ",
+ "Agus",
+ },
+ but: {
+ "* ",
+ "Ach",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Cuir i gcás go": messages.StepKeywordType_CONTEXT,
+
+ "Cuir i gcás nach": messages.StepKeywordType_CONTEXT,
+
+ "Cuir i gcás gur": messages.StepKeywordType_CONTEXT,
+
+ "Cuir i gcás nár": messages.StepKeywordType_CONTEXT,
+
+ "Nuair a": messages.StepKeywordType_ACTION,
+
+ "Nuair nach": messages.StepKeywordType_ACTION,
+
+ "Nuair ba": messages.StepKeywordType_ACTION,
+
+ "Nuair nár": messages.StepKeywordType_ACTION,
+
+ "Ansin": messages.StepKeywordType_OUTCOME,
+
+ "Agus": messages.StepKeywordType_CONJUNCTION,
+
+ "Ach": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "gj": &Dialect{
+ "gj", "Gujarati", "ગુજરાતી", map[string][]string{
+ feature: {
+ "લક્ષણ",
+ "વ્યાપાર જરૂર",
+ "ક્ષમતા",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "બેકગ્રાઉન્ડ",
+ },
+ scenario: {
+ "ઉદાહરણ",
+ "સ્થિતિ",
+ },
+ scenarioOutline: {
+ "પરિદ્દશ્ય રૂપરેખા",
+ "પરિદ્દશ્ય ઢાંચો",
+ },
+ examples: {
+ "ઉદાહરણો",
+ },
+ given: {
+ "* ",
+ "આપેલ છે ",
+ },
+ when: {
+ "* ",
+ "ક્યારે ",
+ },
+ then: {
+ "* ",
+ "પછી ",
+ },
+ and: {
+ "* ",
+ "અને ",
+ },
+ but: {
+ "* ",
+ "પણ ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "આપેલ છે ": messages.StepKeywordType_CONTEXT,
+
+ "ક્યારે ": messages.StepKeywordType_ACTION,
+
+ "પછી ": messages.StepKeywordType_OUTCOME,
+
+ "અને ": messages.StepKeywordType_CONJUNCTION,
+
+ "પણ ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "gl": &Dialect{
+ "gl", "Galician", "galego", map[string][]string{
+ feature: {
+ "Característica",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Contexto",
+ },
+ scenario: {
+ "Exemplo",
+ "Escenario",
+ },
+ scenarioOutline: {
+ "Esbozo do escenario",
+ },
+ examples: {
+ "Exemplos",
+ },
+ given: {
+ "* ",
+ "Dado ",
+ "Dada ",
+ "Dados ",
+ "Dadas ",
+ },
+ when: {
+ "* ",
+ "Cando ",
+ },
+ then: {
+ "* ",
+ "Entón ",
+ "Logo ",
+ },
+ and: {
+ "* ",
+ "E ",
+ },
+ but: {
+ "* ",
+ "Mais ",
+ "Pero ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Dado ": messages.StepKeywordType_CONTEXT,
+
+ "Dada ": messages.StepKeywordType_CONTEXT,
+
+ "Dados ": messages.StepKeywordType_CONTEXT,
+
+ "Dadas ": messages.StepKeywordType_CONTEXT,
+
+ "Cando ": messages.StepKeywordType_ACTION,
+
+ "Entón ": messages.StepKeywordType_OUTCOME,
+
+ "Logo ": messages.StepKeywordType_OUTCOME,
+
+ "E ": messages.StepKeywordType_CONJUNCTION,
+
+ "Mais ": messages.StepKeywordType_CONJUNCTION,
+
+ "Pero ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "he": &Dialect{
+ "he", "Hebrew", "עברית", map[string][]string{
+ feature: {
+ "תכונה",
+ },
+ rule: {
+ "כלל",
+ },
+ background: {
+ "רקע",
+ },
+ scenario: {
+ "דוגמא",
+ "תרחיש",
+ },
+ scenarioOutline: {
+ "תבנית תרחיש",
+ },
+ examples: {
+ "דוגמאות",
+ },
+ given: {
+ "* ",
+ "בהינתן ",
+ },
+ when: {
+ "* ",
+ "כאשר ",
+ },
+ then: {
+ "* ",
+ "אז ",
+ "אזי ",
+ },
+ and: {
+ "* ",
+ "וגם ",
+ },
+ but: {
+ "* ",
+ "אבל ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "בהינתן ": messages.StepKeywordType_CONTEXT,
+
+ "כאשר ": messages.StepKeywordType_ACTION,
+
+ "אז ": messages.StepKeywordType_OUTCOME,
+
+ "אזי ": messages.StepKeywordType_OUTCOME,
+
+ "וגם ": messages.StepKeywordType_CONJUNCTION,
+
+ "אבל ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "hi": &Dialect{
+ "hi", "Hindi", "हिंदी", map[string][]string{
+ feature: {
+ "रूप लेख",
+ },
+ rule: {
+ "नियम",
+ },
+ background: {
+ "पृष्ठभूमि",
+ },
+ scenario: {
+ "परिदृश्य",
+ },
+ scenarioOutline: {
+ "परिदृश्य रूपरेखा",
+ },
+ examples: {
+ "उदाहरण",
+ },
+ given: {
+ "* ",
+ "अगर ",
+ "यदि ",
+ "चूंकि ",
+ },
+ when: {
+ "* ",
+ "जब ",
+ "कदा ",
+ },
+ then: {
+ "* ",
+ "तब ",
+ "तदा ",
+ },
+ and: {
+ "* ",
+ "और ",
+ "तथा ",
+ },
+ but: {
+ "* ",
+ "पर ",
+ "परन्तु ",
+ "किन्तु ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "अगर ": messages.StepKeywordType_CONTEXT,
+
+ "यदि ": messages.StepKeywordType_CONTEXT,
+
+ "चूंकि ": messages.StepKeywordType_CONTEXT,
+
+ "जब ": messages.StepKeywordType_ACTION,
+
+ "कदा ": messages.StepKeywordType_ACTION,
+
+ "तब ": messages.StepKeywordType_OUTCOME,
+
+ "तदा ": messages.StepKeywordType_OUTCOME,
+
+ "और ": messages.StepKeywordType_CONJUNCTION,
+
+ "तथा ": messages.StepKeywordType_CONJUNCTION,
+
+ "पर ": messages.StepKeywordType_CONJUNCTION,
+
+ "परन्तु ": messages.StepKeywordType_CONJUNCTION,
+
+ "किन्तु ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "hr": &Dialect{
+ "hr", "Croatian", "hrvatski", map[string][]string{
+ feature: {
+ "Osobina",
+ "Mogućnost",
+ "Mogucnost",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Pozadina",
+ },
+ scenario: {
+ "Primjer",
+ "Scenarij",
+ },
+ scenarioOutline: {
+ "Skica",
+ "Koncept",
+ },
+ examples: {
+ "Primjeri",
+ "Scenariji",
+ },
+ given: {
+ "* ",
+ "Zadan ",
+ "Zadani ",
+ "Zadano ",
+ "Ukoliko ",
+ },
+ when: {
+ "* ",
+ "Kada ",
+ "Kad ",
+ },
+ then: {
+ "* ",
+ "Onda ",
+ },
+ and: {
+ "* ",
+ "I ",
+ },
+ but: {
+ "* ",
+ "Ali ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Zadan ": messages.StepKeywordType_CONTEXT,
+
+ "Zadani ": messages.StepKeywordType_CONTEXT,
+
+ "Zadano ": messages.StepKeywordType_CONTEXT,
+
+ "Ukoliko ": messages.StepKeywordType_CONTEXT,
+
+ "Kada ": messages.StepKeywordType_ACTION,
+
+ "Kad ": messages.StepKeywordType_ACTION,
+
+ "Onda ": messages.StepKeywordType_OUTCOME,
+
+ "I ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ali ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ht": &Dialect{
+ "ht", "Creole", "kreyòl", map[string][]string{
+ feature: {
+ "Karakteristik",
+ "Mak",
+ "Fonksyonalite",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Kontèks",
+ "Istorik",
+ },
+ scenario: {
+ "Senaryo",
+ },
+ scenarioOutline: {
+ "Plan senaryo",
+ "Plan Senaryo",
+ "Senaryo deskripsyon",
+ "Senaryo Deskripsyon",
+ "Dyagram senaryo",
+ "Dyagram Senaryo",
+ },
+ examples: {
+ "Egzanp",
+ },
+ given: {
+ "* ",
+ "Sipoze ",
+ "Sipoze ke ",
+ "Sipoze Ke ",
+ },
+ when: {
+ "* ",
+ "Lè ",
+ "Le ",
+ },
+ then: {
+ "* ",
+ "Lè sa a ",
+ "Le sa a ",
+ },
+ and: {
+ "* ",
+ "Ak ",
+ "Epi ",
+ "E ",
+ },
+ but: {
+ "* ",
+ "Men ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Sipoze ": messages.StepKeywordType_CONTEXT,
+
+ "Sipoze ke ": messages.StepKeywordType_CONTEXT,
+
+ "Sipoze Ke ": messages.StepKeywordType_CONTEXT,
+
+ "Lè ": messages.StepKeywordType_ACTION,
+
+ "Le ": messages.StepKeywordType_ACTION,
+
+ "Lè sa a ": messages.StepKeywordType_OUTCOME,
+
+ "Le sa a ": messages.StepKeywordType_OUTCOME,
+
+ "Ak ": messages.StepKeywordType_CONJUNCTION,
+
+ "Epi ": messages.StepKeywordType_CONJUNCTION,
+
+ "E ": messages.StepKeywordType_CONJUNCTION,
+
+ "Men ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "hu": &Dialect{
+ "hu", "Hungarian", "magyar", map[string][]string{
+ feature: {
+ "Jellemző",
+ },
+ rule: {
+ "Szabály",
+ },
+ background: {
+ "Háttér",
+ },
+ scenario: {
+ "Példa",
+ "Forgatókönyv",
+ },
+ scenarioOutline: {
+ "Forgatókönyv vázlat",
+ },
+ examples: {
+ "Példák",
+ },
+ given: {
+ "* ",
+ "Amennyiben ",
+ "Adott ",
+ },
+ when: {
+ "* ",
+ "Majd ",
+ "Ha ",
+ "Amikor ",
+ },
+ then: {
+ "* ",
+ "Akkor ",
+ },
+ and: {
+ "* ",
+ "És ",
+ },
+ but: {
+ "* ",
+ "De ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Amennyiben ": messages.StepKeywordType_CONTEXT,
+
+ "Adott ": messages.StepKeywordType_CONTEXT,
+
+ "Majd ": messages.StepKeywordType_ACTION,
+
+ "Ha ": messages.StepKeywordType_ACTION,
+
+ "Amikor ": messages.StepKeywordType_ACTION,
+
+ "Akkor ": messages.StepKeywordType_OUTCOME,
+
+ "És ": messages.StepKeywordType_CONJUNCTION,
+
+ "De ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "id": &Dialect{
+ "id", "Indonesian", "Bahasa Indonesia", map[string][]string{
+ feature: {
+ "Fitur",
+ },
+ rule: {
+ "Rule",
+ "Aturan",
+ },
+ background: {
+ "Dasar",
+ "Latar Belakang",
+ },
+ scenario: {
+ "Skenario",
+ },
+ scenarioOutline: {
+ "Skenario konsep",
+ "Garis-Besar Skenario",
+ },
+ examples: {
+ "Contoh",
+ "Misal",
+ },
+ given: {
+ "* ",
+ "Dengan ",
+ "Diketahui ",
+ "Diasumsikan ",
+ "Bila ",
+ "Jika ",
+ },
+ when: {
+ "* ",
+ "Ketika ",
+ },
+ then: {
+ "* ",
+ "Maka ",
+ "Kemudian ",
+ },
+ and: {
+ "* ",
+ "Dan ",
+ },
+ but: {
+ "* ",
+ "Tapi ",
+ "Tetapi ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Dengan ": messages.StepKeywordType_CONTEXT,
+
+ "Diketahui ": messages.StepKeywordType_CONTEXT,
+
+ "Diasumsikan ": messages.StepKeywordType_CONTEXT,
+
+ "Bila ": messages.StepKeywordType_CONTEXT,
+
+ "Jika ": messages.StepKeywordType_CONTEXT,
+
+ "Ketika ": messages.StepKeywordType_ACTION,
+
+ "Maka ": messages.StepKeywordType_OUTCOME,
+
+ "Kemudian ": messages.StepKeywordType_OUTCOME,
+
+ "Dan ": messages.StepKeywordType_CONJUNCTION,
+
+ "Tapi ": messages.StepKeywordType_CONJUNCTION,
+
+ "Tetapi ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "is": &Dialect{
+ "is", "Icelandic", "Íslenska", map[string][]string{
+ feature: {
+ "Eiginleiki",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Bakgrunnur",
+ },
+ scenario: {
+ "Atburðarás",
+ },
+ scenarioOutline: {
+ "Lýsing Atburðarásar",
+ "Lýsing Dæma",
+ },
+ examples: {
+ "Dæmi",
+ "Atburðarásir",
+ },
+ given: {
+ "* ",
+ "Ef ",
+ },
+ when: {
+ "* ",
+ "Þegar ",
+ },
+ then: {
+ "* ",
+ "Þá ",
+ },
+ and: {
+ "* ",
+ "Og ",
+ },
+ but: {
+ "* ",
+ "En ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Ef ": messages.StepKeywordType_CONTEXT,
+
+ "Þegar ": messages.StepKeywordType_ACTION,
+
+ "Þá ": messages.StepKeywordType_OUTCOME,
+
+ "Og ": messages.StepKeywordType_CONJUNCTION,
+
+ "En ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "it": &Dialect{
+ "it", "Italian", "italiano", map[string][]string{
+ feature: {
+ "Funzionalità",
+ "Esigenza di Business",
+ "Abilità",
+ },
+ rule: {
+ "Regola",
+ },
+ background: {
+ "Contesto",
+ },
+ scenario: {
+ "Esempio",
+ "Scenario",
+ },
+ scenarioOutline: {
+ "Schema dello scenario",
+ },
+ examples: {
+ "Esempi",
+ },
+ given: {
+ "* ",
+ "Dato ",
+ "Data ",
+ "Dati ",
+ "Date ",
+ },
+ when: {
+ "* ",
+ "Quando ",
+ },
+ then: {
+ "* ",
+ "Allora ",
+ },
+ and: {
+ "* ",
+ "E ",
+ },
+ but: {
+ "* ",
+ "Ma ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Dato ": messages.StepKeywordType_CONTEXT,
+
+ "Data ": messages.StepKeywordType_CONTEXT,
+
+ "Dati ": messages.StepKeywordType_CONTEXT,
+
+ "Date ": messages.StepKeywordType_CONTEXT,
+
+ "Quando ": messages.StepKeywordType_ACTION,
+
+ "Allora ": messages.StepKeywordType_OUTCOME,
+
+ "E ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ma ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ja": &Dialect{
+ "ja", "Japanese", "日本語", map[string][]string{
+ feature: {
+ "フィーチャ",
+ "機能",
+ },
+ rule: {
+ "ルール",
+ },
+ background: {
+ "背景",
+ },
+ scenario: {
+ "シナリオ",
+ },
+ scenarioOutline: {
+ "シナリオアウトライン",
+ "シナリオテンプレート",
+ "テンプレ",
+ "シナリオテンプレ",
+ },
+ examples: {
+ "例",
+ "サンプル",
+ },
+ given: {
+ "* ",
+ "前提",
+ },
+ when: {
+ "* ",
+ "もし",
+ },
+ then: {
+ "* ",
+ "ならば",
+ },
+ and: {
+ "* ",
+ "且つ",
+ "かつ",
+ },
+ but: {
+ "* ",
+ "然し",
+ "しかし",
+ "但し",
+ "ただし",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "前提": messages.StepKeywordType_CONTEXT,
+
+ "もし": messages.StepKeywordType_ACTION,
+
+ "ならば": messages.StepKeywordType_OUTCOME,
+
+ "且つ": messages.StepKeywordType_CONJUNCTION,
+
+ "かつ": messages.StepKeywordType_CONJUNCTION,
+
+ "然し": messages.StepKeywordType_CONJUNCTION,
+
+ "しかし": messages.StepKeywordType_CONJUNCTION,
+
+ "但し": messages.StepKeywordType_CONJUNCTION,
+
+ "ただし": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "jv": &Dialect{
+ "jv", "Javanese", "Basa Jawa", map[string][]string{
+ feature: {
+ "Fitur",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Dasar",
+ },
+ scenario: {
+ "Skenario",
+ },
+ scenarioOutline: {
+ "Konsep skenario",
+ },
+ examples: {
+ "Conto",
+ "Contone",
+ },
+ given: {
+ "* ",
+ "Nalika ",
+ "Nalikaning ",
+ },
+ when: {
+ "* ",
+ "Manawa ",
+ "Menawa ",
+ },
+ then: {
+ "* ",
+ "Njuk ",
+ "Banjur ",
+ },
+ and: {
+ "* ",
+ "Lan ",
+ },
+ but: {
+ "* ",
+ "Tapi ",
+ "Nanging ",
+ "Ananging ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Nalika ": messages.StepKeywordType_CONTEXT,
+
+ "Nalikaning ": messages.StepKeywordType_CONTEXT,
+
+ "Manawa ": messages.StepKeywordType_ACTION,
+
+ "Menawa ": messages.StepKeywordType_ACTION,
+
+ "Njuk ": messages.StepKeywordType_OUTCOME,
+
+ "Banjur ": messages.StepKeywordType_OUTCOME,
+
+ "Lan ": messages.StepKeywordType_CONJUNCTION,
+
+ "Tapi ": messages.StepKeywordType_CONJUNCTION,
+
+ "Nanging ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ananging ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ka": &Dialect{
+ "ka", "Georgian", "ქართული", map[string][]string{
+ feature: {
+ "თვისება",
+ "მოთხოვნა",
+ },
+ rule: {
+ "წესი",
+ },
+ background: {
+ "კონტექსტი",
+ },
+ scenario: {
+ "მაგალითად",
+ "მაგალითი",
+ "მაგ",
+ "სცენარი",
+ },
+ scenarioOutline: {
+ "სცენარის ნიმუში",
+ "სცენარის შაბლონი",
+ "ნიმუში",
+ "შაბლონი",
+ },
+ examples: {
+ "მაგალითები",
+ },
+ given: {
+ "* ",
+ "მოცემული ",
+ "მოცემულია ",
+ "ვთქვათ ",
+ },
+ when: {
+ "* ",
+ "როდესაც ",
+ "როცა ",
+ "როგორც კი ",
+ "თუ ",
+ },
+ then: {
+ "* ",
+ "მაშინ ",
+ },
+ and: {
+ "* ",
+ "და ",
+ "ასევე ",
+ },
+ but: {
+ "* ",
+ "მაგრამ ",
+ "თუმცა ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "მოცემული ": messages.StepKeywordType_CONTEXT,
+
+ "მოცემულია ": messages.StepKeywordType_CONTEXT,
+
+ "ვთქვათ ": messages.StepKeywordType_CONTEXT,
+
+ "როდესაც ": messages.StepKeywordType_ACTION,
+
+ "როცა ": messages.StepKeywordType_ACTION,
+
+ "როგორც კი ": messages.StepKeywordType_ACTION,
+
+ "თუ ": messages.StepKeywordType_ACTION,
+
+ "მაშინ ": messages.StepKeywordType_OUTCOME,
+
+ "და ": messages.StepKeywordType_CONJUNCTION,
+
+ "ასევე ": messages.StepKeywordType_CONJUNCTION,
+
+ "მაგრამ ": messages.StepKeywordType_CONJUNCTION,
+
+ "თუმცა ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "kn": &Dialect{
+ "kn", "Kannada", "ಕನ್ನಡ", map[string][]string{
+ feature: {
+ "ಹೆಚ್ಚಳ",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "ಹಿನ್ನೆಲೆ",
+ },
+ scenario: {
+ "ಉದಾಹರಣೆ",
+ "ಕಥಾಸಾರಾಂಶ",
+ },
+ scenarioOutline: {
+ "ವಿವರಣೆ",
+ },
+ examples: {
+ "ಉದಾಹರಣೆಗಳು",
+ },
+ given: {
+ "* ",
+ "ನೀಡಿದ ",
+ },
+ when: {
+ "* ",
+ "ಸ್ಥಿತಿಯನ್ನು ",
+ },
+ then: {
+ "* ",
+ "ನಂತರ ",
+ },
+ and: {
+ "* ",
+ "ಮತ್ತು ",
+ },
+ but: {
+ "* ",
+ "ಆದರೆ ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "ನೀಡಿದ ": messages.StepKeywordType_CONTEXT,
+
+ "ಸ್ಥಿತಿಯನ್ನು ": messages.StepKeywordType_ACTION,
+
+ "ನಂತರ ": messages.StepKeywordType_OUTCOME,
+
+ "ಮತ್ತು ": messages.StepKeywordType_CONJUNCTION,
+
+ "ಆದರೆ ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ko": &Dialect{
+ "ko", "Korean", "한국어", map[string][]string{
+ feature: {
+ "기능",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "배경",
+ },
+ scenario: {
+ "시나리오",
+ },
+ scenarioOutline: {
+ "시나리오 개요",
+ },
+ examples: {
+ "예",
+ },
+ given: {
+ "* ",
+ "조건",
+ "먼저",
+ },
+ when: {
+ "* ",
+ "만일",
+ "만약",
+ },
+ then: {
+ "* ",
+ "그러면",
+ },
+ and: {
+ "* ",
+ "그리고",
+ },
+ but: {
+ "* ",
+ "하지만",
+ "단",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "조건": messages.StepKeywordType_CONTEXT,
+
+ "먼저": messages.StepKeywordType_CONTEXT,
+
+ "만일": messages.StepKeywordType_ACTION,
+
+ "만약": messages.StepKeywordType_ACTION,
+
+ "그러면": messages.StepKeywordType_OUTCOME,
+
+ "그리고": messages.StepKeywordType_CONJUNCTION,
+
+ "하지만": messages.StepKeywordType_CONJUNCTION,
+
+ "단": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "lt": &Dialect{
+ "lt", "Lithuanian", "lietuvių kalba", map[string][]string{
+ feature: {
+ "Savybė",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Kontekstas",
+ },
+ scenario: {
+ "Pavyzdys",
+ "Scenarijus",
+ },
+ scenarioOutline: {
+ "Scenarijaus šablonas",
+ },
+ examples: {
+ "Pavyzdžiai",
+ "Scenarijai",
+ "Variantai",
+ },
+ given: {
+ "* ",
+ "Duota ",
+ },
+ when: {
+ "* ",
+ "Kai ",
+ },
+ then: {
+ "* ",
+ "Tada ",
+ },
+ and: {
+ "* ",
+ "Ir ",
+ },
+ but: {
+ "* ",
+ "Bet ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Duota ": messages.StepKeywordType_CONTEXT,
+
+ "Kai ": messages.StepKeywordType_ACTION,
+
+ "Tada ": messages.StepKeywordType_OUTCOME,
+
+ "Ir ": messages.StepKeywordType_CONJUNCTION,
+
+ "Bet ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "lu": &Dialect{
+ "lu", "Luxemburgish", "Lëtzebuergesch", map[string][]string{
+ feature: {
+ "Funktionalitéit",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Hannergrond",
+ },
+ scenario: {
+ "Beispill",
+ "Szenario",
+ },
+ scenarioOutline: {
+ "Plang vum Szenario",
+ },
+ examples: {
+ "Beispiller",
+ },
+ given: {
+ "* ",
+ "ugeholl ",
+ },
+ when: {
+ "* ",
+ "wann ",
+ },
+ then: {
+ "* ",
+ "dann ",
+ },
+ and: {
+ "* ",
+ "an ",
+ "a ",
+ },
+ but: {
+ "* ",
+ "awer ",
+ "mä ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "ugeholl ": messages.StepKeywordType_CONTEXT,
+
+ "wann ": messages.StepKeywordType_ACTION,
+
+ "dann ": messages.StepKeywordType_OUTCOME,
+
+ "an ": messages.StepKeywordType_CONJUNCTION,
+
+ "a ": messages.StepKeywordType_CONJUNCTION,
+
+ "awer ": messages.StepKeywordType_CONJUNCTION,
+
+ "mä ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "lv": &Dialect{
+ "lv", "Latvian", "latviešu", map[string][]string{
+ feature: {
+ "Funkcionalitāte",
+ "Fīča",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Konteksts",
+ "Situācija",
+ },
+ scenario: {
+ "Piemērs",
+ "Scenārijs",
+ },
+ scenarioOutline: {
+ "Scenārijs pēc parauga",
+ },
+ examples: {
+ "Piemēri",
+ "Paraugs",
+ },
+ given: {
+ "* ",
+ "Kad ",
+ },
+ when: {
+ "* ",
+ "Ja ",
+ },
+ then: {
+ "* ",
+ "Tad ",
+ },
+ and: {
+ "* ",
+ "Un ",
+ },
+ but: {
+ "* ",
+ "Bet ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Kad ": messages.StepKeywordType_CONTEXT,
+
+ "Ja ": messages.StepKeywordType_ACTION,
+
+ "Tad ": messages.StepKeywordType_OUTCOME,
+
+ "Un ": messages.StepKeywordType_CONJUNCTION,
+
+ "Bet ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "mk-Cyrl": &Dialect{
+ "mk-Cyrl", "Macedonian", "Македонски", map[string][]string{
+ feature: {
+ "Функционалност",
+ "Бизнис потреба",
+ "Можност",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Контекст",
+ "Содржина",
+ },
+ scenario: {
+ "Пример",
+ "Сценарио",
+ "На пример",
+ },
+ scenarioOutline: {
+ "Преглед на сценарија",
+ "Скица",
+ "Концепт",
+ },
+ examples: {
+ "Примери",
+ "Сценарија",
+ },
+ given: {
+ "* ",
+ "Дадено ",
+ "Дадена ",
+ },
+ when: {
+ "* ",
+ "Кога ",
+ },
+ then: {
+ "* ",
+ "Тогаш ",
+ },
+ and: {
+ "* ",
+ "И ",
+ },
+ but: {
+ "* ",
+ "Но ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Дадено ": messages.StepKeywordType_CONTEXT,
+
+ "Дадена ": messages.StepKeywordType_CONTEXT,
+
+ "Кога ": messages.StepKeywordType_ACTION,
+
+ "Тогаш ": messages.StepKeywordType_OUTCOME,
+
+ "И ": messages.StepKeywordType_CONJUNCTION,
+
+ "Но ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "mk-Latn": &Dialect{
+ "mk-Latn", "Macedonian (Latin)", "Makedonski (Latinica)", map[string][]string{
+ feature: {
+ "Funkcionalnost",
+ "Biznis potreba",
+ "Mozhnost",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Kontekst",
+ "Sodrzhina",
+ },
+ scenario: {
+ "Scenario",
+ "Na primer",
+ },
+ scenarioOutline: {
+ "Pregled na scenarija",
+ "Skica",
+ "Koncept",
+ },
+ examples: {
+ "Primeri",
+ "Scenaria",
+ },
+ given: {
+ "* ",
+ "Dadeno ",
+ "Dadena ",
+ },
+ when: {
+ "* ",
+ "Koga ",
+ },
+ then: {
+ "* ",
+ "Togash ",
+ },
+ and: {
+ "* ",
+ "I ",
+ },
+ but: {
+ "* ",
+ "No ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Dadeno ": messages.StepKeywordType_CONTEXT,
+
+ "Dadena ": messages.StepKeywordType_CONTEXT,
+
+ "Koga ": messages.StepKeywordType_ACTION,
+
+ "Togash ": messages.StepKeywordType_OUTCOME,
+
+ "I ": messages.StepKeywordType_CONJUNCTION,
+
+ "No ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "mn": &Dialect{
+ "mn", "Mongolian", "монгол", map[string][]string{
+ feature: {
+ "Функц",
+ "Функционал",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Агуулга",
+ },
+ scenario: {
+ "Сценар",
+ },
+ scenarioOutline: {
+ "Сценарын төлөвлөгөө",
+ },
+ examples: {
+ "Тухайлбал",
+ },
+ given: {
+ "* ",
+ "Өгөгдсөн нь ",
+ "Анх ",
+ },
+ when: {
+ "* ",
+ "Хэрэв ",
+ },
+ then: {
+ "* ",
+ "Тэгэхэд ",
+ "Үүний дараа ",
+ },
+ and: {
+ "* ",
+ "Мөн ",
+ "Тэгээд ",
+ },
+ but: {
+ "* ",
+ "Гэхдээ ",
+ "Харин ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Өгөгдсөн нь ": messages.StepKeywordType_CONTEXT,
+
+ "Анх ": messages.StepKeywordType_CONTEXT,
+
+ "Хэрэв ": messages.StepKeywordType_ACTION,
+
+ "Тэгэхэд ": messages.StepKeywordType_OUTCOME,
+
+ "Үүний дараа ": messages.StepKeywordType_OUTCOME,
+
+ "Мөн ": messages.StepKeywordType_CONJUNCTION,
+
+ "Тэгээд ": messages.StepKeywordType_CONJUNCTION,
+
+ "Гэхдээ ": messages.StepKeywordType_CONJUNCTION,
+
+ "Харин ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ne": &Dialect{
+ "ne", "Nepali", "नेपाली", map[string][]string{
+ feature: {
+ "सुविधा",
+ "विशेषता",
+ },
+ rule: {
+ "नियम",
+ },
+ background: {
+ "पृष्ठभूमी",
+ },
+ scenario: {
+ "परिदृश्य",
+ },
+ scenarioOutline: {
+ "परिदृश्य रूपरेखा",
+ },
+ examples: {
+ "उदाहरण",
+ "उदाहरणहरु",
+ },
+ given: {
+ "* ",
+ "दिइएको ",
+ "दिएको ",
+ "यदि ",
+ },
+ when: {
+ "* ",
+ "जब ",
+ },
+ then: {
+ "* ",
+ "त्यसपछि ",
+ "अनी ",
+ },
+ and: {
+ "* ",
+ "र ",
+ "अनि ",
+ },
+ but: {
+ "* ",
+ "तर ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "दिइएको ": messages.StepKeywordType_CONTEXT,
+
+ "दिएको ": messages.StepKeywordType_CONTEXT,
+
+ "यदि ": messages.StepKeywordType_CONTEXT,
+
+ "जब ": messages.StepKeywordType_ACTION,
+
+ "त्यसपछि ": messages.StepKeywordType_OUTCOME,
+
+ "अनी ": messages.StepKeywordType_OUTCOME,
+
+ "र ": messages.StepKeywordType_CONJUNCTION,
+
+ "अनि ": messages.StepKeywordType_CONJUNCTION,
+
+ "तर ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "nl": &Dialect{
+ "nl", "Dutch", "Nederlands", map[string][]string{
+ feature: {
+ "Functionaliteit",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Achtergrond",
+ },
+ scenario: {
+ "Voorbeeld",
+ "Scenario",
+ },
+ scenarioOutline: {
+ "Abstract Scenario",
+ },
+ examples: {
+ "Voorbeelden",
+ },
+ given: {
+ "* ",
+ "Gegeven ",
+ "Stel ",
+ },
+ when: {
+ "* ",
+ "Als ",
+ "Wanneer ",
+ },
+ then: {
+ "* ",
+ "Dan ",
+ },
+ and: {
+ "* ",
+ "En ",
+ },
+ but: {
+ "* ",
+ "Maar ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Gegeven ": messages.StepKeywordType_CONTEXT,
+
+ "Stel ": messages.StepKeywordType_CONTEXT,
+
+ "Als ": messages.StepKeywordType_ACTION,
+
+ "Wanneer ": messages.StepKeywordType_ACTION,
+
+ "Dan ": messages.StepKeywordType_OUTCOME,
+
+ "En ": messages.StepKeywordType_CONJUNCTION,
+
+ "Maar ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "no": &Dialect{
+ "no", "Norwegian", "norsk", map[string][]string{
+ feature: {
+ "Egenskap",
+ },
+ rule: {
+ "Regel",
+ },
+ background: {
+ "Bakgrunn",
+ },
+ scenario: {
+ "Eksempel",
+ "Scenario",
+ },
+ scenarioOutline: {
+ "Scenariomal",
+ "Abstrakt Scenario",
+ },
+ examples: {
+ "Eksempler",
+ },
+ given: {
+ "* ",
+ "Gitt ",
+ },
+ when: {
+ "* ",
+ "Når ",
+ },
+ then: {
+ "* ",
+ "Så ",
+ },
+ and: {
+ "* ",
+ "Og ",
+ },
+ but: {
+ "* ",
+ "Men ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Gitt ": messages.StepKeywordType_CONTEXT,
+
+ "Når ": messages.StepKeywordType_ACTION,
+
+ "Så ": messages.StepKeywordType_OUTCOME,
+
+ "Og ": messages.StepKeywordType_CONJUNCTION,
+
+ "Men ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "pa": &Dialect{
+ "pa", "Panjabi", "ਪੰਜਾਬੀ", map[string][]string{
+ feature: {
+ "ਖਾਸੀਅਤ",
+ "ਮੁਹਾਂਦਰਾ",
+ "ਨਕਸ਼ ਨੁਹਾਰ",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "ਪਿਛੋਕੜ",
+ },
+ scenario: {
+ "ਉਦਾਹਰਨ",
+ "ਪਟਕਥਾ",
+ },
+ scenarioOutline: {
+ "ਪਟਕਥਾ ਢਾਂਚਾ",
+ "ਪਟਕਥਾ ਰੂਪ ਰੇਖਾ",
+ },
+ examples: {
+ "ਉਦਾਹਰਨਾਂ",
+ },
+ given: {
+ "* ",
+ "ਜੇਕਰ ",
+ "ਜਿਵੇਂ ਕਿ ",
+ },
+ when: {
+ "* ",
+ "ਜਦੋਂ ",
+ },
+ then: {
+ "* ",
+ "ਤਦ ",
+ },
+ and: {
+ "* ",
+ "ਅਤੇ ",
+ },
+ but: {
+ "* ",
+ "ਪਰ ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "ਜੇਕਰ ": messages.StepKeywordType_CONTEXT,
+
+ "ਜਿਵੇਂ ਕਿ ": messages.StepKeywordType_CONTEXT,
+
+ "ਜਦੋਂ ": messages.StepKeywordType_ACTION,
+
+ "ਤਦ ": messages.StepKeywordType_OUTCOME,
+
+ "ਅਤੇ ": messages.StepKeywordType_CONJUNCTION,
+
+ "ਪਰ ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "pl": &Dialect{
+ "pl", "Polish", "polski", map[string][]string{
+ feature: {
+ "Właściwość",
+ "Funkcja",
+ "Aspekt",
+ "Potrzeba biznesowa",
+ },
+ rule: {
+ "Zasada",
+ "Reguła",
+ },
+ background: {
+ "Założenia",
+ },
+ scenario: {
+ "Przykład",
+ "Scenariusz",
+ },
+ scenarioOutline: {
+ "Szablon scenariusza",
+ },
+ examples: {
+ "Przykłady",
+ },
+ given: {
+ "* ",
+ "Zakładając ",
+ "Mając ",
+ "Zakładając, że ",
+ },
+ when: {
+ "* ",
+ "Jeżeli ",
+ "Jeśli ",
+ "Gdy ",
+ "Kiedy ",
+ },
+ then: {
+ "* ",
+ "Wtedy ",
+ },
+ and: {
+ "* ",
+ "Oraz ",
+ "I ",
+ },
+ but: {
+ "* ",
+ "Ale ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Zakładając ": messages.StepKeywordType_CONTEXT,
+
+ "Mając ": messages.StepKeywordType_CONTEXT,
+
+ "Zakładając, że ": messages.StepKeywordType_CONTEXT,
+
+ "Jeżeli ": messages.StepKeywordType_ACTION,
+
+ "Jeśli ": messages.StepKeywordType_ACTION,
+
+ "Gdy ": messages.StepKeywordType_ACTION,
+
+ "Kiedy ": messages.StepKeywordType_ACTION,
+
+ "Wtedy ": messages.StepKeywordType_OUTCOME,
+
+ "Oraz ": messages.StepKeywordType_CONJUNCTION,
+
+ "I ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ale ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "pt": &Dialect{
+ "pt", "Portuguese", "português", map[string][]string{
+ feature: {
+ "Funcionalidade",
+ "Característica",
+ "Caracteristica",
+ },
+ rule: {
+ "Regra",
+ },
+ background: {
+ "Contexto",
+ "Cenário de Fundo",
+ "Cenario de Fundo",
+ "Fundo",
+ },
+ scenario: {
+ "Exemplo",
+ "Cenário",
+ "Cenario",
+ },
+ scenarioOutline: {
+ "Esquema do Cenário",
+ "Esquema do Cenario",
+ "Delineação do Cenário",
+ "Delineacao do Cenario",
+ },
+ examples: {
+ "Exemplos",
+ "Cenários",
+ "Cenarios",
+ },
+ given: {
+ "* ",
+ "Dado ",
+ "Dada ",
+ "Dados ",
+ "Dadas ",
+ },
+ when: {
+ "* ",
+ "Quando ",
+ },
+ then: {
+ "* ",
+ "Então ",
+ "Entao ",
+ },
+ and: {
+ "* ",
+ "E ",
+ },
+ but: {
+ "* ",
+ "Mas ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Dado ": messages.StepKeywordType_CONTEXT,
+
+ "Dada ": messages.StepKeywordType_CONTEXT,
+
+ "Dados ": messages.StepKeywordType_CONTEXT,
+
+ "Dadas ": messages.StepKeywordType_CONTEXT,
+
+ "Quando ": messages.StepKeywordType_ACTION,
+
+ "Então ": messages.StepKeywordType_OUTCOME,
+
+ "Entao ": messages.StepKeywordType_OUTCOME,
+
+ "E ": messages.StepKeywordType_CONJUNCTION,
+
+ "Mas ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ro": &Dialect{
+ "ro", "Romanian", "română", map[string][]string{
+ feature: {
+ "Functionalitate",
+ "Funcționalitate",
+ "Funcţionalitate",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Context",
+ },
+ scenario: {
+ "Exemplu",
+ "Scenariu",
+ },
+ scenarioOutline: {
+ "Structura scenariu",
+ "Structură scenariu",
+ },
+ examples: {
+ "Exemple",
+ },
+ given: {
+ "* ",
+ "Date fiind ",
+ "Dat fiind ",
+ "Dată fiind",
+ "Dati fiind ",
+ "Dați fiind ",
+ "Daţi fiind ",
+ },
+ when: {
+ "* ",
+ "Cand ",
+ "Când ",
+ },
+ then: {
+ "* ",
+ "Atunci ",
+ },
+ and: {
+ "* ",
+ "Si ",
+ "Și ",
+ "Şi ",
+ },
+ but: {
+ "* ",
+ "Dar ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Date fiind ": messages.StepKeywordType_CONTEXT,
+
+ "Dat fiind ": messages.StepKeywordType_CONTEXT,
+
+ "Dată fiind": messages.StepKeywordType_CONTEXT,
+
+ "Dati fiind ": messages.StepKeywordType_CONTEXT,
+
+ "Dați fiind ": messages.StepKeywordType_CONTEXT,
+
+ "Daţi fiind ": messages.StepKeywordType_CONTEXT,
+
+ "Cand ": messages.StepKeywordType_ACTION,
+
+ "Când ": messages.StepKeywordType_ACTION,
+
+ "Atunci ": messages.StepKeywordType_OUTCOME,
+
+ "Si ": messages.StepKeywordType_CONJUNCTION,
+
+ "Și ": messages.StepKeywordType_CONJUNCTION,
+
+ "Şi ": messages.StepKeywordType_CONJUNCTION,
+
+ "Dar ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ru": &Dialect{
+ "ru", "Russian", "русский", map[string][]string{
+ feature: {
+ "Функция",
+ "Функциональность",
+ "Функционал",
+ "Свойство",
+ "Фича",
+ },
+ rule: {
+ "Правило",
+ },
+ background: {
+ "Предыстория",
+ "Контекст",
+ },
+ scenario: {
+ "Пример",
+ "Сценарий",
+ },
+ scenarioOutline: {
+ "Структура сценария",
+ "Шаблон сценария",
+ },
+ examples: {
+ "Примеры",
+ },
+ given: {
+ "* ",
+ "Допустим ",
+ "Дано ",
+ "Пусть ",
+ },
+ when: {
+ "* ",
+ "Когда ",
+ "Если ",
+ },
+ then: {
+ "* ",
+ "То ",
+ "Затем ",
+ "Тогда ",
+ },
+ and: {
+ "* ",
+ "И ",
+ "К тому же ",
+ "Также ",
+ },
+ but: {
+ "* ",
+ "Но ",
+ "А ",
+ "Иначе ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Допустим ": messages.StepKeywordType_CONTEXT,
+
+ "Дано ": messages.StepKeywordType_CONTEXT,
+
+ "Пусть ": messages.StepKeywordType_CONTEXT,
+
+ "Когда ": messages.StepKeywordType_ACTION,
+
+ "Если ": messages.StepKeywordType_ACTION,
+
+ "То ": messages.StepKeywordType_OUTCOME,
+
+ "Затем ": messages.StepKeywordType_OUTCOME,
+
+ "Тогда ": messages.StepKeywordType_OUTCOME,
+
+ "И ": messages.StepKeywordType_CONJUNCTION,
+
+ "К тому же ": messages.StepKeywordType_CONJUNCTION,
+
+ "Также ": messages.StepKeywordType_CONJUNCTION,
+
+ "Но ": messages.StepKeywordType_CONJUNCTION,
+
+ "А ": messages.StepKeywordType_CONJUNCTION,
+
+ "Иначе ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "sk": &Dialect{
+ "sk", "Slovak", "Slovensky", map[string][]string{
+ feature: {
+ "Požiadavka",
+ "Funkcia",
+ "Vlastnosť",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Pozadie",
+ },
+ scenario: {
+ "Príklad",
+ "Scenár",
+ },
+ scenarioOutline: {
+ "Náčrt Scenáru",
+ "Náčrt Scenára",
+ "Osnova Scenára",
+ },
+ examples: {
+ "Príklady",
+ },
+ given: {
+ "* ",
+ "Pokiaľ ",
+ "Za predpokladu ",
+ },
+ when: {
+ "* ",
+ "Keď ",
+ "Ak ",
+ },
+ then: {
+ "* ",
+ "Tak ",
+ "Potom ",
+ },
+ and: {
+ "* ",
+ "A ",
+ "A tiež ",
+ "A taktiež ",
+ "A zároveň ",
+ },
+ but: {
+ "* ",
+ "Ale ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Pokiaľ ": messages.StepKeywordType_CONTEXT,
+
+ "Za predpokladu ": messages.StepKeywordType_CONTEXT,
+
+ "Keď ": messages.StepKeywordType_ACTION,
+
+ "Ak ": messages.StepKeywordType_ACTION,
+
+ "Tak ": messages.StepKeywordType_OUTCOME,
+
+ "Potom ": messages.StepKeywordType_OUTCOME,
+
+ "A ": messages.StepKeywordType_CONJUNCTION,
+
+ "A tiež ": messages.StepKeywordType_CONJUNCTION,
+
+ "A taktiež ": messages.StepKeywordType_CONJUNCTION,
+
+ "A zároveň ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ale ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "sl": &Dialect{
+ "sl", "Slovenian", "Slovenski", map[string][]string{
+ feature: {
+ "Funkcionalnost",
+ "Funkcija",
+ "Možnosti",
+ "Moznosti",
+ "Lastnost",
+ "Značilnost",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Kontekst",
+ "Osnova",
+ "Ozadje",
+ },
+ scenario: {
+ "Primer",
+ "Scenarij",
+ },
+ scenarioOutline: {
+ "Struktura scenarija",
+ "Skica",
+ "Koncept",
+ "Oris scenarija",
+ "Osnutek",
+ },
+ examples: {
+ "Primeri",
+ "Scenariji",
+ },
+ given: {
+ "Dano ",
+ "Podano ",
+ "Zaradi ",
+ "Privzeto ",
+ },
+ when: {
+ "Ko ",
+ "Ce ",
+ "Če ",
+ "Kadar ",
+ },
+ then: {
+ "Nato ",
+ "Potem ",
+ "Takrat ",
+ },
+ and: {
+ "In ",
+ "Ter ",
+ },
+ but: {
+ "Toda ",
+ "Ampak ",
+ "Vendar ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Dano ": messages.StepKeywordType_CONTEXT,
+
+ "Podano ": messages.StepKeywordType_CONTEXT,
+
+ "Zaradi ": messages.StepKeywordType_CONTEXT,
+
+ "Privzeto ": messages.StepKeywordType_CONTEXT,
+
+ "Ko ": messages.StepKeywordType_ACTION,
+
+ "Ce ": messages.StepKeywordType_ACTION,
+
+ "Če ": messages.StepKeywordType_ACTION,
+
+ "Kadar ": messages.StepKeywordType_ACTION,
+
+ "Nato ": messages.StepKeywordType_OUTCOME,
+
+ "Potem ": messages.StepKeywordType_OUTCOME,
+
+ "Takrat ": messages.StepKeywordType_OUTCOME,
+
+ "In ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ter ": messages.StepKeywordType_CONJUNCTION,
+
+ "Toda ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ampak ": messages.StepKeywordType_CONJUNCTION,
+
+ "Vendar ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "sr-Cyrl": &Dialect{
+ "sr-Cyrl", "Serbian", "Српски", map[string][]string{
+ feature: {
+ "Функционалност",
+ "Могућност",
+ "Особина",
+ },
+ rule: {
+ "Правило",
+ },
+ background: {
+ "Контекст",
+ "Основа",
+ "Позадина",
+ },
+ scenario: {
+ "Пример",
+ "Сценарио",
+ },
+ scenarioOutline: {
+ "Структура сценарија",
+ "Скица",
+ "Концепт",
+ },
+ examples: {
+ "Примери",
+ "Сценарији",
+ },
+ given: {
+ "* ",
+ "За дато ",
+ "За дате ",
+ "За дати ",
+ },
+ when: {
+ "* ",
+ "Када ",
+ "Кад ",
+ },
+ then: {
+ "* ",
+ "Онда ",
+ },
+ and: {
+ "* ",
+ "И ",
+ },
+ but: {
+ "* ",
+ "Али ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "За дато ": messages.StepKeywordType_CONTEXT,
+
+ "За дате ": messages.StepKeywordType_CONTEXT,
+
+ "За дати ": messages.StepKeywordType_CONTEXT,
+
+ "Када ": messages.StepKeywordType_ACTION,
+
+ "Кад ": messages.StepKeywordType_ACTION,
+
+ "Онда ": messages.StepKeywordType_OUTCOME,
+
+ "И ": messages.StepKeywordType_CONJUNCTION,
+
+ "Али ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "sr-Latn": &Dialect{
+ "sr-Latn", "Serbian (Latin)", "Srpski (Latinica)", map[string][]string{
+ feature: {
+ "Funkcionalnost",
+ "Mogućnost",
+ "Mogucnost",
+ "Osobina",
+ },
+ rule: {
+ "Pravilo",
+ },
+ background: {
+ "Kontekst",
+ "Osnova",
+ "Pozadina",
+ },
+ scenario: {
+ "Scenario",
+ "Primer",
+ },
+ scenarioOutline: {
+ "Struktura scenarija",
+ "Skica",
+ "Koncept",
+ },
+ examples: {
+ "Primeri",
+ "Scenariji",
+ },
+ given: {
+ "* ",
+ "Za dato ",
+ "Za date ",
+ "Za dati ",
+ },
+ when: {
+ "* ",
+ "Kada ",
+ "Kad ",
+ },
+ then: {
+ "* ",
+ "Onda ",
+ },
+ and: {
+ "* ",
+ "I ",
+ },
+ but: {
+ "* ",
+ "Ali ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Za dato ": messages.StepKeywordType_CONTEXT,
+
+ "Za date ": messages.StepKeywordType_CONTEXT,
+
+ "Za dati ": messages.StepKeywordType_CONTEXT,
+
+ "Kada ": messages.StepKeywordType_ACTION,
+
+ "Kad ": messages.StepKeywordType_ACTION,
+
+ "Onda ": messages.StepKeywordType_OUTCOME,
+
+ "I ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ali ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "sv": &Dialect{
+ "sv", "Swedish", "Svenska", map[string][]string{
+ feature: {
+ "Egenskap",
+ },
+ rule: {
+ "Regel",
+ },
+ background: {
+ "Bakgrund",
+ },
+ scenario: {
+ "Scenario",
+ },
+ scenarioOutline: {
+ "Abstrakt Scenario",
+ "Scenariomall",
+ },
+ examples: {
+ "Exempel",
+ },
+ given: {
+ "* ",
+ "Givet ",
+ },
+ when: {
+ "* ",
+ "När ",
+ },
+ then: {
+ "* ",
+ "Så ",
+ },
+ and: {
+ "* ",
+ "Och ",
+ },
+ but: {
+ "* ",
+ "Men ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Givet ": messages.StepKeywordType_CONTEXT,
+
+ "När ": messages.StepKeywordType_ACTION,
+
+ "Så ": messages.StepKeywordType_OUTCOME,
+
+ "Och ": messages.StepKeywordType_CONJUNCTION,
+
+ "Men ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ta": &Dialect{
+ "ta", "Tamil", "தமிழ்", map[string][]string{
+ feature: {
+ "அம்சம்",
+ "வணிக தேவை",
+ "திறன்",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "பின்னணி",
+ },
+ scenario: {
+ "உதாரணமாக",
+ "காட்சி",
+ },
+ scenarioOutline: {
+ "காட்சி சுருக்கம்",
+ "காட்சி வார்ப்புரு",
+ },
+ examples: {
+ "எடுத்துக்காட்டுகள்",
+ "காட்சிகள்",
+ "நிலைமைகளில்",
+ },
+ given: {
+ "* ",
+ "கொடுக்கப்பட்ட ",
+ },
+ when: {
+ "* ",
+ "எப்போது ",
+ },
+ then: {
+ "* ",
+ "அப்பொழுது ",
+ },
+ and: {
+ "* ",
+ "மேலும் ",
+ "மற்றும் ",
+ },
+ but: {
+ "* ",
+ "ஆனால் ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "கொடுக்கப்பட்ட ": messages.StepKeywordType_CONTEXT,
+
+ "எப்போது ": messages.StepKeywordType_ACTION,
+
+ "அப்பொழுது ": messages.StepKeywordType_OUTCOME,
+
+ "மேலும் ": messages.StepKeywordType_CONJUNCTION,
+
+ "மற்றும் ": messages.StepKeywordType_CONJUNCTION,
+
+ "ஆனால் ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "th": &Dialect{
+ "th", "Thai", "ไทย", map[string][]string{
+ feature: {
+ "โครงหลัก",
+ "ความต้องการทางธุรกิจ",
+ "ความสามารถ",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "แนวคิด",
+ },
+ scenario: {
+ "เหตุการณ์",
+ },
+ scenarioOutline: {
+ "สรุปเหตุการณ์",
+ "โครงสร้างของเหตุการณ์",
+ },
+ examples: {
+ "ชุดของตัวอย่าง",
+ "ชุดของเหตุการณ์",
+ },
+ given: {
+ "* ",
+ "กำหนดให้ ",
+ },
+ when: {
+ "* ",
+ "เมื่อ ",
+ },
+ then: {
+ "* ",
+ "ดังนั้น ",
+ },
+ and: {
+ "* ",
+ "และ ",
+ },
+ but: {
+ "* ",
+ "แต่ ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "กำหนดให้ ": messages.StepKeywordType_CONTEXT,
+
+ "เมื่อ ": messages.StepKeywordType_ACTION,
+
+ "ดังนั้น ": messages.StepKeywordType_OUTCOME,
+
+ "และ ": messages.StepKeywordType_CONJUNCTION,
+
+ "แต่ ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "te": &Dialect{
+ "te", "Telugu", "తెలుగు", map[string][]string{
+ feature: {
+ "గుణము",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "నేపథ్యం",
+ },
+ scenario: {
+ "ఉదాహరణ",
+ "సన్నివేశం",
+ },
+ scenarioOutline: {
+ "కథనం",
+ },
+ examples: {
+ "ఉదాహరణలు",
+ },
+ given: {
+ "* ",
+ "చెప్పబడినది ",
+ },
+ when: {
+ "* ",
+ "ఈ పరిస్థితిలో ",
+ },
+ then: {
+ "* ",
+ "అప్పుడు ",
+ },
+ and: {
+ "* ",
+ "మరియు ",
+ },
+ but: {
+ "* ",
+ "కాని ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "చెప్పబడినది ": messages.StepKeywordType_CONTEXT,
+
+ "ఈ పరిస్థితిలో ": messages.StepKeywordType_ACTION,
+
+ "అప్పుడు ": messages.StepKeywordType_OUTCOME,
+
+ "మరియు ": messages.StepKeywordType_CONJUNCTION,
+
+ "కాని ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "tlh": &Dialect{
+ "tlh", "Klingon", "tlhIngan", map[string][]string{
+ feature: {
+ "Qap",
+ "Qu'meH 'ut",
+ "perbogh",
+ "poQbogh malja'",
+ "laH",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "mo'",
+ },
+ scenario: {
+ "lut",
+ },
+ scenarioOutline: {
+ "lut chovnatlh",
+ },
+ examples: {
+ "ghantoH",
+ "lutmey",
+ },
+ given: {
+ "* ",
+ "ghu' noblu' ",
+ "DaH ghu' bejlu' ",
+ },
+ when: {
+ "* ",
+ "qaSDI' ",
+ },
+ then: {
+ "* ",
+ "vaj ",
+ },
+ and: {
+ "* ",
+ "'ej ",
+ "latlh ",
+ },
+ but: {
+ "* ",
+ "'ach ",
+ "'a ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "ghu' noblu' ": messages.StepKeywordType_CONTEXT,
+
+ "DaH ghu' bejlu' ": messages.StepKeywordType_CONTEXT,
+
+ "qaSDI' ": messages.StepKeywordType_ACTION,
+
+ "vaj ": messages.StepKeywordType_OUTCOME,
+
+ "'ej ": messages.StepKeywordType_CONJUNCTION,
+
+ "latlh ": messages.StepKeywordType_CONJUNCTION,
+
+ "'ach ": messages.StepKeywordType_CONJUNCTION,
+
+ "'a ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "tr": &Dialect{
+ "tr", "Turkish", "Türkçe", map[string][]string{
+ feature: {
+ "Özellik",
+ },
+ rule: {
+ "Kural",
+ },
+ background: {
+ "Geçmiş",
+ },
+ scenario: {
+ "Örnek",
+ "Senaryo",
+ },
+ scenarioOutline: {
+ "Senaryo taslağı",
+ },
+ examples: {
+ "Örnekler",
+ },
+ given: {
+ "* ",
+ "Diyelim ki ",
+ },
+ when: {
+ "* ",
+ "Eğer ki ",
+ },
+ then: {
+ "* ",
+ "O zaman ",
+ },
+ and: {
+ "* ",
+ "Ve ",
+ },
+ but: {
+ "* ",
+ "Fakat ",
+ "Ama ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Diyelim ki ": messages.StepKeywordType_CONTEXT,
+
+ "Eğer ki ": messages.StepKeywordType_ACTION,
+
+ "O zaman ": messages.StepKeywordType_OUTCOME,
+
+ "Ve ": messages.StepKeywordType_CONJUNCTION,
+
+ "Fakat ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ama ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "tt": &Dialect{
+ "tt", "Tatar", "Татарча", map[string][]string{
+ feature: {
+ "Мөмкинлек",
+ "Үзенчәлеклелек",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Кереш",
+ },
+ scenario: {
+ "Сценарий",
+ },
+ scenarioOutline: {
+ "Сценарийның төзелеше",
+ },
+ examples: {
+ "Үрнәкләр",
+ "Мисаллар",
+ },
+ given: {
+ "* ",
+ "Әйтик ",
+ },
+ when: {
+ "* ",
+ "Әгәр ",
+ },
+ then: {
+ "* ",
+ "Нәтиҗәдә ",
+ },
+ and: {
+ "* ",
+ "Һәм ",
+ "Вә ",
+ },
+ but: {
+ "* ",
+ "Ләкин ",
+ "Әмма ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Әйтик ": messages.StepKeywordType_CONTEXT,
+
+ "Әгәр ": messages.StepKeywordType_ACTION,
+
+ "Нәтиҗәдә ": messages.StepKeywordType_OUTCOME,
+
+ "Һәм ": messages.StepKeywordType_CONJUNCTION,
+
+ "Вә ": messages.StepKeywordType_CONJUNCTION,
+
+ "Ләкин ": messages.StepKeywordType_CONJUNCTION,
+
+ "Әмма ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "uk": &Dialect{
+ "uk", "Ukrainian", "Українська", map[string][]string{
+ feature: {
+ "Функціонал",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Передумова",
+ },
+ scenario: {
+ "Приклад",
+ "Сценарій",
+ },
+ scenarioOutline: {
+ "Структура сценарію",
+ },
+ examples: {
+ "Приклади",
+ },
+ given: {
+ "* ",
+ "Припустимо ",
+ "Припустимо, що ",
+ "Нехай ",
+ "Дано ",
+ },
+ when: {
+ "* ",
+ "Якщо ",
+ "Коли ",
+ },
+ then: {
+ "* ",
+ "То ",
+ "Тоді ",
+ },
+ and: {
+ "* ",
+ "І ",
+ "А також ",
+ "Та ",
+ },
+ but: {
+ "* ",
+ "Але ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Припустимо ": messages.StepKeywordType_CONTEXT,
+
+ "Припустимо, що ": messages.StepKeywordType_CONTEXT,
+
+ "Нехай ": messages.StepKeywordType_CONTEXT,
+
+ "Дано ": messages.StepKeywordType_CONTEXT,
+
+ "Якщо ": messages.StepKeywordType_ACTION,
+
+ "Коли ": messages.StepKeywordType_ACTION,
+
+ "То ": messages.StepKeywordType_OUTCOME,
+
+ "Тоді ": messages.StepKeywordType_OUTCOME,
+
+ "І ": messages.StepKeywordType_CONJUNCTION,
+
+ "А також ": messages.StepKeywordType_CONJUNCTION,
+
+ "Та ": messages.StepKeywordType_CONJUNCTION,
+
+ "Але ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "ur": &Dialect{
+ "ur", "Urdu", "اردو", map[string][]string{
+ feature: {
+ "صلاحیت",
+ "کاروبار کی ضرورت",
+ "خصوصیت",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "پس منظر",
+ },
+ scenario: {
+ "منظرنامہ",
+ },
+ scenarioOutline: {
+ "منظر نامے کا خاکہ",
+ },
+ examples: {
+ "مثالیں",
+ },
+ given: {
+ "* ",
+ "اگر ",
+ "بالفرض ",
+ "فرض کیا ",
+ },
+ when: {
+ "* ",
+ "جب ",
+ },
+ then: {
+ "* ",
+ "پھر ",
+ "تب ",
+ },
+ and: {
+ "* ",
+ "اور ",
+ },
+ but: {
+ "* ",
+ "لیکن ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "اگر ": messages.StepKeywordType_CONTEXT,
+
+ "بالفرض ": messages.StepKeywordType_CONTEXT,
+
+ "فرض کیا ": messages.StepKeywordType_CONTEXT,
+
+ "جب ": messages.StepKeywordType_ACTION,
+
+ "پھر ": messages.StepKeywordType_OUTCOME,
+
+ "تب ": messages.StepKeywordType_OUTCOME,
+
+ "اور ": messages.StepKeywordType_CONJUNCTION,
+
+ "لیکن ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "uz": &Dialect{
+ "uz", "Uzbek", "Узбекча", map[string][]string{
+ feature: {
+ "Функционал",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Тарих",
+ },
+ scenario: {
+ "Сценарий",
+ },
+ scenarioOutline: {
+ "Сценарий структураси",
+ },
+ examples: {
+ "Мисоллар",
+ },
+ given: {
+ "* ",
+ "Belgilangan ",
+ },
+ when: {
+ "* ",
+ "Агар ",
+ },
+ then: {
+ "* ",
+ "Унда ",
+ },
+ and: {
+ "* ",
+ "Ва ",
+ },
+ but: {
+ "* ",
+ "Лекин ",
+ "Бирок ",
+ "Аммо ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Belgilangan ": messages.StepKeywordType_CONTEXT,
+
+ "Агар ": messages.StepKeywordType_ACTION,
+
+ "Унда ": messages.StepKeywordType_OUTCOME,
+
+ "Ва ": messages.StepKeywordType_CONJUNCTION,
+
+ "Лекин ": messages.StepKeywordType_CONJUNCTION,
+
+ "Бирок ": messages.StepKeywordType_CONJUNCTION,
+
+ "Аммо ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "vi": &Dialect{
+ "vi", "Vietnamese", "Tiếng Việt", map[string][]string{
+ feature: {
+ "Tính năng",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "Bối cảnh",
+ },
+ scenario: {
+ "Tình huống",
+ "Kịch bản",
+ },
+ scenarioOutline: {
+ "Khung tình huống",
+ "Khung kịch bản",
+ },
+ examples: {
+ "Dữ liệu",
+ },
+ given: {
+ "* ",
+ "Biết ",
+ "Cho ",
+ },
+ when: {
+ "* ",
+ "Khi ",
+ },
+ then: {
+ "* ",
+ "Thì ",
+ },
+ and: {
+ "* ",
+ "Và ",
+ },
+ but: {
+ "* ",
+ "Nhưng ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "Biết ": messages.StepKeywordType_CONTEXT,
+
+ "Cho ": messages.StepKeywordType_CONTEXT,
+
+ "Khi ": messages.StepKeywordType_ACTION,
+
+ "Thì ": messages.StepKeywordType_OUTCOME,
+
+ "Và ": messages.StepKeywordType_CONJUNCTION,
+
+ "Nhưng ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "zh-CN": &Dialect{
+ "zh-CN", "Chinese simplified", "简体中文", map[string][]string{
+ feature: {
+ "功能",
+ },
+ rule: {
+ "Rule",
+ "规则",
+ },
+ background: {
+ "背景",
+ },
+ scenario: {
+ "场景",
+ "剧本",
+ },
+ scenarioOutline: {
+ "场景大纲",
+ "剧本大纲",
+ },
+ examples: {
+ "例子",
+ },
+ given: {
+ "* ",
+ "假如",
+ "假设",
+ "假定",
+ },
+ when: {
+ "* ",
+ "当",
+ },
+ then: {
+ "* ",
+ "那么",
+ },
+ and: {
+ "* ",
+ "而且",
+ "并且",
+ "同时",
+ },
+ but: {
+ "* ",
+ "但是",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "假如": messages.StepKeywordType_CONTEXT,
+
+ "假设": messages.StepKeywordType_CONTEXT,
+
+ "假定": messages.StepKeywordType_CONTEXT,
+
+ "当": messages.StepKeywordType_ACTION,
+
+ "那么": messages.StepKeywordType_OUTCOME,
+
+ "而且": messages.StepKeywordType_CONJUNCTION,
+
+ "并且": messages.StepKeywordType_CONJUNCTION,
+
+ "同时": messages.StepKeywordType_CONJUNCTION,
+
+ "但是": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "zh-TW": &Dialect{
+ "zh-TW", "Chinese traditional", "繁體中文", map[string][]string{
+ feature: {
+ "功能",
+ },
+ rule: {
+ "Rule",
+ },
+ background: {
+ "背景",
+ },
+ scenario: {
+ "場景",
+ "劇本",
+ },
+ scenarioOutline: {
+ "場景大綱",
+ "劇本大綱",
+ },
+ examples: {
+ "例子",
+ },
+ given: {
+ "* ",
+ "假如",
+ "假設",
+ "假定",
+ },
+ when: {
+ "* ",
+ "當",
+ },
+ then: {
+ "* ",
+ "那麼",
+ },
+ and: {
+ "* ",
+ "而且",
+ "並且",
+ "同時",
+ },
+ but: {
+ "* ",
+ "但是",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "假如": messages.StepKeywordType_CONTEXT,
+
+ "假設": messages.StepKeywordType_CONTEXT,
+
+ "假定": messages.StepKeywordType_CONTEXT,
+
+ "當": messages.StepKeywordType_ACTION,
+
+ "那麼": messages.StepKeywordType_OUTCOME,
+
+ "而且": messages.StepKeywordType_CONJUNCTION,
+
+ "並且": messages.StepKeywordType_CONJUNCTION,
+
+ "同時": messages.StepKeywordType_CONJUNCTION,
+
+ "但是": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "mr": &Dialect{
+ "mr", "Marathi", "मराठी", map[string][]string{
+ feature: {
+ "वैशिष्ट्य",
+ "सुविधा",
+ },
+ rule: {
+ "नियम",
+ },
+ background: {
+ "पार्श्वभूमी",
+ },
+ scenario: {
+ "परिदृश्य",
+ },
+ scenarioOutline: {
+ "परिदृश्य रूपरेखा",
+ },
+ examples: {
+ "उदाहरण",
+ },
+ given: {
+ "* ",
+ "जर",
+ "दिलेल्या प्रमाणे ",
+ },
+ when: {
+ "* ",
+ "जेव्हा ",
+ },
+ then: {
+ "* ",
+ "मग ",
+ "तेव्हा ",
+ },
+ and: {
+ "* ",
+ "आणि ",
+ "तसेच ",
+ },
+ but: {
+ "* ",
+ "पण ",
+ "परंतु ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "जर": messages.StepKeywordType_CONTEXT,
+
+ "दिलेल्या प्रमाणे ": messages.StepKeywordType_CONTEXT,
+
+ "जेव्हा ": messages.StepKeywordType_ACTION,
+
+ "मग ": messages.StepKeywordType_OUTCOME,
+
+ "तेव्हा ": messages.StepKeywordType_OUTCOME,
+
+ "आणि ": messages.StepKeywordType_CONJUNCTION,
+
+ "तसेच ": messages.StepKeywordType_CONJUNCTION,
+
+ "पण ": messages.StepKeywordType_CONJUNCTION,
+
+ "परंतु ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+ "amh": &Dialect{
+ "amh", "Amharic", "አማርኛ", map[string][]string{
+ feature: {
+ "ስራ",
+ "የተፈለገው ስራ",
+ "የሚፈለገው ድርጊት",
+ },
+ rule: {
+ "ህግ",
+ },
+ background: {
+ "ቅድመ ሁኔታ",
+ "መነሻ",
+ "መነሻ ሀሳብ",
+ },
+ scenario: {
+ "ምሳሌ",
+ "ሁናቴ",
+ },
+ scenarioOutline: {
+ "ሁናቴ ዝርዝር",
+ "ሁናቴ አብነት",
+ },
+ examples: {
+ "ምሳሌዎች",
+ "ሁናቴዎች",
+ },
+ given: {
+ "* ",
+ "የተሰጠ ",
+ },
+ when: {
+ "* ",
+ "መቼ ",
+ },
+ then: {
+ "* ",
+ "ከዚያ ",
+ },
+ and: {
+ "* ",
+ "እና ",
+ },
+ but: {
+ "* ",
+ "ግን ",
+ },
+ },
+ map[string]messages.StepKeywordType{
+ "የተሰጠ ": messages.StepKeywordType_CONTEXT,
+
+ "መቼ ": messages.StepKeywordType_ACTION,
+
+ "ከዚያ ": messages.StepKeywordType_OUTCOME,
+
+ "እና ": messages.StepKeywordType_CONJUNCTION,
+
+ "ግን ": messages.StepKeywordType_CONJUNCTION,
+
+ "* ": messages.StepKeywordType_UNKNOWN,
+ }},
+}
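Reviewer note: the second map in each Dialect entry is what drives semantic step classification — conjunction keywords such as "Y " or "И " resolve to CONJUNCTION instead of inheriting the Given/When/Then type of the preceding step, and "* " always maps to UNKNOWN. A minimal, self-contained sketch of the longest-prefix lookup this table supports (the stepType helper and the local excerpt of the Spanish table are hypothetical illustrations; the real Dialect accessors live elsewhere in this package):

```go
package main

import (
	"fmt"
	"strings"
)

// stepKeywordType stands in for messages.StepKeywordType in this sketch.
type stepKeywordType string

// Hypothetical local excerpt of the generated "es" keyword-type table above.
var esStepTypes = map[string]stepKeywordType{
	"Dado ":     "CONTEXT",
	"Cuando ":   "ACTION",
	"Entonces ": "OUTCOME",
	"Y ":        "CONJUNCTION",
	"Pero ":     "CONJUNCTION",
	"* ":        "UNKNOWN",
}

// stepType classifies a raw step line by longest matching keyword prefix.
func stepType(line string) (keyword string, t stepKeywordType, ok bool) {
	best := ""
	for kw := range esStepTypes {
		if strings.HasPrefix(line, kw) && len(kw) > len(best) {
			best = kw
		}
	}
	if best == "" {
		return "", "", false
	}
	return best, esStepTypes[best], true
}

func main() {
	kw, t, ok := stepType("Entonces veo el total")
	fmt.Println(kw, t, ok) // "Entonces " OUTCOME true
}
```

Longest-prefix matching matters because some dialects carry overlapping keywords — French, for example, defines both "Etant donné " and "Etant donné que ".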
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go.jq b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go.jq
new file mode 100644
index 000000000..72ef6170f
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go.jq
@@ -0,0 +1,110 @@
+. as $root
+| (
+ [ to_entries[]
+ | [
+ "\t",(.key|@json),": &Dialect{\n",
+ "\t\t", (.key|@json),", ", (.value.name|@json),", ", (.value.native|@json), ", map[string][]string{\n"
+ ] + (
+ [ .value
+ | {"feature","rule","background","scenario","scenarioOutline","examples","given","when","then","and","but"}
+ | to_entries[]
+ | "\t\t\t"+(.key), ": {\n",
+ ([ .value[] | "\t\t\t\t", @json, ",\n" ]|add),
+ "\t\t\t},\n"
+ ]
+ ) + [
+ "\t\t},\n",
+ "\t\tmap[string]messages.StepKeywordType{\n"
+ ] + (
+ [ .value.given
+ | (
+ [ .[] | select(. != "* ") |
+ "\t\t\t",
+ @json,
+ ": messages.StepKeywordType_CONTEXT",
+ ",\n\n"
+ ] | add
+ ),
+ ""
+ ]
+ +
+ [ .value.when
+ | (
+ [ .[] | select(. != "* ") |
+ "\t\t\t",
+ @json,
+ ": messages.StepKeywordType_ACTION",
+ ",\n\n"
+ ] | add
+ ),
+ ""
+ ]
+ +
+ [ .value.then
+ | (
+ [ .[] | select(. != "* ") |
+ "\t\t\t",
+ @json,
+ ": messages.StepKeywordType_OUTCOME",
+ ",\n\n"
+ ] | add
+ ),
+ ""
+ ]
+ +
+ [ .value.and
+ | (
+ [ .[] | select(. != "* ") |
+ "\t\t\t",
+ @json,
+ ": messages.StepKeywordType_CONJUNCTION",
+ ",\n\n"
+ ] | add
+ ),
+ ""
+ ]
+ +
+ [ .value.but
+ | (
+ [ .[] | select(. != "* ") |
+ "\t\t\t",
+ @json,
+ ": messages.StepKeywordType_CONJUNCTION",
+ ",\n\n"
+ ] | add
+ ),
+ ""
+ ]
+ + [
+ "\t\t\t\"* \": messages.StepKeywordType_UNKNOWN,\n"
+ ]
+ ) + [
+ "\t\t}",
+ "},\n"
+ ]
+ | add
+ ]
+ | add
+ )
+| "package gherkin\n\n"
++ "import messages \"github.com/cucumber/messages/go/v21\"\n\n"
++ "// Builtin dialects for " + ([ $root | to_entries[] | .key+" ("+.value.name+")" ] | join(", ")) + "\n"
++ "func DialectsBuiltin() DialectProvider {\n"
++ "\treturn builtinDialects\n"
++ "}\n\n"
++ "const (\n"
++ " feature = \"feature\"\n"
++ " rule = \"rule\"\n"
++ " background = \"background\"\n"
++ " scenario = \"scenario\"\n"
++ " scenarioOutline = \"scenarioOutline\"\n"
++ " examples = \"examples\"\n"
++ " given = \"given\"\n"
++ " when = \"when\"\n"
++ " then = \"then\"\n"
++ " and = \"and\"\n"
++ " but = \"but\"\n"
++ ")\n\n"
++ "var builtinDialects = gherkinDialectMap{\n"
++ .
++ "}"
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/gherkin.go b/vendor/github.com/cucumber/gherkin/go/v26/gherkin.go
new file mode 100644
index 000000000..524d16e11
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/gherkin.go
@@ -0,0 +1,143 @@
+package gherkin
+
+import (
+ "bufio"
+ "fmt"
+ "github.com/cucumber/messages/go/v21"
+ "io"
+ "strings"
+)
+
+type Parser interface {
+ StopAtFirstError(b bool)
+ Parse(s Scanner, m Matcher) (err error)
+}
+
+/*
+The Scanner reads a gherkin doc (typically read from a .feature file) and creates a token for
+each line. The tokens are passed to the parser, which outputs an AST (Abstract Syntax Tree).
+
+If the scanner sees a # language header, it will reconfigure itself dynamically to look for
+Gherkin keywords for the associated language. The keywords are defined in gherkin-languages.json.
+*/
+type Scanner interface {
+ Scan() (line *Line, atEof bool, err error)
+}
+
+type Builder interface {
+ Build(*Token) (bool, error)
+ StartRule(RuleType) (bool, error)
+ EndRule(RuleType) (bool, error)
+ Reset()
+}
+
+type Token struct {
+ Type TokenType
+ Keyword string
+ KeywordType messages.StepKeywordType
+ Text string
+ Items []*LineSpan
+ GherkinDialect string
+ Indent string
+ Location *Location
+}
+
+func (t *Token) IsEOF() bool {
+ return t.Type == TokenTypeEOF
+}
+func (t *Token) String() string {
+ return fmt.Sprintf("%v: %s/%s", t.Type, t.Keyword, t.Text)
+}
+
+type LineSpan struct {
+ Column int
+ Text string
+}
+
+func (l *LineSpan) String() string {
+ return fmt.Sprintf("%d:%s", l.Column, l.Text)
+}
+
+type parser struct {
+ builder Builder
+ stopAtFirstError bool
+}
+
+func NewParser(b Builder) Parser {
+ return &parser{
+ builder: b,
+ }
+}
+
+func (p *parser) StopAtFirstError(b bool) {
+ p.stopAtFirstError = b
+}
+
+func NewScanner(r io.Reader) Scanner {
+ return &scanner{
+ s: bufio.NewScanner(r),
+ line: 0,
+ }
+}
+
+type scanner struct {
+ s *bufio.Scanner
+ line int
+}
+
+func (t *scanner) Scan() (line *Line, atEof bool, err error) {
+ scanning := t.s.Scan()
+ if !scanning {
+ err = t.s.Err()
+ if err == nil {
+ atEof = true
+ }
+ }
+ if err == nil {
+ t.line += 1
+ str := t.s.Text()
+ line = &Line{str, t.line, strings.TrimLeft(str, " \t"), atEof}
+ }
+ return
+}
+
+type Line struct {
+ LineText string
+ LineNumber int
+ TrimmedLineText string
+ AtEof bool
+}
+
+func (g *Line) Indent() int {
+ return len(g.LineText) - len(g.TrimmedLineText)
+}
+
+func (g *Line) IsEmpty() bool {
+ return len(g.TrimmedLineText) == 0
+}
+
+func (g *Line) IsEof() bool {
+ return g.AtEof
+}
+
+func (g *Line) StartsWith(prefix string) bool {
+ return strings.HasPrefix(g.TrimmedLineText, prefix)
+}
+
+func ParseGherkinDocument(in io.Reader, newId func() string) (gherkinDocument *messages.GherkinDocument, err error) {
+ return ParseGherkinDocumentForLanguage(in, DefaultDialect, newId)
+}
+
+func ParseGherkinDocumentForLanguage(in io.Reader, language string, newId func() string) (gherkinDocument *messages.GherkinDocument, err error) {
+
+ builder := NewAstBuilder(newId)
+ parser := NewParser(builder)
+ parser.StopAtFirstError(false)
+ matcher := NewLanguageMatcher(DialectsBuiltin(), language)
+
+ scanner := NewScanner(in)
+
+ err = parser.Parse(scanner, matcher)
+
+ return builder.GetGherkinDocument(), err
+}
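Reviewer note: a minimal usage sketch of the entry points above (newID is a throwaway counter for the example; callers commonly use an ID generator from the messages package instead):

package main

import (
	"fmt"
	"strings"

	gherkin "github.com/cucumber/gherkin/go/v26"
)

func main() {
	src := "Feature: Vendored parser\n" +
		"  Scenario: Parse a document\n" +
		"    Given a feature source\n"

	// Trivial ID generator for the sketch.
	n := 0
	newID := func() string { n++; return fmt.Sprintf("%d", n) }

	doc, err := gherkin.ParseGherkinDocument(strings.NewReader(src), newID)
	if err != nil {
		fmt.Println(err) // aggregated parse errors; see parser.go below
		return
	}
	fmt.Println(doc.Feature.Name) // "Vendored parser"
}

Because this helper sets StopAtFirstError(false), a bad document yields a single aggregated error value with one line-and-column message per offending line; for the input "not gherkin\n" it prints:

Parser errors:
(1:1): expected: #EOF, #Language, #TagLine, #FeatureLine, #Comment, #Empty, got 'not gherkin'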
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/matcher.go b/vendor/github.com/cucumber/gherkin/go/v26/matcher.go
new file mode 100644
index 000000000..fda4e6852
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/matcher.go
@@ -0,0 +1,301 @@
+package gherkin
+
+import (
+ "regexp"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+const (
+ DefaultDialect = "en"
+ CommentPrefix = "#"
+ TagPrefix = "@"
+ TitleKeywordSeparator = ":"
+ TableCellSeparator = '|'
+ EscapeChar = '\\'
+ EscapedNewline = 'n'
+ DocstringSeparator = "\"\"\""
+ DocstringAlternativeSeparator = "```"
+)
+
+type matcher struct {
+ gdp DialectProvider
+ defaultLang string
+ lang string
+ dialect *Dialect
+ activeDocStringSeparator string
+ indentToRemove int
+ languagePattern *regexp.Regexp
+}
+
+func NewMatcher(gdp DialectProvider) Matcher {
+ return &matcher{
+ gdp: gdp,
+ defaultLang: DefaultDialect,
+ lang: DefaultDialect,
+ dialect: gdp.GetDialect(DefaultDialect),
+ languagePattern: regexp.MustCompile("^\\s*#\\s*language\\s*:\\s*([a-zA-Z\\-_]+)\\s*$"),
+ }
+}
+
+func NewLanguageMatcher(gdp DialectProvider, language string) Matcher {
+ return &matcher{
+ gdp: gdp,
+ defaultLang: language,
+ lang: language,
+ dialect: gdp.GetDialect(language),
+ languagePattern: regexp.MustCompile("^\\s*#\\s*language\\s*:\\s*([a-zA-Z\\-_]+)\\s*$"),
+ }
+}
+
+func (m *matcher) Reset() {
+ m.indentToRemove = 0
+ m.activeDocStringSeparator = ""
+ if m.lang != "en" {
+ m.dialect = m.gdp.GetDialect(m.defaultLang)
+ m.lang = "en"
+ }
+}
+
+func (m *matcher) newTokenAtLocation(line, index int) (token *Token) {
+ column := index + 1
+ token = new(Token)
+ token.GherkinDialect = m.lang
+ token.Location = &Location{line, column}
+ return
+}
+
+func (m *matcher) MatchEOF(line *Line) (ok bool, token *Token, err error) {
+ if line.IsEof() {
+ token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
+ token.Type = TokenTypeEOF
+ }
+ return
+}
+
+func (m *matcher) MatchEmpty(line *Line) (ok bool, token *Token, err error) {
+ if line.IsEmpty() {
+ token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
+ token.Type = TokenTypeEmpty
+ }
+ return
+}
+
+func (m *matcher) MatchComment(line *Line) (ok bool, token *Token, err error) {
+ if line.StartsWith(CommentPrefix) {
+ token, ok = m.newTokenAtLocation(line.LineNumber, 0), true
+ token.Type = TokenTypeComment
+ token.Text = line.LineText
+ }
+ return
+}
+
+func (m *matcher) MatchTagLine(line *Line) (ok bool, token *Token, err error) {
+ if !line.StartsWith(TagPrefix) {
+ return
+ }
+ commentDelimiter := regexp.MustCompile(`\s+` + CommentPrefix)
+ uncommentedLine := commentDelimiter.Split(line.TrimmedLineText, 2)[0]
+ var tags []*LineSpan
+ var column = line.Indent() + 1
+
+ splits := strings.Split(uncommentedLine, TagPrefix)
+ for i := range splits {
+ txt := strings.TrimRightFunc(splits[i], func(r rune) bool {
+ return unicode.IsSpace(r)
+ })
+ if len(txt) == 0 {
+ continue
+ }
+ if !regexp.MustCompile(`^\S+$`).MatchString(txt) {
+ location := &Location{line.LineNumber, column}
+ msg := "A tag may not contain whitespace"
+ err = &parseError{msg, location}
+ break
+ }
+ tags = append(tags, &LineSpan{column, TagPrefix + txt})
+ column = column + utf8.RuneCountInString(splits[i]) + 1
+ }
+
+ token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
+ token.Type = TokenTypeTagLine
+ token.Items = tags
+
+ return
+}
+
+func (m *matcher) matchTitleLine(line *Line, tokenType TokenType, keywords []string) (ok bool, token *Token, err error) {
+ for i := range keywords {
+ keyword := keywords[i]
+ if line.StartsWith(keyword + TitleKeywordSeparator) {
+ token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
+ token.Type = tokenType
+ token.Keyword = keyword
+ token.Text = strings.Trim(line.TrimmedLineText[len(keyword)+1:], " ")
+ return
+ }
+ }
+ return
+}
+
+func (m *matcher) MatchFeatureLine(line *Line) (ok bool, token *Token, err error) {
+ return m.matchTitleLine(line, TokenTypeFeatureLine, m.dialect.FeatureKeywords())
+}
+func (m *matcher) MatchRuleLine(line *Line) (ok bool, token *Token, err error) {
+ return m.matchTitleLine(line, TokenTypeRuleLine, m.dialect.RuleKeywords())
+}
+func (m *matcher) MatchBackgroundLine(line *Line) (ok bool, token *Token, err error) {
+ return m.matchTitleLine(line, TokenTypeBackgroundLine, m.dialect.BackgroundKeywords())
+}
+func (m *matcher) MatchScenarioLine(line *Line) (ok bool, token *Token, err error) {
+ ok, token, err = m.matchTitleLine(line, TokenTypeScenarioLine, m.dialect.ScenarioKeywords())
+ if ok || (err != nil) {
+ return ok, token, err
+ }
+ ok, token, err = m.matchTitleLine(line, TokenTypeScenarioLine, m.dialect.ScenarioOutlineKeywords())
+ return ok, token, err
+}
+func (m *matcher) MatchExamplesLine(line *Line) (ok bool, token *Token, err error) {
+ return m.matchTitleLine(line, TokenTypeExamplesLine, m.dialect.ExamplesKeywords())
+}
+func (m *matcher) MatchStepLine(line *Line) (ok bool, token *Token, err error) {
+ keywords := m.dialect.StepKeywords()
+ for i := range keywords {
+ keyword := keywords[i]
+ if line.StartsWith(keyword) {
+ token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
+ token.Type = TokenTypeStepLine
+ token.Keyword = keyword
+ token.KeywordType = m.dialect.StepKeywordType(keyword)
+ token.Text = strings.Trim(line.TrimmedLineText[len(keyword):], " ")
+ return
+ }
+ }
+ return
+}
+
+func (m *matcher) MatchDocStringSeparator(line *Line) (ok bool, token *Token, err error) {
+ if m.activeDocStringSeparator != "" {
+ if line.StartsWith(m.activeDocStringSeparator) {
+ // close
+ token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
+ token.Type = TokenTypeDocStringSeparator
+ token.Keyword = m.activeDocStringSeparator
+
+ m.indentToRemove = 0
+ m.activeDocStringSeparator = ""
+ }
+ return
+ }
+ if line.StartsWith(DocstringSeparator) {
+ m.activeDocStringSeparator = DocstringSeparator
+ } else if line.StartsWith(DocstringAlternativeSeparator) {
+ m.activeDocStringSeparator = DocstringAlternativeSeparator
+ }
+ if m.activeDocStringSeparator != "" {
+ // open
+ mediaType := line.TrimmedLineText[len(m.activeDocStringSeparator):]
+ m.indentToRemove = line.Indent()
+ token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
+ token.Type = TokenTypeDocStringSeparator
+ token.Keyword = m.activeDocStringSeparator
+ token.Text = mediaType
+ }
+ return
+}
+
+func isSpaceAndNotNewLine(r rune) bool {
+ return unicode.IsSpace(r) && r != '\n'
+}
+
+func (m *matcher) MatchTableRow(line *Line) (ok bool, token *Token, err error) {
+ var firstChar, firstPos = utf8.DecodeRuneInString(line.TrimmedLineText)
+ if firstChar == TableCellSeparator {
+ var cells []*LineSpan
+ var cell []rune
+ var startCol = line.Indent() + 2 // column where the current cell started
+ // start after the first separator, it's not included in the cell
+ for i, w, col := firstPos, 0, startCol; i < len(line.TrimmedLineText); i += w {
+ var char rune
+ char, w = utf8.DecodeRuneInString(line.TrimmedLineText[i:])
+ if char == TableCellSeparator {
+ // append current cell
+ txt := string(cell)
+
+ txtTrimmedLeadingSpace := strings.TrimLeftFunc(txt, isSpaceAndNotNewLine)
+ ind := utf8.RuneCountInString(txt) - utf8.RuneCountInString(txtTrimmedLeadingSpace)
+ txtTrimmed := strings.TrimRightFunc(txtTrimmedLeadingSpace, isSpaceAndNotNewLine)
+ cells = append(cells, &LineSpan{startCol + ind, txtTrimmed})
+ // start building next
+ cell = make([]rune, 0)
+ startCol = col + 1
+ } else if char == EscapeChar {
+ // skip this character but count the column
+ i += w
+ col++
+ char, w = utf8.DecodeRuneInString(line.TrimmedLineText[i:])
+ if char == EscapedNewline {
+ cell = append(cell, '\n')
+ } else {
+ if char != TableCellSeparator && char != EscapeChar {
+ cell = append(cell, EscapeChar)
+ }
+ cell = append(cell, char)
+ }
+ } else {
+ cell = append(cell, char)
+ }
+ col++
+ }
+
+ token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
+ token.Type = TokenTypeTableRow
+ token.Items = cells
+ }
+ return
+}
+
+func (m *matcher) MatchLanguage(line *Line) (ok bool, token *Token, err error) {
+ matches := m.languagePattern.FindStringSubmatch(line.TrimmedLineText)
+ if len(matches) > 0 {
+ lang := matches[1]
+ token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
+ token.Type = TokenTypeLanguage
+ token.Text = lang
+
+ dialect := m.gdp.GetDialect(lang)
+ if dialect == nil {
+ err = &parseError{"Language not supported: " + lang, token.Location}
+ } else {
+ m.lang = lang
+ m.dialect = dialect
+ }
+ }
+ return
+}
+
+func (m *matcher) MatchOther(line *Line) (ok bool, token *Token, err error) {
+ token, ok = m.newTokenAtLocation(line.LineNumber, 0), true
+ token.Type = TokenTypeOther
+
+ element := line.LineText
+ txt := strings.TrimLeft(element, " ")
+
+ if len(element)-len(txt) > m.indentToRemove {
+ token.Text = m.unescapeDocString(element[m.indentToRemove:])
+ } else {
+ token.Text = m.unescapeDocString(txt)
+ }
+ return
+}
+
+func (m *matcher) unescapeDocString(text string) string {
+ if m.activeDocStringSeparator == DocstringSeparator {
+ return strings.Replace(text, "\\\"\\\"\\\"", DocstringSeparator, -1)
+ }
+ if m.activeDocStringSeparator == DocstringAlternativeSeparator {
+ return strings.Replace(text, "\\`\\`\\`", DocstringAlternativeSeparator, -1)
+ }
+ return text
+}
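Reviewer note: the escape handling in MatchTableRow is easiest to see on a concrete row. A sketch in example-test form (in-package; it assumes the package's Matcher interface exposes MatchTableRow, which the generated parser relies on): \| produces a literal pipe, \n a newline, \\ a single backslash, and any other escaped character keeps its backslash.

package gherkin

import "fmt"

// Reviewer-oriented sketch, not part of the vendored sources.
func ExampleMatchTableRow() {
	m := NewMatcher(DialectsBuiltin())
	row := `| a\|b | c\nd | e\\f |`
	line := &Line{LineText: row, LineNumber: 1, TrimmedLineText: row}
	if ok, token, _ := m.MatchTableRow(line); ok {
		for _, cell := range token.Items {
			fmt.Printf("%q\n", cell.Text)
		}
	}
	// Output:
	// "a|b"
	// "c\nd"
	// "e\\f"
}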
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/messages.go b/vendor/github.com/cucumber/gherkin/go/v26/messages.go
new file mode 100644
index 000000000..a3b7c1b71
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/messages.go
@@ -0,0 +1,125 @@
+package gherkin
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/cucumber/messages/go/v21"
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+func Messages(
+ paths []string,
+ decoder *json.Decoder,
+ language string,
+ includeSource bool,
+ includeGherkinDocument bool,
+ includePickles bool,
+ encoder *json.Encoder,
+ newId func() string,
+) ([]messages.Envelope, error) {
+ var result []messages.Envelope
+ var err error
+
+ handleMessage := func(result []messages.Envelope, message *messages.Envelope) ([]messages.Envelope, error) {
+ if encoder != nil {
+ err = encoder.Encode(message)
+ return result, err
+ } else {
+ result = append(result, *message)
+ }
+
+ return result, err
+ }
+
+ processSource := func(source *messages.Source) error {
+ if includeSource {
+ result, err = handleMessage(result, &messages.Envelope{
+ Source: source,
+ })
+ }
+ doc, err := ParseGherkinDocumentForLanguage(strings.NewReader(source.Data), language, newId)
+ if errs, ok := err.(parseErrors); ok {
+ // expected parse errors
+ for _, e := range errs {
+ if pe, ok := e.(*parseError); ok {
+ result, err = handleMessage(result, pe.asMessage(source.Uri))
+ if err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("parse feature file: %s, unexpected error: %+v", source.Uri, e)
+ }
+ }
+ return nil
+ }
+
+ if includeGherkinDocument {
+ doc.Uri = source.Uri
+ result, err = handleMessage(result, &messages.Envelope{
+ GherkinDocument: doc,
+ })
+ }
+
+ if includePickles {
+ for _, pickle := range Pickles(*doc, source.Uri, newId) {
+ result, err = handleMessage(result, &messages.Envelope{
+ Pickle: pickle,
+ })
+ }
+ }
+ return nil
+ }
+
+ if len(paths) == 0 {
+ for {
+ envelope := &messages.Envelope{}
+ err := decoder.Decode(envelope)
+ //marshal, err := json.Marshal(envelope)
+ //fmt.Println(string(marshal))
+ if err == io.EOF {
+ break
+ }
+
+ if envelope.Source != nil {
+ err = processSource(envelope.Source)
+ if err != nil {
+ return result, err
+ }
+ }
+ }
+ } else {
+ for _, path := range paths {
+ in, err := ioutil.ReadFile(path)
+ if err != nil {
+ return result, fmt.Errorf("read feature file: %s - %+v", path, err)
+ }
+ source := &messages.Source{
+ Uri: path,
+ Data: string(in),
+ MediaType: "text/x.cucumber.gherkin+plain",
+ }
+ if err := processSource(source); err != nil {
+ return result, err
+ }
+ }
+ }
+
+ return result, err
+}
+
+func (a *parseError) asMessage(uri string) *messages.Envelope {
+ return &messages.Envelope{
+ ParseError: &messages.ParseError{
+ Message: a.Error(),
+ Source: &messages.SourceReference{
+ Uri: uri,
+ Location: &messages.Location{
+ Line: int64(a.loc.Line),
+ Column: int64(a.loc.Column),
+ },
+ },
+ },
+ }
+}
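Reviewer note: a minimal sketch of the path-based mode of Messages (signature as above; "example.feature" is a placeholder path). With a non-nil encoder the envelopes are streamed as NDJSON rather than collected in the returned slice.

package main

import (
	"encoding/json"
	"fmt"
	"os"

	gherkin "github.com/cucumber/gherkin/go/v26"
)

func main() {
	n := 0
	newID := func() string { n++; return fmt.Sprintf("%d", n) }

	enc := json.NewEncoder(os.Stdout)
	_, err := gherkin.Messages(
		[]string{"example.feature"}, // placeholder input path
		nil,   // decoder: only consulted when no paths are given
		"en",  // default dialect
		true,  // includeSource
		true,  // includeGherkinDocument
		true,  // includePickles
		enc,   // encoder: stream envelopes; pass nil to collect them instead
		newID,
	)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}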
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/parser.go b/vendor/github.com/cucumber/gherkin/go/v26/parser.go
new file mode 100644
index 000000000..570e4babe
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/parser.go
@@ -0,0 +1,4654 @@
+//
+// This file is generated. Do not edit! Edit parser.go.razor instead.
+
+package gherkin
+
+import (
+ "fmt"
+ "strings"
+)
+
+type TokenType int
+
+const (
+ TokenTypeNone TokenType = iota
+ TokenTypeEOF
+ TokenTypeEmpty
+ TokenTypeComment
+ TokenTypeTagLine
+ TokenTypeFeatureLine
+ TokenTypeRuleLine
+ TokenTypeBackgroundLine
+ TokenTypeScenarioLine
+ TokenTypeExamplesLine
+ TokenTypeStepLine
+ TokenTypeDocStringSeparator
+ TokenTypeTableRow
+ TokenTypeLanguage
+ TokenTypeOther
+)
+
+func tokenTypeForRule(rt RuleType) TokenType {
+ return TokenTypeNone
+}
+
+func (t TokenType) Name() string {
+ switch t {
+ case TokenTypeEOF:
+ return "EOF"
+ case TokenTypeEmpty:
+ return "Empty"
+ case TokenTypeComment:
+ return "Comment"
+ case TokenTypeTagLine:
+ return "TagLine"
+ case TokenTypeFeatureLine:
+ return "FeatureLine"
+ case TokenTypeRuleLine:
+ return "RuleLine"
+ case TokenTypeBackgroundLine:
+ return "BackgroundLine"
+ case TokenTypeScenarioLine:
+ return "ScenarioLine"
+ case TokenTypeExamplesLine:
+ return "ExamplesLine"
+ case TokenTypeStepLine:
+ return "StepLine"
+ case TokenTypeDocStringSeparator:
+ return "DocStringSeparator"
+ case TokenTypeTableRow:
+ return "TableRow"
+ case TokenTypeLanguage:
+ return "Language"
+ case TokenTypeOther:
+ return "Other"
+ }
+ return ""
+}
+
+func (t TokenType) RuleType() RuleType {
+ switch t {
+ case TokenTypeEOF:
+ return RuleTypeEOF
+ case TokenTypeEmpty:
+ return RuleTypeEmpty
+ case TokenTypeComment:
+ return RuleTypeComment
+ case TokenTypeTagLine:
+ return RuleTypeTagLine
+ case TokenTypeFeatureLine:
+ return RuleTypeFeatureLine
+ case TokenTypeRuleLine:
+ return RuleTypeRuleLine
+ case TokenTypeBackgroundLine:
+ return RuleTypeBackgroundLine
+ case TokenTypeScenarioLine:
+ return RuleTypeScenarioLine
+ case TokenTypeExamplesLine:
+ return RuleTypeExamplesLine
+ case TokenTypeStepLine:
+ return RuleTypeStepLine
+ case TokenTypeDocStringSeparator:
+ return RuleTypeDocStringSeparator
+ case TokenTypeTableRow:
+ return RuleTypeTableRow
+ case TokenTypeLanguage:
+ return RuleTypeLanguage
+ case TokenTypeOther:
+ return RuleTypeOther
+ }
+ return RuleTypeNone
+}
+
+type RuleType int
+
+const (
+ RuleTypeNone RuleType = iota
+
+ RuleTypeEOF
+ RuleTypeEmpty
+ RuleTypeComment
+ RuleTypeTagLine
+ RuleTypeFeatureLine
+ RuleTypeRuleLine
+ RuleTypeBackgroundLine
+ RuleTypeScenarioLine
+ RuleTypeExamplesLine
+ RuleTypeStepLine
+ RuleTypeDocStringSeparator
+ RuleTypeTableRow
+ RuleTypeLanguage
+ RuleTypeOther
+ RuleTypeGherkinDocument
+ RuleTypeFeature
+ RuleTypeFeatureHeader
+ RuleTypeRule
+ RuleTypeRuleHeader
+ RuleTypeBackground
+ RuleTypeScenarioDefinition
+ RuleTypeScenario
+ RuleTypeExamplesDefinition
+ RuleTypeExamples
+ RuleTypeExamplesTable
+ RuleTypeStep
+ RuleTypeStepArg
+ RuleTypeDataTable
+ RuleTypeDocString
+ RuleTypeTags
+ RuleTypeDescriptionHelper
+ RuleTypeDescription
+)
+
+func (t RuleType) IsEOF() bool {
+ return t == RuleTypeEOF
+}
+func (t RuleType) Name() string {
+ switch t {
+ case RuleTypeEOF:
+ return "#EOF"
+ case RuleTypeEmpty:
+ return "#Empty"
+ case RuleTypeComment:
+ return "#Comment"
+ case RuleTypeTagLine:
+ return "#TagLine"
+ case RuleTypeFeatureLine:
+ return "#FeatureLine"
+ case RuleTypeRuleLine:
+ return "#RuleLine"
+ case RuleTypeBackgroundLine:
+ return "#BackgroundLine"
+ case RuleTypeScenarioLine:
+ return "#ScenarioLine"
+ case RuleTypeExamplesLine:
+ return "#ExamplesLine"
+ case RuleTypeStepLine:
+ return "#StepLine"
+ case RuleTypeDocStringSeparator:
+ return "#DocStringSeparator"
+ case RuleTypeTableRow:
+ return "#TableRow"
+ case RuleTypeLanguage:
+ return "#Language"
+ case RuleTypeOther:
+ return "#Other"
+ case RuleTypeGherkinDocument:
+ return "GherkinDocument"
+ case RuleTypeFeature:
+ return "Feature"
+ case RuleTypeFeatureHeader:
+ return "FeatureHeader"
+ case RuleTypeRule:
+ return "Rule"
+ case RuleTypeRuleHeader:
+ return "RuleHeader"
+ case RuleTypeBackground:
+ return "Background"
+ case RuleTypeScenarioDefinition:
+ return "ScenarioDefinition"
+ case RuleTypeScenario:
+ return "Scenario"
+ case RuleTypeExamplesDefinition:
+ return "ExamplesDefinition"
+ case RuleTypeExamples:
+ return "Examples"
+ case RuleTypeExamplesTable:
+ return "ExamplesTable"
+ case RuleTypeStep:
+ return "Step"
+ case RuleTypeStepArg:
+ return "StepArg"
+ case RuleTypeDataTable:
+ return "DataTable"
+ case RuleTypeDocString:
+ return "DocString"
+ case RuleTypeTags:
+ return "Tags"
+ case RuleTypeDescriptionHelper:
+ return "DescriptionHelper"
+ case RuleTypeDescription:
+ return "Description"
+ }
+ return ""
+}
+
+type Location struct {
+ Line int
+ Column int
+}
+
+type parseError struct {
+ msg string
+ loc *Location
+}
+
+func (a *parseError) Error() string {
+ return fmt.Sprintf("(%d:%d): %s", a.loc.Line, a.loc.Column, a.msg)
+}
+
+type parseErrors []error
+
+func (pe parseErrors) Error() string {
+ var ret = []string{"Parser errors:"}
+ for i := range pe {
+ ret = append(ret, pe[i].Error())
+ }
+ return strings.Join(ret, "\n")
+}
+
+func (p *parser) Parse(s Scanner, m Matcher) (err error) {
+ p.builder.Reset()
+ m.Reset()
+ ctxt := &parseContext{p, s, p.builder, m, nil, nil}
+ var state int
+ ctxt.startRule(RuleTypeGherkinDocument)
+ for {
+ gl, eof, err := ctxt.scan()
+ if err != nil {
+ ctxt.addError(err)
+ if p.stopAtFirstError {
+ break
+ }
+ }
+ state, err = ctxt.match(state, gl)
+ if err != nil {
+ ctxt.addError(err)
+ if p.stopAtFirstError {
+ break
+ }
+ }
+ if eof {
+ // done! \o/
+ break
+ }
+ }
+ ctxt.endRule(RuleTypeGherkinDocument)
+ if len(ctxt.errors) > 0 {
+ return ctxt.errors
+ }
+ return
+}
+
+type parseContext struct {
+ p *parser
+ s Scanner
+ b Builder
+ m Matcher
+ queue []*scanResult
+ errors parseErrors
+}
+
+func (ctxt *parseContext) addError(e error) {
+ ctxt.errors = append(ctxt.errors, e)
+ // if (p.errors.length > 10)
+ // throw Errors.CompositeParserException.create(p.errors);
+}
+
+type scanResult struct {
+ line *Line
+ atEof bool
+ err error
+}
+
+func (ctxt *parseContext) scan() (*Line, bool, error) {
+ l := len(ctxt.queue)
+ if l > 0 {
+ x := ctxt.queue[0]
+ ctxt.queue = ctxt.queue[1:]
+ return x.line, x.atEof, x.err
+ }
+ return ctxt.s.Scan()
+}
+
+func (ctxt *parseContext) startRule(r RuleType) (bool, error) {
+ ok, err := ctxt.b.StartRule(r)
+ if err != nil {
+ ctxt.addError(err)
+ }
+ return ok, err
+}
+
+func (ctxt *parseContext) endRule(r RuleType) (bool, error) {
+ ok, err := ctxt.b.EndRule(r)
+ if err != nil {
+ ctxt.addError(err)
+ }
+ return ok, err
+}
+
+func (ctxt *parseContext) build(t *Token) (bool, error) {
+ ok, err := ctxt.b.Build(t)
+ if err != nil {
+ ctxt.addError(err)
+ }
+ return ok, err
+}
+
+func (ctxt *parseContext) match(state int, line *Line) (newState int, err error) {
+ switch state {
+ case 0:
+ return ctxt.matchAt0(line)
+ case 1:
+ return ctxt.matchAt1(line)
+ case 2:
+ return ctxt.matchAt2(line)
+ case 3:
+ return ctxt.matchAt3(line)
+ case 4:
+ return ctxt.matchAt4(line)
+ case 5:
+ return ctxt.matchAt5(line)
+ case 6:
+ return ctxt.matchAt6(line)
+ case 7:
+ return ctxt.matchAt7(line)
+ case 8:
+ return ctxt.matchAt8(line)
+ case 9:
+ return ctxt.matchAt9(line)
+ case 10:
+ return ctxt.matchAt10(line)
+ case 11:
+ return ctxt.matchAt11(line)
+ case 12:
+ return ctxt.matchAt12(line)
+ case 13:
+ return ctxt.matchAt13(line)
+ case 14:
+ return ctxt.matchAt14(line)
+ case 15:
+ return ctxt.matchAt15(line)
+ case 16:
+ return ctxt.matchAt16(line)
+ case 17:
+ return ctxt.matchAt17(line)
+ case 18:
+ return ctxt.matchAt18(line)
+ case 19:
+ return ctxt.matchAt19(line)
+ case 20:
+ return ctxt.matchAt20(line)
+ case 21:
+ return ctxt.matchAt21(line)
+ case 22:
+ return ctxt.matchAt22(line)
+ case 23:
+ return ctxt.matchAt23(line)
+ case 24:
+ return ctxt.matchAt24(line)
+ case 25:
+ return ctxt.matchAt25(line)
+ case 26:
+ return ctxt.matchAt26(line)
+ case 27:
+ return ctxt.matchAt27(line)
+ case 28:
+ return ctxt.matchAt28(line)
+ case 29:
+ return ctxt.matchAt29(line)
+ case 30:
+ return ctxt.matchAt30(line)
+ case 31:
+ return ctxt.matchAt31(line)
+ case 32:
+ return ctxt.matchAt32(line)
+ case 33:
+ return ctxt.matchAt33(line)
+ case 34:
+ return ctxt.matchAt34(line)
+ case 35:
+ return ctxt.matchAt35(line)
+ case 36:
+ return ctxt.matchAt36(line)
+ case 37:
+ return ctxt.matchAt37(line)
+ case 38:
+ return ctxt.matchAt38(line)
+ case 39:
+ return ctxt.matchAt39(line)
+ case 40:
+ return ctxt.matchAt40(line)
+ case 41:
+ return ctxt.matchAt41(line)
+ case 43:
+ return ctxt.matchAt43(line)
+ case 44:
+ return ctxt.matchAt44(line)
+ case 45:
+ return ctxt.matchAt45(line)
+ case 46:
+ return ctxt.matchAt46(line)
+ case 47:
+ return ctxt.matchAt47(line)
+ case 48:
+ return ctxt.matchAt48(line)
+ case 49:
+ return ctxt.matchAt49(line)
+ case 50:
+ return ctxt.matchAt50(line)
+ default:
+ return state, fmt.Errorf("Unknown state: %+v", state)
+ }
+}
+
+// Start
+func (ctxt *parseContext) matchAt0(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchLanguage(line); ok {
+ ctxt.startRule(RuleTypeFeature)
+ ctxt.startRule(RuleTypeFeatureHeader)
+ ctxt.build(token)
+ return 1, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.startRule(RuleTypeFeature)
+ ctxt.startRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 2, err
+ }
+ if ok, token, err := ctxt.matchFeatureLine(line); ok {
+ ctxt.startRule(RuleTypeFeature)
+ ctxt.startRule(RuleTypeFeatureHeader)
+ ctxt.build(token)
+ return 3, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 0, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 0, err
+ }
+
+ // var stateComment = "State: 0 - Start"
+ var expectedTokens = []string{"#EOF", "#Language", "#TagLine", "#FeatureLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 0, err
+}
+
+// GherkinDocument:0>Feature:0>FeatureHeader:0>#Language:0
+func (ctxt *parseContext) matchAt1(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 2, err
+ }
+ if ok, token, err := ctxt.matchFeatureLine(line); ok {
+ ctxt.build(token)
+ return 3, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 1, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 1, err
+ }
+
+ // var stateComment = "State: 1 - GherkinDocument:0>Feature:0>FeatureHeader:0>#Language:0"
+ var expectedTokens = []string{"#TagLine", "#FeatureLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 1, err
+}
+
+// GherkinDocument:0>Feature:0>FeatureHeader:1>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt2(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.build(token)
+ return 2, err
+ }
+ if ok, token, err := ctxt.matchFeatureLine(line); ok {
+ ctxt.endRule(RuleTypeTags)
+ ctxt.build(token)
+ return 3, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 2, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 2, err
+ }
+
+ // var stateComment = "State: 2 - GherkinDocument:0>Feature:0>FeatureHeader:1>Tags:0>#TagLine:0"
+ var expectedTokens = []string{"#TagLine", "#FeatureLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 2, err
+}
+
+// GherkinDocument:0>Feature:0>FeatureHeader:2>#FeatureLine:0
+func (ctxt *parseContext) matchAt3(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 3, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 5, err
+ }
+ if ok, token, err := ctxt.matchBackgroundLine(line); ok {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeBackground)
+ ctxt.build(token)
+ return 6, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.startRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 4, err
+ }
+
+ // var stateComment = "State: 3 - GherkinDocument:0>Feature:0>FeatureHeader:2>#FeatureLine:0"
+ var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 3, err
+}
+
+// GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt4(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 5, err
+ }
+ if ok, token, err := ctxt.matchBackgroundLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeBackground)
+ ctxt.build(token)
+ return 6, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 4, err
+ }
+
+ // var stateComment = "State: 4 - GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:1>Description:0>#Other:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 4, err
+}
+
+// GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt5(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 5, err
+ }
+ if ok, token, err := ctxt.matchBackgroundLine(line); ok {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeBackground)
+ ctxt.build(token)
+ return 6, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeFeatureHeader)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 5, err
+ }
+
+ // var stateComment = "State: 5 - GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:2>#Comment:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 5, err
+}
+
+// GherkinDocument:0>Feature:1>Background:0>#BackgroundLine:0
+func (ctxt *parseContext) matchAt6(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 6, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 8, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 9, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.startRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 7, err
+ }
+
+ // var stateComment = "State: 6 - GherkinDocument:0>Feature:1>Background:0>#BackgroundLine:0"
+ var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 6, err
+}
+
+// GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt7(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 8, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 9, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 7, err
+ }
+
+ // var stateComment = "State: 7 - GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:1>Description:0>#Other:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 7, err
+}
+
+// GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt8(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 8, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 9, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 8, err
+ }
+
+ // var stateComment = "State: 8 - GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:2>#Comment:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 8, err
+}
+
+// GherkinDocument:0>Feature:1>Background:2>Step:0>#StepLine:0
+func (ctxt *parseContext) matchAt9(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.startRule(RuleTypeDataTable)
+ ctxt.build(token)
+ return 10, err
+ }
+ if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+ ctxt.startRule(RuleTypeDocString)
+ ctxt.build(token)
+ return 49, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 9, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 9, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 9, err
+ }
+
+ // var stateComment = "State: 9 - GherkinDocument:0>Feature:1>Background:2>Step:0>#StepLine:0"
+ var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 9, err
+}
+
+// GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt10(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.build(token)
+ return 10, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 9, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 10, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 10, err
+ }
+
+ // var stateComment = "State: 10 - GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0"
+ var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 10, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:0>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt11(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.build(token)
+ return 11, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeTags)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 11, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 11, err
+ }
+
+ // var stateComment = "State: 11 - GherkinDocument:0>Feature:2>ScenarioDefinition:0>Tags:0>#TagLine:0"
+ var expectedTokens = []string{"#TagLine", "#ScenarioLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 11, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0
+func (ctxt *parseContext) matchAt12(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 14, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 15, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 17, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.startRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 13, err
+ }
+
+ // var stateComment = "State: 12 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0"
+ var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 12, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt13(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 14, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 15, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 17, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 13, err
+ }
+
+ // var stateComment = "State: 13 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 13, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt14(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 14, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 15, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 17, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 14, err
+ }
+
+ // var stateComment = "State: 14 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 14, err
+}
+
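+// State 15 is the Step state of a top-level Scenario. A step may carry one
+// argument: a #TableRow opens a DataTable (state 16) and a #DocStringSeparator
+// opens a DocString (state 47). Comments and blank lines keep the state, while
+// the other structural tokens close the Step rule before moving on.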
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0
+func (ctxt *parseContext) matchAt15(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.startRule(RuleTypeDataTable)
+ ctxt.build(token)
+ return 16, err
+ }
+ if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+ ctxt.startRule(RuleTypeDocString)
+ ctxt.build(token)
+ return 47, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 15, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 17, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 15, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 15, err
+ }
+
+ // var stateComment = "State: 15 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0"
+ var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 15, err
+}
+
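+// State 16 accumulates #TableRow tokens into the open DataTable; any other
+// structural token closes the DataTable and its enclosing Step before the
+// usual Scenario-level transitions apply.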
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt16(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.build(token)
+ return 16, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 15, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 17, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 16, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 16, err
+ }
+
+ // var stateComment = "State: 16 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0"
+ var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 16, err
+}
+
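+// State 17 collects tag lines ahead of an Examples block. There is no #EOF
+// branch here, so tags that are not followed by an #ExamplesLine are a parse
+// error, even at end of file.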
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt17(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.build(token)
+ return 17, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeTags)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 17, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 17, err
+ }
+
+ // var stateComment = "State: 17 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0"
+ var expectedTokens = []string{"#TagLine", "#ExamplesLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 17, err
+}
+
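+// State 18 sits just after an Examples header: blank lines keep the state,
+// a #TableRow opens the ExamplesTable (state 21), and free text (#Other)
+// starts a Description (state 19).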
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0
+func (ctxt *parseContext) matchAt18(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 20, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.startRule(RuleTypeExamplesTable)
+ ctxt.build(token)
+ return 21, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 17, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.startRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 19, err
+ }
+
+ // var stateComment = "State: 18 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0"
+ var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 18, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt19(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 20, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.startRule(RuleTypeExamplesTable)
+ ctxt.build(token)
+ return 21, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 17, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 19, err
+ }
+
+ // var stateComment = "State: 19 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 19, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt20(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 20, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.startRule(RuleTypeExamplesTable)
+ ctxt.build(token)
+ return 21, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 17, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 20, err
+ }
+
+ // var stateComment = "State: 20 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 20, err
+}
+
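+// State 21 consumes the rows of an ExamplesTable. Leaving the table closes
+// three rules at once (ExamplesTable, Examples, ExamplesDefinition) before the
+// enclosing Scenario-level transitions are applied.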
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt21(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.build(token)
+ return 21, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 17, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 21, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 21, err
+ }
+
+ // var stateComment = "State: 21 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0"
+ var expectedTokens = []string{"#EOF", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 21, err
+}
+
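+// From here on the generated states cover Rule sections. They mirror the
+// Feature-level states above, with extra RuleTypeRule/RuleTypeRuleHeader
+// bookkeeping on every transition that enters or leaves a Rule.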
+// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:0>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt22(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeTags)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 22, err
+ }
+
+ // var stateComment = "State: 22 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:0>Tags:0>#TagLine:0"
+ var expectedTokens = []string{"#TagLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 22, err
+}
+
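+// State 23 follows a #RuleLine. Rules do not nest: a further #RuleLine, or a
+// tag block introducing one, closes the current Rule and immediately opens a
+// new one, which is why endRule(RuleTypeRule) and startRule(RuleTypeRule)
+// appear back to back below.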
+// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:1>#RuleLine:0
+func (ctxt *parseContext) matchAt23(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 25, err
+ }
+ if ok, token, err := ctxt.matchBackgroundLine(line); ok {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeBackground)
+ ctxt.build(token)
+ return 26, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.startRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 24, err
+ }
+
+ // var stateComment = "State: 23 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:1>#RuleLine:0"
+ var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 23, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt24(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 25, err
+ }
+ if ok, token, err := ctxt.matchBackgroundLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeBackground)
+ ctxt.build(token)
+ return 26, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 24, err
+ }
+
+ // var stateComment = "State: 24 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:1>Description:0>#Other:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 24, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt25(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 25, err
+ }
+ if ok, token, err := ctxt.matchBackgroundLine(line); ok {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeBackground)
+ ctxt.build(token)
+ return 26, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeRuleHeader)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 25, err
+ }
+
+ // var stateComment = "State: 25 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:2>#Comment:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 25, err
+}
+
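+// State 26 is the Background header inside a Rule: steps move to state 29,
+// while a #ScenarioLine or a scenario tag block ends the Background and starts
+// a ScenarioDefinition.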
+// GherkinDocument:0>Feature:3>Rule:1>Background:0>#BackgroundLine:0
+func (ctxt *parseContext) matchAt26(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 26, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 28, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 29, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.startRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 27, err
+ }
+
+ // var stateComment = "State: 26 - GherkinDocument:0>Feature:3>Rule:1>Background:0>#BackgroundLine:0"
+ var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 26, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt27(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 28, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 29, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 27, err
+ }
+
+ // var stateComment = "State: 27 - GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:1>Description:0>#Other:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 27, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt28(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 28, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 29, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 28, err
+ }
+
+ // var stateComment = "State: 28 - GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:2>#Comment:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 28, err
+}
+
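+// State 29 is a Step inside a Rule's Background. Its DocString detour goes to
+// state 45 rather than 47; each Step context appears to get its own DocString
+// sub-state so the parser can route back to the right place after the closing
+// separator.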
+// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:0>#StepLine:0
+func (ctxt *parseContext) matchAt29(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.startRule(RuleTypeDataTable)
+ ctxt.build(token)
+ return 30, err
+ }
+ if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+ ctxt.startRule(RuleTypeDocString)
+ ctxt.build(token)
+ return 45, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 29, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 29, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 29, err
+ }
+
+ // var stateComment = "State: 29 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:0>#StepLine:0"
+ var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 29, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt30(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.build(token)
+ return 30, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 29, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 30, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 30, err
+ }
+
+ // var stateComment = "State: 30 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0"
+ var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 30, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:0>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt31(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.build(token)
+ return 31, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeTags)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 31, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 31, err
+ }
+
+ // var stateComment = "State: 31 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:0>Tags:0>#TagLine:0"
+ var expectedTokens = []string{"#TagLine", "#ScenarioLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 31, err
+}
+
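+// State 32 follows a #ScenarioLine inside a Rule. It mirrors the top-level
+// Scenario state 12, but with Rule-scoped successors: steps go to 35 and
+// Examples blocks to 37/38.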
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0
+func (ctxt *parseContext) matchAt32(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 34, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 35, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 37, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.startRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 33, err
+ }
+
+ // var stateComment = "State: 32 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0"
+ var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 32, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt33(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 34, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 35, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 37, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 33, err
+ }
+
+ // var stateComment = "State: 33 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 33, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt34(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 34, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 35, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 37, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 34, err
+ }
+
+ // var stateComment = "State: 34 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+	// No matcher accepted the line: return a parse error listing the expected tokens and keep the current state.
+ return 34, err
+}
+
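+// State 35 is a Step in a Rule-scoped Scenario: its DataTable lives in state
+// 36 and its DocString in state 43, the third of the per-context DocString
+// states (47, 45, 43) seen in this file.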
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0
+func (ctxt *parseContext) matchAt35(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.startRule(RuleTypeDataTable)
+ ctxt.build(token)
+ return 36, err
+ }
+ if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+ ctxt.startRule(RuleTypeDocString)
+ ctxt.build(token)
+ return 43, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 35, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 37, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 35, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 35, err
+ }
+
+ // var stateComment = "State: 35 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0"
+ var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 35, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt36(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.build(token)
+ return 36, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 35, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 37, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDataTable)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 36, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 36, err
+ }
+
+ // var stateComment = "State: 36 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0"
+ var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 36, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt37(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.build(token)
+ return 37, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeTags)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 37, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 37, err
+ }
+
+ // var stateComment = "State: 37 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0"
+ var expectedTokens = []string{"#TagLine", "#ExamplesLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 37, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0
+func (ctxt *parseContext) matchAt38(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 40, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.startRule(RuleTypeExamplesTable)
+ ctxt.build(token)
+ return 41, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 37, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.startRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 39, err
+ }
+
+ // var stateComment = "State: 38 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0"
+ var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 38, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt39(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.build(token)
+ return 40, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.startRule(RuleTypeExamplesTable)
+ ctxt.build(token)
+ return 41, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 37, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDescription)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 39, err
+ }
+
+ // var stateComment = "State: 39 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 39, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt40(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 40, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.startRule(RuleTypeExamplesTable)
+ ctxt.build(token)
+ return 41, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 37, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 40, err
+ }
+
+ // var stateComment = "State: 40 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0"
+ var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 40, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt41(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchTableRow(line); ok {
+ ctxt.build(token)
+ return 41, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 37, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeExamplesTable)
+ ctxt.endRule(RuleTypeExamples)
+ ctxt.endRule(RuleTypeExamplesDefinition)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 41, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 41, err
+ }
+
+ // var stateComment = "State: 41 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0"
+ var expectedTokens = []string{"#EOF", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 41, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt43(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+ ctxt.build(token)
+ return 44, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 43, err
+ }
+
+ // var stateComment = "State: 43 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0"
+ var expectedTokens = []string{"#DocStringSeparator", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 43, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt44(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 35, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 37, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 38, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 44, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 44, err
+ }
+
+ // var stateComment = "State: 44 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0"
+ var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 44, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt45(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+ ctxt.build(token)
+ return 46, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 45, err
+ }
+
+ // var stateComment = "State: 45 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0"
+ var expectedTokens = []string{"#DocStringSeparator", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 45, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt46(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 29, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 31, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 32, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 46, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 46, err
+ }
+
+ // var stateComment = "State: 46 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0"
+ var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 46, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt47(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+ ctxt.build(token)
+ return 48, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 47, err
+ }
+
+ // var stateComment = "State: 47 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0"
+ var expectedTokens = []string{"#DocStringSeparator", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 47, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt48(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 15, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead1(line) {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 17, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchExamplesLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeExamplesDefinition)
+ ctxt.startRule(RuleTypeExamples)
+ ctxt.build(token)
+ return 18, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeScenario)
+ ctxt.endRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 48, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 48, err
+ }
+
+ // var stateComment = "State: 48 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0"
+ var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 48, err
+}
+
+// GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt49(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+ ctxt.build(token)
+ return 50, err
+ }
+ if ok, token, err := ctxt.matchOther(line); ok {
+ ctxt.build(token)
+ return 49, err
+ }
+
+ // var stateComment = "State: 49 - GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0"
+ var expectedTokens = []string{"#DocStringSeparator", "#Other"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 49, err
+}
+
+// GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt50(line *Line) (newState int, err error) {
+ if ok, token, err := ctxt.matchEOF(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.endRule(RuleTypeFeature)
+ ctxt.build(token)
+ return 42, err
+ }
+ if ok, token, err := ctxt.matchStepLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.startRule(RuleTypeStep)
+ ctxt.build(token)
+ return 9, err
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ if ctxt.lookahead0(line) {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 11, err
+ }
+ }
+ if ok, token, err := ctxt.matchTagLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.startRule(RuleTypeTags)
+ ctxt.build(token)
+ return 22, err
+ }
+ if ok, token, err := ctxt.matchScenarioLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeScenarioDefinition)
+ ctxt.startRule(RuleTypeScenario)
+ ctxt.build(token)
+ return 12, err
+ }
+ if ok, token, err := ctxt.matchRuleLine(line); ok {
+ ctxt.endRule(RuleTypeDocString)
+ ctxt.endRule(RuleTypeStep)
+ ctxt.endRule(RuleTypeBackground)
+ ctxt.startRule(RuleTypeRule)
+ ctxt.startRule(RuleTypeRuleHeader)
+ ctxt.build(token)
+ return 23, err
+ }
+ if ok, token, err := ctxt.matchComment(line); ok {
+ ctxt.build(token)
+ return 50, err
+ }
+ if ok, token, err := ctxt.matchEmpty(line); ok {
+ ctxt.build(token)
+ return 50, err
+ }
+
+ // var stateComment = "State: 50 - GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0"
+ var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return 50, err
+}
+
+type Matcher interface {
+ MatchEOF(line *Line) (bool, *Token, error)
+ MatchEmpty(line *Line) (bool, *Token, error)
+ MatchComment(line *Line) (bool, *Token, error)
+ MatchTagLine(line *Line) (bool, *Token, error)
+ MatchFeatureLine(line *Line) (bool, *Token, error)
+ MatchRuleLine(line *Line) (bool, *Token, error)
+ MatchBackgroundLine(line *Line) (bool, *Token, error)
+ MatchScenarioLine(line *Line) (bool, *Token, error)
+ MatchExamplesLine(line *Line) (bool, *Token, error)
+ MatchStepLine(line *Line) (bool, *Token, error)
+ MatchDocStringSeparator(line *Line) (bool, *Token, error)
+ MatchTableRow(line *Line) (bool, *Token, error)
+ MatchLanguage(line *Line) (bool, *Token, error)
+ MatchOther(line *Line) (bool, *Token, error)
+ Reset()
+}
+
+func (ctxt *parseContext) isMatchEOF(line *Line) bool {
+ ok, _, _ := ctxt.matchEOF(line)
+ return ok
+}
+func (ctxt *parseContext) matchEOF(line *Line) (bool, *Token, error) {
+ return ctxt.m.MatchEOF(line)
+}
+
+func (ctxt *parseContext) isMatchEmpty(line *Line) bool {
+ ok, _, _ := ctxt.matchEmpty(line)
+ return ok
+}
+func (ctxt *parseContext) matchEmpty(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchEmpty(line)
+}
+
+func (ctxt *parseContext) isMatchComment(line *Line) bool {
+ ok, _, _ := ctxt.matchComment(line)
+ return ok
+}
+func (ctxt *parseContext) matchComment(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchComment(line)
+}
+
+func (ctxt *parseContext) isMatchTagLine(line *Line) bool {
+ ok, _, _ := ctxt.matchTagLine(line)
+ return ok
+}
+func (ctxt *parseContext) matchTagLine(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchTagLine(line)
+}
+
+func (ctxt *parseContext) isMatchFeatureLine(line *Line) bool {
+ ok, _, _ := ctxt.matchFeatureLine(line)
+ return ok
+}
+func (ctxt *parseContext) matchFeatureLine(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchFeatureLine(line)
+}
+
+func (ctxt *parseContext) isMatchRuleLine(line *Line) bool {
+ ok, _, _ := ctxt.matchRuleLine(line)
+ return ok
+}
+func (ctxt *parseContext) matchRuleLine(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchRuleLine(line)
+}
+
+func (ctxt *parseContext) isMatchBackgroundLine(line *Line) bool {
+ ok, _, _ := ctxt.matchBackgroundLine(line)
+ return ok
+}
+func (ctxt *parseContext) matchBackgroundLine(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchBackgroundLine(line)
+}
+
+func (ctxt *parseContext) isMatchScenarioLine(line *Line) bool {
+ ok, _, _ := ctxt.matchScenarioLine(line)
+ return ok
+}
+func (ctxt *parseContext) matchScenarioLine(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchScenarioLine(line)
+}
+
+func (ctxt *parseContext) isMatchExamplesLine(line *Line) bool {
+ ok, _, _ := ctxt.matchExamplesLine(line)
+ return ok
+}
+func (ctxt *parseContext) matchExamplesLine(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchExamplesLine(line)
+}
+
+func (ctxt *parseContext) isMatchStepLine(line *Line) bool {
+ ok, _, _ := ctxt.matchStepLine(line)
+ return ok
+}
+func (ctxt *parseContext) matchStepLine(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchStepLine(line)
+}
+
+func (ctxt *parseContext) isMatchDocStringSeparator(line *Line) bool {
+ ok, _, _ := ctxt.matchDocStringSeparator(line)
+ return ok
+}
+func (ctxt *parseContext) matchDocStringSeparator(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchDocStringSeparator(line)
+}
+
+func (ctxt *parseContext) isMatchTableRow(line *Line) bool {
+ ok, _, _ := ctxt.matchTableRow(line)
+ return ok
+}
+func (ctxt *parseContext) matchTableRow(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchTableRow(line)
+}
+
+func (ctxt *parseContext) isMatchLanguage(line *Line) bool {
+ ok, _, _ := ctxt.matchLanguage(line)
+ return ok
+}
+func (ctxt *parseContext) matchLanguage(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchLanguage(line)
+}
+
+func (ctxt *parseContext) isMatchOther(line *Line) bool {
+ ok, _, _ := ctxt.matchOther(line)
+ return ok
+}
+func (ctxt *parseContext) matchOther(line *Line) (bool, *Token, error) {
+ if line.IsEof() {
+ return false, nil, nil
+ }
+ return ctxt.m.MatchOther(line)
+}
+
+func (ctxt *parseContext) lookahead0(initialLine *Line) bool {
+ var queue []*scanResult
+ var match bool
+
+ for {
+ line, atEof, err := ctxt.scan()
+ queue = append(queue, &scanResult{line, atEof, err})
+
+ if false || ctxt.isMatchScenarioLine(line) {
+ match = true
+ break
+ }
+ if !(false || ctxt.isMatchEmpty(line) || ctxt.isMatchComment(line) || ctxt.isMatchTagLine(line)) {
+ break
+ }
+ if atEof {
+ break
+ }
+ }
+
+ ctxt.queue = append(ctxt.queue, queue...)
+
+ return match
+}
+
+func (ctxt *parseContext) lookahead1(initialLine *Line) bool {
+ var queue []*scanResult
+ var match bool
+
+ for {
+ line, atEof, err := ctxt.scan()
+ queue = append(queue, &scanResult{line, atEof, err})
+
+ if false || ctxt.isMatchExamplesLine(line) {
+ match = true
+ break
+ }
+ if !(false || ctxt.isMatchEmpty(line) || ctxt.isMatchComment(line) || ctxt.isMatchTagLine(line)) {
+ break
+ }
+ if atEof {
+ break
+ }
+ }
+
+ ctxt.queue = append(ctxt.queue, queue...)
+
+ return match
+}
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/parser.go.razor b/vendor/github.com/cucumber/gherkin/go/v26/parser.go.razor
new file mode 100644
index 000000000..7b173db1f
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/parser.go.razor
@@ -0,0 +1,315 @@
+@using Berp;
+@helper CallProduction(ProductionRule production)
+{
+ switch(production.Type)
+ {
+ case ProductionRuleType.Start:
+ @:ctxt.startRule(@Raw("RuleType" + production.RuleName.Replace("#", "")));
+ break;
+ case ProductionRuleType.End:
+ @:ctxt.endRule(@Raw("RuleType" + production.RuleName.Replace("#", "")));
+ break;
+ case ProductionRuleType.Process:
+ @:ctxt.build(token);
+ break;
+ }
+}
+@helper HandleParserError(IEnumerable<string> expectedTokens, State state)
+{
+ // var stateComment = "State: @state.Id - @Raw(state.Comment)"
+ var expectedTokens = []string{"@Raw(string.Join("\", \"", expectedTokens))"}
+ if line.IsEof() {
+ err = &parseError{
+ msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens,", ")),
+ loc: &Location{Line: line.LineNumber, Column: 0},
+ }
+ } else {
+ err = &parseError{
+ msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens,", "), line.LineText),
+ loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+ }
+ }
+ // if (ctxt.p.stopAtFirstError) throw error;
+ //ctxt.addError(err)
+ return @state.Id, err}
+@helper MatchToken(TokenType tokenType)
+{ctxt.match@(tokenType)(line)}
+@helper IsMatchToken(TokenType tokenType)
+{ctxt.isMatch@(tokenType)(line)}
+@helper TokenConst(Rule rule)
+{@Raw("rule" + rule.Name.Replace("#", "Int"))}
+//
+// This file is generated. Do not edit! Edit parser.go.razor instead.
+
+package gherkin
+
+import (
+ "fmt"
+ "strings"
+)
+
+type TokenType int
+
+const (
+ TokenTypeNone TokenType = iota
+ @foreach(var rule in Model.RuleSet.TokenRules)
+ { @Raw("TokenType" + rule.Name.Replace("#", ""))
+}
+)
+
+func tokenTypeForRule(rt RuleType) TokenType {
+ return TokenTypeNone
+}
+
+func (t TokenType) Name() string {
+ switch t {
+ @foreach(var rule in Model.RuleSet.TokenRules)
+ { case @Raw("TokenType" + rule.Name.Replace("#", "")): return "@Raw(rule.Name.Replace("#", ""))"
+}
+ }
+ return ""
+}
+
+func (t TokenType) RuleType() RuleType {
+ switch t {
+ @foreach(var rule in Model.RuleSet.TokenRules)
+ { case @Raw("TokenType" + rule.Name.Replace("#", "")): return @Raw("RuleType" + rule.Name.Replace("#", ""))
+}
+ }
+ return RuleTypeNone
+}
+
+
+type RuleType int
+
+const (
+ RuleTypeNone RuleType = iota
+
+ @foreach(var rule in Model.RuleSet.Where(r => !r.TempRule))
+ { @Raw("RuleType" + rule.Name.Replace("#", ""))
+}
+)
+
+func (t RuleType) IsEOF() bool {
+ return t == RuleTypeEOF
+}
+func (t RuleType) Name() string {
+ switch t {
+ @foreach(var rule in Model.RuleSet.Where(r => !r.TempRule))
+ { case @Raw("RuleType" + rule.Name.Replace("#", "")): return "@Raw(rule.Name)"
+}
+ }
+ return ""
+}
+
+type Location struct {
+ Line int
+ Column int
+}
+
+type parseError struct {
+ msg string
+ loc *Location
+}
+
+func (a *parseError) Error() string {
+ return fmt.Sprintf("(%d:%d): %s", a.loc.Line, a.loc.Column, a.msg)
+}
+
+type parseErrors []error
+func (pe parseErrors) Error() string {
+ var ret = []string{"Parser errors:"}
+ for i := range pe {
+ ret = append(ret, pe[i].Error())
+ }
+ return strings.Join(ret,"\n")
+}
+
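+// Parse runs the table-driven state machine: every scanned line is dispatched
+// to the current state's match function; errors accumulate unless stopAtFirstError is set.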
+func (p *parser) Parse(s Scanner, m Matcher) (err error) {
+ p.builder.Reset()
+ m.Reset()
+ ctxt := &parseContext{p,s,p.builder,m,nil,nil}
+ var state int
+ ctxt.startRule(@Raw("RuleType" + @Model.RuleSet.StartRule.Name))
+ for {
+ gl, eof, err := ctxt.scan()
+ if err != nil {
+ ctxt.addError(err)
+ if p.stopAtFirstError {
+ break
+ }
+ }
+ state, err = ctxt.match(state, gl)
+ if err != nil {
+ ctxt.addError(err)
+ if p.stopAtFirstError {
+ break
+ }
+ }
+ if eof {
+ // done! \o/
+ break
+ }
+ }
+ ctxt.endRule(@Raw("RuleType" + @Model.RuleSet.StartRule.Name))
+ if len(ctxt.errors) > 0 {
+ return ctxt.errors
+ }
+ return
+}
+
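+// parseContext threads the scanner, AST builder, matcher, lookahead queue and
+// accumulated errors through the generated matchAt functions.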
+type parseContext struct {
+ p *parser
+ s Scanner
+ b Builder
+ m Matcher
+ queue []*scanResult
+ errors parseErrors
+}
+
+func (ctxt *parseContext) addError(e error) {
+ ctxt.errors = append(ctxt.errors, e);
+ // if (p.errors.length > 10)
+ // throw Errors.CompositeParserException.create(p.errors);
+}
+
+type scanResult struct {
+ line *Line
+ atEof bool
+ err error
+}
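+// scan returns the next line, draining lines buffered by the lookahead
+// helpers before reading fresh input from the Scanner.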
+func (ctxt *parseContext) scan() (*Line, bool, error) {
+ l := len(ctxt.queue)
+ if l > 0 {
+ x := ctxt.queue[0]
+ ctxt.queue = ctxt.queue[1:]
+ return x.line, x.atEof, x.err
+ }
+ return ctxt.s.Scan()
+}
+
+func (ctxt *parseContext) startRule(r RuleType) (bool, error) {
+ ok, err := ctxt.b.StartRule(r)
+ if err != nil {
+ ctxt.addError(err)
+ }
+ return ok, err
+}
+
+func (ctxt *parseContext) endRule(r RuleType) (bool, error) {
+ ok, err := ctxt.b.EndRule(r)
+ if err != nil {
+ ctxt.addError(err)
+ }
+ return ok, err
+}
+
+func (ctxt *parseContext) build(t *Token) (bool, error) {
+ ok, err := ctxt.b.Build(t)
+ if err != nil {
+ ctxt.addError(err)
+ }
+ return ok, err
+}
+
+
+func (ctxt *parseContext) match(state int, line *Line) (newState int, err error) {
+ switch(state) {
+ @foreach(var state in Model.States.Values.Where(s => !s.IsEndState))
+ {
+ @:case @state.Id:
+ @:return ctxt.matchAt@(state.Id)(line);
+ }
+ default:
+ return state, fmt.Errorf("Unknown state: %+v", state);
+ }
+}
+
+@foreach(var state in Model.States.Values.Where(s => !s.IsEndState))
+{
+
+ // @Raw(state.Comment)
+func (ctxt *parseContext) matchAt@(state.Id)(line *Line) (newState int, err error) {
+ @foreach(var transition in state.Transitions)
+ {
+ @:if ok, token, err := @MatchToken(transition.TokenType); ok {
+ if (transition.LookAheadHint != null)
+ {
+ @:if ctxt.lookahead@(transition.LookAheadHint.Id)(line) {
+ }
+ foreach(var production in transition.Productions)
+ {
+ @CallProduction(production)
+ }
+ @:return @transition.TargetState, err;
+ if (transition.LookAheadHint != null)
+ {
+ @:}
+ }
+ @:}
+ }
+ @HandleParserError(state.Transitions.Select(t => "#" + t.TokenType.ToString()).Distinct(), state)
+}
+
+}
+
+type Matcher interface {
+ @foreach(var rule in Model.RuleSet.TokenRules)
+ { Match@(rule.Name.Replace("#", ""))(line *Line) (bool,*Token,error)
+}
+ Reset()
+}
+@foreach(var rule in Model.RuleSet.TokenRules)
+{
+
+func (ctxt *parseContext) isMatch@(rule.Name.Replace("#", ""))(line *Line) bool {
+ ok, _, _ := ctxt.match@(rule.Name.Replace("#", ""))(line)
+ return ok
+}
+func (ctxt *parseContext) match@(rule.Name.Replace("#", ""))(line *Line) (bool, *Token, error) {
+ @if (rule.Name != "#EOF")
+ {
+ @:if line.IsEof() {
+ @: return false, nil, nil
+ @:}
+ }
+ return ctxt.m.Match@(rule.Name.Replace("#", ""))(line);
+}
+
+}
+
+@foreach(var lookAheadHint in Model.RuleSet.LookAheadHints)
+{
+
+func (ctxt *parseContext) lookahead@(lookAheadHint.Id)(initialLine *Line) bool {
+ var queue []*scanResult
+ var match bool
+
+ for {
+ line, atEof, err := ctxt.scan();
+ queue = append(queue, &scanResult{line,atEof,err});
+
+ if false @foreach(var tokenType in lookAheadHint.ExpectedTokens) { || @IsMatchToken(tokenType)} {
+ match = true;
+ break
+ }
+ if !(false @foreach(var tokenType in lookAheadHint.Skip) { || @IsMatchToken(tokenType)}) {
+ break
+ }
+ if atEof {
+ break
+ }
+ }
+
+ ctxt.queue = append(ctxt.queue, queue...)
+
+ return match;
+ }
+
+}
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/pickles.go b/vendor/github.com/cucumber/gherkin/go/v26/pickles.go
new file mode 100644
index 000000000..ad3fa84d2
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/pickles.go
@@ -0,0 +1,282 @@
+package gherkin
+
+import (
+ "github.com/cucumber/messages/go/v21"
+ "strings"
+)
+
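+// Pickles compiles a parsed GherkinDocument into a flat list of executable
+// pickles: one per scenario, or one per examples-table row for outlines.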
+func Pickles(gherkinDocument messages.GherkinDocument, uri string, newId func() string) []*messages.Pickle {
+ pickles := make([]*messages.Pickle, 0)
+ if gherkinDocument.Feature == nil {
+ return pickles
+ }
+ language := gherkinDocument.Feature.Language
+
+ pickles = compileFeature(pickles, *gherkinDocument.Feature, uri, language, newId)
+ return pickles
+}
+
+func compileFeature(pickles []*messages.Pickle, feature messages.Feature, uri string, language string, newId func() string) []*messages.Pickle {
+ featureBackgroundSteps := make([]*messages.Step, 0)
+ featureTags := feature.Tags
+ for _, child := range feature.Children {
+ if child.Background != nil {
+ featureBackgroundSteps = append(featureBackgroundSteps, child.Background.Steps...)
+ }
+ if child.Rule != nil {
+ pickles = compileRule(pickles, child.Rule, featureTags, featureBackgroundSteps, uri, language, newId)
+ }
+ if child.Scenario != nil {
+ if len(child.Scenario.Examples) == 0 {
+ pickles = compileScenario(pickles, featureBackgroundSteps, child.Scenario, featureTags, uri, language, newId)
+ } else {
+ pickles = compileScenarioOutline(pickles, child.Scenario, featureTags, featureBackgroundSteps, uri, language, newId)
+ }
+ }
+ }
+ return pickles
+}
+
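+// compileRule compiles the scenarios nested under a Rule, layering the rule's
+// background steps and tags on top of those inherited from the feature.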
+func compileRule(
+ pickles []*messages.Pickle,
+ rule *messages.Rule,
+ featureTags []*messages.Tag,
+ featureBackgroundSteps []*messages.Step,
+ uri string,
+ language string,
+ newId func() string,
+) []*messages.Pickle {
+ ruleBackgroundSteps := make([]*messages.Step, 0)
+ ruleBackgroundSteps = append(ruleBackgroundSteps, featureBackgroundSteps...)
+ tags := append(featureTags, rule.Tags...)
+
+ for _, child := range rule.Children {
+ if child.Background != nil {
+ ruleBackgroundSteps = append(ruleBackgroundSteps, child.Background.Steps...)
+ }
+ if child.Scenario != nil {
+ if len(child.Scenario.Examples) == 0 {
+ pickles = compileScenario(pickles, ruleBackgroundSteps, child.Scenario, tags, uri, language, newId)
+ } else {
+ pickles = compileScenarioOutline(pickles, child.Scenario, tags, ruleBackgroundSteps, uri, language, newId)
+ }
+ }
+ }
+ return pickles
+
+}
+
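+// compileScenarioOutline emits one pickle per examples-table body row,
+// substituting <column> placeholders in the step text, name and arguments.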
+func compileScenarioOutline(
+ pickles []*messages.Pickle,
+ scenario *messages.Scenario,
+ inheritedTags []*messages.Tag,
+ backgroundSteps []*messages.Step,
+ uri string,
+ language string,
+ newId func() string,
+) []*messages.Pickle {
+ for _, examples := range scenario.Examples {
+ if examples.TableHeader == nil {
+ continue
+ }
+ variableCells := examples.TableHeader.Cells
+ for _, valuesRow := range examples.TableBody {
+ valueCells := valuesRow.Cells
+
+ computedPickleSteps := make([]*messages.PickleStep, 0)
+ pickleBackgroundSteps := make([]*messages.PickleStep, 0)
+
+ if len(scenario.Steps) > 0 {
+ pickleBackgroundSteps = pickleSteps(backgroundSteps, newId)
+ }
+
+ // translate computedPickleSteps based on valuesRow
+ previous := messages.PickleStepType_UNKNOWN
+ for _, step := range scenario.Steps {
+ text := step.Text
+ for i, variableCell := range variableCells {
+ text = strings.Replace(text, "<"+variableCell.Value+">", valueCells[i].Value, -1)
+ }
+
+ pickleStep := pickleStep(step, variableCells, valuesRow, newId, previous)
+ previous = pickleStep.Type
+ computedPickleSteps = append(computedPickleSteps, pickleStep)
+ }
+
+ // translate pickle name
+ name := scenario.Name
+ for i, key := range variableCells {
+ name = strings.Replace(name, "<"+key.Value+">", valueCells[i].Value, -1)
+ }
+
+ if len(computedPickleSteps) > 0 {
+ computedPickleSteps = append(pickleBackgroundSteps, computedPickleSteps...)
+ }
+
+ id := newId()
+ tags := pickleTags(append(inheritedTags, append(scenario.Tags, examples.Tags...)...))
+
+ pickles = append(pickles, &messages.Pickle{
+ Id: id,
+ Uri: uri,
+ Steps: computedPickleSteps,
+ Tags: tags,
+ Name: name,
+ Language: language,
+ AstNodeIds: []string{scenario.Id, valuesRow.Id},
+ })
+ }
+ }
+ return pickles
+}
+
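+// compileScenario emits a single pickle for a plain scenario, prepending the
+// accumulated background steps when the scenario has steps of its own.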
+func compileScenario(
+ pickles []*messages.Pickle,
+ backgroundSteps []*messages.Step,
+ scenario *messages.Scenario,
+ inheritedTags []*messages.Tag,
+ uri string,
+ language string,
+ newId func() string,
+) []*messages.Pickle {
+ steps := make([]*messages.PickleStep, 0)
+ if len(scenario.Steps) > 0 {
+ pickleBackgroundSteps := pickleSteps(backgroundSteps, newId)
+ steps = append(pickleBackgroundSteps, pickleSteps(scenario.Steps, newId)...)
+ }
+ tags := pickleTags(append(inheritedTags, scenario.Tags...))
+ id := newId()
+ pickles = append(pickles, &messages.Pickle{
+ Id: id,
+ Uri: uri,
+ Steps: steps,
+ Tags: tags,
+ Name: scenario.Name,
+ Language: language,
+ AstNodeIds: []string{scenario.Id},
+ })
+ return pickles
+}
+
+func pickleDataTable(table *messages.DataTable, variableCells []*messages.TableCell, valueCells []*messages.TableCell) *messages.PickleTable {
+ pickleTableRows := make([]*messages.PickleTableRow, len(table.Rows))
+ for i, row := range table.Rows {
+ pickleTableCells := make([]*messages.PickleTableCell, len(row.Cells))
+ for j, cell := range row.Cells {
+ pickleTableCells[j] = &messages.PickleTableCell{
+ Value: interpolate(cell.Value, variableCells, valueCells),
+ }
+ }
+ pickleTableRows[i] = &messages.PickleTableRow{Cells: pickleTableCells}
+ }
+ return &messages.PickleTable{Rows: pickleTableRows}
+}
+
+func pickleDocString(docString *messages.DocString, variableCells []*messages.TableCell, valueCells []*messages.TableCell) *messages.PickleDocString {
+ return &messages.PickleDocString{
+ MediaType: interpolate(docString.MediaType, variableCells, valueCells),
+ Content: interpolate(docString.Content, variableCells, valueCells),
+ }
+}
+
+func pickleTags(tags []*messages.Tag) []*messages.PickleTag {
+ ptags := make([]*messages.PickleTag, len(tags))
+ for i, tag := range tags {
+ ptags[i] = &messages.PickleTag{
+ Name: tag.Name,
+ AstNodeId: tag.Id,
+ }
+ }
+ return ptags
+}
+
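+// pickleSteps converts a slice of AST steps into pickle steps, threading each
+// step's resolved type into the next so conjunction keywords (And/But) can
+// inherit the type of the preceding step.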
+func pickleSteps(steps []*messages.Step, newId func() string) []*messages.PickleStep {
+ pickleSteps := make([]*messages.PickleStep, len(steps))
+ previous := messages.PickleStepType_UNKNOWN
+ for i, step := range steps {
+ pickleStep := pickleStep(step, nil, nil, newId, previous)
+ previous = pickleStep.Type
+ pickleSteps[i] = pickleStep
+ }
+ return pickleSteps
+}
+
+func pickleStep(
+ step *messages.Step,
+ variableCells []*messages.TableCell,
+ valuesRow *messages.TableRow,
+ newId func() string,
+ previous messages.PickleStepType,
+) *messages.PickleStep {
+
+ var valueCells []*messages.TableCell
+ if valuesRow != nil {
+ valueCells = valuesRow.Cells
+ }
+
+ pickleStep := &messages.PickleStep{
+ Id: newId(),
+ Text: interpolate(step.Text, variableCells, valueCells),
+ Type: mapType(step.KeywordType, previous),
+ AstNodeIds: []string{step.Id},
+ }
+ if valuesRow != nil {
+ pickleStep.AstNodeIds = append(pickleStep.AstNodeIds, valuesRow.Id)
+ }
+ if step.DataTable != nil {
+ pickleStep.Argument = &messages.PickleStepArgument{
+ DataTable: pickleDataTable(step.DataTable, variableCells, valueCells),
+ }
+ }
+ if step.DocString != nil {
+ pickleStep.Argument = &messages.PickleStepArgument{
+ DocString: pickleDocString(step.DocString, variableCells, valueCells),
+ }
+ }
+ return pickleStep
+}
+
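+// mapType maps a step's keyword type to a pickle step type. Conjunction
+// keywords (And/But) carry no type of their own and resolve to the type of
+// the previous step.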
+func mapType(keywordType messages.StepKeywordType, previous messages.PickleStepType) messages.PickleStepType {
+ switch keywordType {
+ case messages.StepKeywordType_UNKNOWN:
+ return messages.PickleStepType_UNKNOWN
+ case messages.StepKeywordType_CONTEXT:
+ return messages.PickleStepType_CONTEXT
+ case messages.StepKeywordType_ACTION:
+ return messages.PickleStepType_ACTION
+ case messages.StepKeywordType_OUTCOME:
+ return messages.PickleStepType_OUTCOME
+ case messages.StepKeywordType_CONJUNCTION:
+ return previous
+ default:
+ panic("Bad enum value for StepKeywordType")
+ }
+}
+
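+// interpolate replaces every "<variable>" placeholder in s with the value
+// from the corresponding column of the examples table row.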
+func interpolate(s string, variableCells []*messages.TableCell, valueCells []*messages.TableCell) string {
+ if variableCells == nil || valueCells == nil {
+ return s
+ }
+
+ for i, variableCell := range variableCells {
+ s = strings.Replace(s, "<"+variableCell.Value+">", valueCells[i].Value, -1)
+ }
+
+ return s
+}
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/test.feature b/vendor/github.com/cucumber/gherkin/go/v26/test.feature
new file mode 100644
index 000000000..dff77a22b
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/test.feature
@@ -0,0 +1,151 @@
+Feature:
+
+ Scenario: scenario 1
+ Given text
+
+ Scenario: scenario 2
+ Given text
+
+ Scenario: scenario 3
+ Given text
+
+ Scenario: scenario 4
+ Given text
+
+ Scenario: scenario 5
+ Given text
+
+ Scenario: scenario 6
+ Given text
+
+ Scenario: scenario 7
+ Given text
+
+ Scenario: scenario 8
+ Given text
+
+ Scenario: scenario 9
+ Given text
+
+ Scenario: scenario 10
+ Given text
+
+ Scenario: scenario 11
+ Given text
+
+ Scenario: scenario 12
+ Given text
+
+ Scenario: scenario 13
+ Given text
+
+ Scenario: scenario 14
+ Given text
+
+ Scenario: scenario 15
+ Given text
+
+ Scenario: scenario 16
+ Given text
+
+ Scenario: scenario 17
+ Given text
+
+ Scenario: scenario 18
+ Given text
+
+ Scenario: scenario 19
+ Given text
+
+ Scenario: scenario 20
+ Given text
+
+ Scenario: scenario 21
+ Given text
+
+ Scenario: scenario 22
+ Given text
+
+ Scenario: scenario 23
+ Given text
+
+ Scenario: scenario 24
+ Given text
+
+ Scenario: scenario 25
+ Given text
+
+ Scenario: scenario 26
+ Given text
+
+ Scenario: scenario 27
+ Given text
+
+ Scenario: scenario 28
+ Given text
+
+ Scenario: scenario 29
+ Given text
+
+ Scenario: scenario 30
+ Given text
+
+ Scenario: scenario 31
+ Given text
+
+ Scenario: scenario 32
+ Given text
+
+ Scenario: scenario 33
+ Given text
+
+ Scenario: scenario 34
+ Given text
+
+ Scenario: scenario 35
+ Given text
+
+ Scenario: scenario 36
+ Given text
+
+ Scenario: scenario 37
+ Given text
+
+ Scenario: scenario 38
+ Given text
+
+ Scenario: scenario 39
+ Given text
+
+ Scenario: scenario 40
+ Given text
+
+ Scenario: scenario 41
+ Given text
+
+ Scenario: scenario 42
+ Given text
+
+ Scenario: scenario 43
+ Given text
+
+ Scenario: scenario 44
+ Given text
+
+ Scenario: scenario 45
+ Given text
+
+ Scenario: scenario 46
+ Given text
+
+ Scenario: scenario 47
+ Given text
+
+ Scenario: scenario 48
+ Given text
+
+ Scenario: scenario 49
+ Given text
+
+ Scenario: scenario 50
+ Given text
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/test.sh b/vendor/github.com/cucumber/gherkin/go/v26/test.sh
new file mode 100644
index 000000000..97debf647
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/test.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env sh
+
+./bin/gherkin --no-ast --no-pickles test.feature | ./bin/gherkin --no-source --no-ast --json
diff --git a/vendor/github.com/cucumber/godog/.gitignore b/vendor/github.com/cucumber/godog/.gitignore
new file mode 100644
index 000000000..bd77fc9ff
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/.gitignore
@@ -0,0 +1,13 @@
+/cmd/godog/godog
+/example/example
+**/vendor/*
+Gopkg.lock
+Gopkg.toml
+
+.DS_Store
+.idea
+.vscode
+
+_artifacts
+
+vendor
diff --git a/vendor/github.com/cucumber/godog/CHANGELOG.md b/vendor/github.com/cucumber/godog/CHANGELOG.md
new file mode 100644
index 000000000..21a323e0a
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/CHANGELOG.md
@@ -0,0 +1,280 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](http://semver.org).
+
+This document is formatted according to the principles of [Keep A CHANGELOG](http://keepachangelog.com).
+
+## Unreleased
+
+## [v0.15.1]
+
+### Added
+- Step text is added to "step is undefined" error - ([669](https://github.com/cucumber/godog/pull/669) - [vearutop](https://github.com/vearutop))
+- Localisation support ([665](https://github.com/cucumber/godog/pull/665) - [MegaGrindStone](https://github.com/MegaGrindStone))
+- Support for uint types ([695](https://github.com/cucumber/godog/pull/695) - [chengxilo](https://github.com/chengxilo))
+
+### Changed
+- Replace deprecated `::set-output` - ([681](https://github.com/cucumber/godog/pull/681) - [nodeg](https://github.com/nodeg))
+
+### Fixed
+- fix(errors): Fix expected Step argument count for steps with `context.Context` ([679](https://github.com/cucumber/godog/pull/679) - [tigh-latte](https://github.com/tigh-latte))
+- fix(formatter): On concurrent execution, execute formatter at end of Scenario - ([645](https://github.com/cucumber/godog/pull/645) - [tigh-latte](https://github.com/tigh-latte))
+- Pretty printing results now prints the line where the step is declared instead of the line where the handler is declared. ([668](https://github.com/cucumber/godog/pull/668) - [SpencerC](https://github.com/SpencerC))
+- Update honnef.co/go/tools/cmd/staticcheck version in Makefile ([670](https://github.com/cucumber/godog/pull/670) - [RezaZareiii](https://github.com/RezaZareiii))
+- fix: verify dogT exists in the context before using it ([692](https://github.com/cucumber/godog/pull/692) - [cakoolen](https://github.com/cakoolen))
+- fix: change bang to being in README ([687](https://github.com/cucumber/godog/pull/687) - [nahomEagleLion](https://github.com/nahomEagleLion))
+- Mark junit test cases as skipped if no pickle step results available ([597](https://github.com/cucumber/godog/pull/597) - [mrsheepuk](https://github.com/mrsheepuk))
+
+## [v0.15.0]
+
+### Added
+- Improved the type checking of step return types and improved the error messages - ([647](https://github.com/cucumber/godog/pull/647) - [johnlon](https://github.com/johnlon))
+- Ambiguous step definitions will now be detected when strict mode is activated ([636](https://github.com/cucumber/godog/pull/636), [648](https://github.com/cucumber/godog/pull/648) - [johnlon](https://github.com/johnlon))
+- Provide support for attachments / embeddings including a new example in the examples dir - ([623](https://github.com/cucumber/godog/pull/623) - [johnlon](https://github.com/johnlon))
+
+### Changed
+- Formatters now have a `Close` method, and the associated `io.Writer` has changed to `io.WriteCloser`.
+
+## [v0.14.1]
+
+### Added
+- Provide testing.T-compatible interface on test context, allowing usage of assertion libraries such as testify's assert/require - ([571](https://github.com/cucumber/godog/pull/571) - [mrsheepuk](https://github.com/mrsheepuk))
+- Created releasing guidelines - ([608](https://github.com/cucumber/godog/pull/608) - [glibas](https://github.com/glibas))
+
+### Fixed
+- Step duration calculation - ([616](https://github.com/cucumber/godog/pull/616) - [iaroslav-ciupin](https://github.com/iaroslav-ciupin))
+- Invalid memory address or nil pointer dereference in RetrieveFeatures - ([566](https://github.com/cucumber/godog/pull/566) - [corneldamian](https://github.com/corneldamian))
+
+## [v0.14.0]
+### Added
+- Improve ErrSkip handling, add test for Summary and operations order ([584](https://github.com/cucumber/godog/pull/584) - [vearutop](https://github.com/vearutop))
+
+### Fixed
+- Remove line overwriting for scenario outlines in cucumber formatter ([605](https://github.com/cucumber/godog/pull/605) - [glibas](https://github.com/glibas))
+- Remove duplicate warning message ([590](https://github.com/cucumber/godog/pull/590) - [vearutop](https://github.com/vearutop))
+- Updated the base formatter to set a scenario as passed unless there are failed steps ([582](https://github.com/cucumber/godog/pull/582) - [roskee](https://github.com/roskee))
+
+### Changed
+- Update test.yml ([583](https://github.com/cucumber/godog/pull/583) - [vearutop](https://github.com/vearutop))
+
+## [v0.13.0]
+### Added
+- Support for reading feature files from an `fs.FS` ([550](https://github.com/cucumber/godog/pull/550) - [tigh-latte](https://github.com/tigh-latte))
+- Added keyword functions. ([509](https://github.com/cucumber/godog/pull/509) - [otrava7](https://github.com/otrava7))
+- Prefer go test to use of godog cli in README ([548](https://github.com/cucumber/godog/pull/548) - [danielhelfand](https://github.com/danielhelfand))
+- Use `fs.FS` abstraction for filesystem ([550](https://github.com/cucumber/godog/pull/550) - [tigh-latte](https://github.com/tigh-latte))
+- Cancel context for each scenario ([514](https://github.com/cucumber/godog/pull/514) - [draganm](https://github.com/draganm))
+
+### Fixed
+- Improve hooks invocation flow ([568](https://github.com/cucumber/godog/pull/568) - [vearutop](https://github.com/vearutop))
+- Result of testing.T respect strict option ([539](https://github.com/cucumber/godog/pull/539) - [eiel](https://github.com/eiel))
+
+### Changed
+- BREAKING CHANGE: upgraded cucumber and messages dependencies ([515](https://github.com/cucumber/godog/pull/515) - [otrava7](https://github.com/otrava7))
+
+## [v0.12.6]
+### Changed
+- Each scenario is run with a cancellable `context.Context` which is cancelled at the end of the scenario. ([514](https://github.com/cucumber/godog/pull/514) - [draganm](https://github.com/draganm))
+- README example is updated with `context.Context` and `go test` usage. ([477](https://github.com/cucumber/godog/pull/477) - [vearutop](https://github.com/vearutop))
+- Removed deprecation of `godog.BindFlags`. ([498](https://github.com/cucumber/godog/pull/498) - [vearutop](https://github.com/vearutop))
+- Pretty Print when using rules. ([480](https://github.com/cucumber/godog/pull/480) - [dumpsterfireproject](https://github.com/dumpsterfireproject))
+
+### Fixed
+- Fixed a bug which would ignore the context returned from a substep.([488](https://github.com/cucumber/godog/pull/488) - [wichert](https://github.com/wichert))
+- Fixed a bug which would cause a panic when using the pretty formatter with a feature that contained a rule. ([480](https://github.com/cucumber/godog/pull/480) - [dumpsterfireproject](https://github.com/dumpsterfireproject))
+- Multiple invocations of AfterScenario hooks in case of undefined steps. ([494](https://github.com/cucumber/godog/pull/494) - [vearutop](https://github.com/vearutop))
+- Add a check for missing test files and raise a more helpful error. ([468](https://github.com/cucumber/godog/pull/468) - [ALCooper12](https://github.com/ALCooper12))
+- Fix version subcommand. Do not print usage if run subcommand fails. ([475](https://github.com/cucumber/godog/pull/475) - [coopernurse](https://github.com/coopernurse))
+
+### Added
+- Add new option for created features with parsing from byte slices. ([476](https://github.com/cucumber/godog/pull/476) - [akaswenwilk](https://github.com/akaswenwilk))
+
+### Deprecated
+- `godog` CLI tool prints deprecation warning. ([489](https://github.com/cucumber/godog/pull/489) - [vearutop](https://github.com/vearutop))
+
+## [v0.12.5]
+### Changed
+- Changed underlying cobra command setup to return errors instead of calling `os.Exit` directly to enable simpler testing. ([454](https://github.com/cucumber/godog/pull/454) - [mxygem](https://github.com/mxygem))
+- Remove use of deprecated methods from `_examples`. ([460](https://github.com/cucumber/godog/pull/460) - [ricardogarfe](https://github.com/ricardogarfe))
+
+### Fixed
+- Support for go1.18 in `godog` cli mode ([466](https://github.com/cucumber/godog/pull/466) - [vearutop](https://github.com/vearutop))
+
+## [v0.12.4]
+### Added
+- Allow suite-level configuration of steps and hooks ([453](https://github.com/cucumber/godog/pull/453) - [vearutop](https://github.com/vearutop))
+
+## [v0.12.3]
+### Added
+- Automated binary releases with GitHub Actions ([437](https://github.com/cucumber/godog/pull/437) - [vearutop](https://github.com/vearutop))
+- Automated binary versioning with `go install` ([437](https://github.com/cucumber/godog/pull/437) - [vearutop](https://github.com/vearutop))
+- Module with local replace in examples ([437](https://github.com/cucumber/godog/pull/437) - [vearutop](https://github.com/vearutop))
+
+### Changed
+- suggest to use `go install` instead of the deprecated `go get` to install the `godog` binary ([449](https://github.com/cucumber/godog/pull/449) - [dmitris](https://github.com/dmitris))
+
+### Fixed
+- After Scenario hook is called before After Step ([444](https://github.com/cucumber/godog/pull/444) - [vearutop](https://github.com/vearutop))
+- `check-go-version` in Makefile to run on WSL. ([443](https://github.com/cucumber/godog/pull/443) - [mxygem](https://github.com/mxygem))
+
+## [v0.12.2]
+### Fixed
+- Error in `go mod tidy` with `GO111MODULE=off` ([436](https://github.com/cucumber/godog/pull/436) - [vearutop](https://github.com/vearutop))
+
+## [v0.12.1]
+### Fixed
+- Unintended change of behavior in before step hook ([424](https://github.com/cucumber/godog/pull/424) - [nhatthm](https://github.com/nhatthm))
+
+## [v0.12.0]
+### Added
+- Support for step definitions without return ([364](https://github.com/cucumber/godog/pull/364) - [titouanfreville](https://github.com/titouanfreville))
+- Contextualized hooks for scenarios and steps ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop))
+- Step result status in After hook ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop))
+- Support auto converting doc strings to plain strings ([380](https://github.com/cucumber/godog/pull/380) - [chirino](https://github.com/chirino))
+- Use multiple formatters in the same test run ([392](https://github.com/cucumber/godog/pull/392) - [vearutop](https://github.com/vearutop))
+- Added `RetrieveFeatures()` method to `godog.TestSuite` ([276](https://github.com/cucumber/godog/pull/276) - [radtriste](https://github.com/radtriste))
+- Added support to create custom formatters ([372](https://github.com/cucumber/godog/pull/372) - [leviable](https://github.com/leviable))
+
+### Changed
+- Upgraded gherkin-go to v19 and messages-go to v16 ([402](https://github.com/cucumber/godog/pull/402) - [mbow](https://github.com/mbow))
+- Generate simpler snippets that use *godog.DocString and *godog.Table ([379](https://github.com/cucumber/godog/pull/379) - [chirino](https://github.com/chirino))
+
+### Deprecated
+- `ScenarioContext.BeforeScenario`, use `ScenarioContext.Before` ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop))
+- `ScenarioContext.AfterScenario`, use `ScenarioContext.After` ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop))
+- `ScenarioContext.BeforeStep`, use `ScenarioContext.StepContext().Before` ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop))
+- `ScenarioContext.AfterStep`, use `ScenarioContext.StepContext().After` ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop))
+
+### Fixed
+- Incorrect step definition output for Data Tables ([411](https://github.com/cucumber/godog/pull/411) - [karfrank](https://github.com/karfrank))
+- `ScenarioContext.AfterStep` not invoked after a failed case ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop))
+- Can't execute multiple specific scenarios in the same feature file ([414](https://github.com/cucumber/godog/pull/414) - [vearutop](https://github.com/vearutop))
+
+## [v0.11.0]
+### Added
+- Created a simple example for a custom formatter ([330](https://github.com/cucumber/godog/pull/330) - [lonnblad](https://github.com/lonnblad))
+- --format junit:result.xml will now write to result.xml ([331](https://github.com/cucumber/godog/pull/331) - [lonnblad](https://github.com/lonnblad))
+- Added make commands to create artifacts and upload them to a github release ([333](https://github.com/cucumber/godog/pull/333) - [lonnblad](https://github.com/lonnblad))
+- Created release notes and changelog for v0.11.0 ([355](https://github.com/cucumber/godog/pull/355) - [lonnblad](https://github.com/lonnblad))
+- Created v0.11.0-rc2 ([362](https://github.com/cucumber/godog/pull/362) - [lonnblad](https://github.com/lonnblad))
+
+### Changed
+- Added Cobra for the Command Line Interface ([321](https://github.com/cucumber/godog/pull/321) - [lonnblad](https://github.com/lonnblad))
+- Added internal packages for formatters, storage and models ([323](https://github.com/cucumber/godog/pull/323) - [lonnblad](https://github.com/lonnblad))
+- Added an internal package for tags filtering ([326](https://github.com/cucumber/godog/pull/326) - [lonnblad](https://github.com/lonnblad))
+- Added an internal pkg for the builder ([327](https://github.com/cucumber/godog/pull/327) - [lonnblad](https://github.com/lonnblad))
+- Moved the parser code to a new internal pkg ([329](https://github.com/cucumber/godog/pull/329) - [lonnblad](https://github.com/lonnblad))
+- Moved StepDefinition to the formatters pkg ([332](https://github.com/cucumber/godog/pull/332) - [lonnblad](https://github.com/lonnblad))
+- Removed go1.12 and added go1.15 to CI config ([356](https://github.com/cucumber/godog/pull/356) - [lonnblad](https://github.com/lonnblad))
+
+### Fixed
+- Improved the help text of the formatter flag in the run command ([347](https://github.com/cucumber/godog/pull/347) - [lonnblad](https://github.com/lonnblad))
+- Removed $GOPATH from the README.md and updated the example ([349](https://github.com/cucumber/godog/pull/349) - [lonnblad](https://github.com/lonnblad))
+- Fixed the undefined step definitions help ([350](https://github.com/cucumber/godog/pull/350) - [lonnblad](https://github.com/lonnblad))
+- Added a comment regarding running the examples within the $GOPATH ([352](https://github.com/cucumber/godog/pull/352) - [lonnblad](https://github.com/lonnblad))
+- doc(FAQ/TestMain): `testing.M.Run()` is optional ([353](https://github.com/cucumber/godog/pull/353) - [hansbogert](https://github.com/hansbogert))
+- Made a fix for the unstable Randomize Run tests ([354](https://github.com/cucumber/godog/pull/354) - [lonnblad](https://github.com/lonnblad))
+- Fixed an issue when go test is parsing command-line flags ([359](https://github.com/cucumber/godog/pull/359) - [lonnblad](https://github.com/lonnblad))
+- Make pickleStepIDs unique across multiple paths ([366](https://github.com/cucumber/godog/pull/366) - [rickardenglund](https://github.com/rickardenglund))
+
+### Removed
+- Removed deprecated code ([322](https://github.com/cucumber/godog/pull/322) - [lonnblad](https://github.com/lonnblad))
+
+## [v0.10.0]
+### Added
+- Added concurrency support to the pretty formatter ([275](https://github.com/cucumber/godog/pull/275) - [lonnblad](https://github.com/lonnblad))
+- Added concurrency support to the events formatter ([274](https://github.com/cucumber/godog/pull/274) - [lonnblad](https://github.com/lonnblad))
+- Added concurrency support to the cucumber formatter ([273](https://github.com/cucumber/godog/pull/273) - [lonnblad](https://github.com/lonnblad))
+- Added an example for how to use assertion pkgs like testify with godog ([289](https://github.com/cucumber/godog/pull/289) - [lonnblad](https://github.com/lonnblad))
+- Added the new TestSuiteInitializer and ScenarioInitializer ([294](https://github.com/cucumber/godog/pull/294) - [lonnblad](https://github.com/lonnblad))
+- Added an in-mem storage for pickles ([304](https://github.com/cucumber/godog/pull/304) - [lonnblad](https://github.com/lonnblad))
+- Added Pickle and PickleStep results to the in-mem storage ([305](https://github.com/cucumber/godog/pull/305) - [lonnblad](https://github.com/lonnblad))
+- Added features to the in-mem storage ([306](https://github.com/cucumber/godog/pull/306) - [lonnblad](https://github.com/lonnblad))
+- Broke out some code from massive files into new files ([307](https://github.com/cucumber/godog/pull/307) - [lonnblad](https://github.com/lonnblad))
+- Added support for concurrent scenarios ([311](https://github.com/cucumber/godog/pull/311) - [lonnblad](https://github.com/lonnblad))
+
+### Changed
+- Broke out snippets gen and added sorting on method name ([271](https://github.com/cucumber/godog/pull/271) - [lonnblad](https://github.com/lonnblad))
+- Updated so that we run all tests concurrent now ([278](https://github.com/cucumber/godog/pull/278) - [lonnblad](https://github.com/lonnblad))
+- Moved fmt tests to a godog_test pkg and restructured the fmt output tests ([295](https://github.com/cucumber/godog/pull/295) - [lonnblad](https://github.com/lonnblad))
+- Moved builder tests to a godog_test pkg ([296](https://github.com/cucumber/godog/pull/296) - [lonnblad](https://github.com/lonnblad))
+- Made the builder tests run in parallel ([298](https://github.com/cucumber/godog/pull/298) - [lonnblad](https://github.com/lonnblad))
+- Refactored suite_context.go ([300](https://github.com/cucumber/godog/pull/300) - [lonnblad](https://github.com/lonnblad))
+- Added better testing of the Context Initializers and TestSuite{}.Run() ([301](https://github.com/cucumber/godog/pull/301) - [lonnblad](https://github.com/lonnblad))
+- Updated the README.md ([302](https://github.com/cucumber/godog/pull/302) - [lonnblad](https://github.com/lonnblad))
+- Unexported some exported properties in unexported structs ([303](https://github.com/cucumber/godog/pull/303) - [lonnblad](https://github.com/lonnblad))
+- Refactored some states in the formatters and feature struct ([310](https://github.com/cucumber/godog/pull/310) - [lonnblad](https://github.com/lonnblad))
+
+### Deprecated
+- Deprecated SuiteContext and ConcurrentFormatter ([314](https://github.com/cucumber/godog/pull/314) - [lonnblad](https://github.com/lonnblad))
+
+### Fixed
+- Fixed failing builder tests due to the v0.9.0 change ([lonnblad](https://github.com/lonnblad))
+- Update paths to screenshots for examples ([270](https://github.com/cucumber/godog/pull/270) - [leviable](https://github.com/leviable))
+- Made progress formatter verification a bit more accurate ([lonnblad](https://github.com/lonnblad))
+- Added comparison between single and multi threaded runs ([272](https://github.com/cucumber/godog/pull/272) - [lonnblad](https://github.com/lonnblad))
+- Fixed issue with empty feature file causing nil pointer deref ([288](https://github.com/cucumber/godog/pull/288) - [lonnblad](https://github.com/lonnblad))
+- Updated linting checks in circleci config and fixed linting issues ([290](https://github.com/cucumber/godog/pull/290) - [lonnblad](https://github.com/lonnblad))
+- Readded some legacy doc for FeatureContext ([297](https://github.com/cucumber/godog/pull/297) - [lonnblad](https://github.com/lonnblad))
+- Fixed an issue with calculating time for junit testsuite ([308](https://github.com/cucumber/godog/pull/308) - [lonnblad](https://github.com/lonnblad))
+- Fixed so that we don't execute features with zero scenarios ([315](https://github.com/cucumber/godog/pull/315) - [lonnblad](https://github.com/lonnblad))
+- Fixed the broken --random flag ([317](https://github.com/cucumber/godog/pull/317) - [lonnblad](https://github.com/lonnblad))
+
+### Removed
+- Removed pre go112 build code ([293](https://github.com/cucumber/godog/pull/293) - [lonnblad](https://github.com/lonnblad))
+- Removed the deprecated feature hooks ([312](https://github.com/cucumber/godog/pull/312) - [lonnblad](https://github.com/lonnblad))
+
+## [0.9.0]
+### Changed
+- Run godog features in CircleCI in strict mode ([mxygem](https://github.com/mxygem))
+- Removed TestMain call in `suite_test.go` for CI. ([mxygem](https://github.com/mxygem))
+- Migrated to [gherkin-go - v11.0.0](https://github.com/cucumber/gherkin-go/releases/tag/v11.0.0). ([240](https://github.com/cucumber/godog/pull/240) - [lonnblad](https://github.com/lonnblad))
+
+### Fixed
+- Fixed the time attributes in the JUnit formatter. ([232](https://github.com/cucumber/godog/pull/232) - [lonnblad](https://github.com/lonnblad))
+- Re enable custom formatters. ([238](https://github.com/cucumber/godog/pull/238) - [ericmcbride](https://github.com/ericmcbride))
+- Added back suite_test.go ([mxygem](https://github.com/mxygem))
+- Normalise module paths for use on Windows ([242](https://github.com/cucumber/godog/pull/242) - [gjtaylor](https://github.com/gjtaylor))
+- Fixed panic in indenting function `s` ([247](https://github.com/cucumber/godog/pull/247) - [titouanfreville](https://github.com/titouanfreville))
+- Fixed wrong version in API example ([263](https://github.com/cucumber/godog/pull/263) - [denis-trofimov](https://github.com/denis-trofimov))
+
+## [0.8.1]
+### Added
+- Link in Readme to the Slack community. ([210](https://github.com/cucumber/godog/pull/210) - [smikulcik](https://github.com/smikulcik))
+- Added run tests for Cucumber formatting. ([214](https://github.com/cucumber/godog/pull/214), [216](https://github.com/cucumber/godog/pull/216) - [lonnblad](https://github.com/lonnblad))
+
+### Changed
+- Renamed the `examples` directory to `_examples`, removing dependencies from the Go module ([218](https://github.com/cucumber/godog/pull/218) - [axw](https://github.com/axw))
+
+### Fixed
+- Find/Replaced references to DATA-DOG/godog -> cucumber/godog for docs. ([209](https://github.com/cucumber/godog/pull/209) - [smikulcik](https://github.com/smikulcik))
+- Fixed missing links in changelog to be correctly included! ([mxygem](https://github.com/mxygem))
+
+## [0.8.0]
+### Added
+- Added initial CircleCI config. ([mxygem](https://github.com/mxygem))
+- Added concurrency support for JUnit formatting ([lonnblad](https://github.com/lonnblad))
+
+### Changed
+- Changed code references to DATA-DOG/godog to cucumber/godog to help get things building correctly. ([mxygem](https://github.com/mxygem))
+
+[v0.15.1]: https://github.com/cucumber/godog/compare/v0.15.0...v0.15.1
+[v0.15.0]: https://github.com/cucumber/godog/compare/v0.14.1...v0.15.0
+[v0.14.1]: https://github.com/cucumber/godog/compare/v0.14.0...v0.14.1
+[v0.14.0]: https://github.com/cucumber/godog/compare/v0.13.0...v0.14.0
+[v0.13.0]: https://github.com/cucumber/godog/compare/v0.12.6...v0.13.0
+[v0.12.6]: https://github.com/cucumber/godog/compare/v0.12.5...v0.12.6
+[v0.12.5]: https://github.com/cucumber/godog/compare/v0.12.4...v0.12.5
+[v0.12.4]: https://github.com/cucumber/godog/compare/v0.12.3...v0.12.4
+[v0.12.3]: https://github.com/cucumber/godog/compare/v0.12.2...v0.12.3
+[v0.12.2]: https://github.com/cucumber/godog/compare/v0.12.1...v0.12.2
+[v0.12.1]: https://github.com/cucumber/godog/compare/v0.12.0...v0.12.1
+[v0.12.0]: https://github.com/cucumber/godog/compare/v0.11.0...v0.12.0
+[v0.11.0]: https://github.com/cucumber/godog/compare/v0.10.0...v0.11.0
+[v0.10.0]: https://github.com/cucumber/godog/compare/v0.9.0...v0.10.0
+[0.9.0]: https://github.com/cucumber/godog/compare/v0.8.1...v0.9.0
+[0.8.1]: https://github.com/cucumber/godog/compare/v0.8.0...v0.8.1
+[0.8.0]: https://github.com/cucumber/godog/compare/v0.7.13...v0.8.0
diff --git a/vendor/github.com/cucumber/godog/CHANGELOG_OLD.md b/vendor/github.com/cucumber/godog/CHANGELOG_OLD.md
new file mode 100644
index 000000000..070337965
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/CHANGELOG_OLD.md
@@ -0,0 +1,113 @@
+# Change LOG
+
+**2020-02-06**
+- move to new [CHANGELOG.md](CHANGELOG.md)
+
+**2020-01-31**
+- change license to MIT and moving project repository to **cucumber**
+ organization.
+
+**2018-11-16**
+- added formatter output test suite, currently mainly pretty format
+ tested.
+- these tests, helped to identify some output format issues.
+
+**2018-11-12**
+- proper go module support added for `godog` command build.
+- added build tests.
+
+**2018-10-27**
+- support go1.11 new compiler and linker changes for **godog** command.
+- support go1.11 modules and `go mod` builds.
+- `BindFlags` now has a prefix option for flags, so that the `go test` command
+ can avoid flag name collisions.
+- `BindFlags` respects the default options provided for binding, so that it
+ does not override predefined options when flags are bound, see #144.
+- Minor patch to support tag filters on example tables for
+ ScenarioOutline.
+- Minor patch for the pretty printer: when a scenario had no steps, the
+ comment position computation panicked.
+
+**2018-03-04**
+- support go1.10 new compiler and linker changes for **godog** command.
+
+**2017-08-31**
+- added **BeforeFeature** and **AfterFeature** hooks.
+- a failed multistep error is now prepended with the parent step text in order
+ to determine the failed nested step.
+- pretty format now removes the step definition location package name in the
+ comment next to a step if the step definition matches the tested package. If
+ the step definition is imported from another package, the full package name
+ will be printed.
+
+**2017-05-04**
+- added the **--strict** option in order to fail the suite when there are
+ pending or undefined steps. By default, the suite passes and treats pending
+ or undefined steps as TODOs.
+
+**2017-04-29** - **v0.7.0**
+- added support for nested steps. From now on, it is possible to return
+ **godog.Steps** instead of an **error** in the step definition func.
+ This change introduced a few minor changes in the **Formatter** interface. Be
+ sure to adapt to the changes if you have custom formatters.
+
+**2017-04-27**
+- added an option to randomize scenario execution order, so we could
+ ensure that scenarios do not depend on global state.
+- godog was manually sorting feature files by name. Now it just runs them
+ in the given order; you may sort them any way you like. For example `godog
+ $(find . -name '*.feature' | sort)`
+
+**2016-10-30** - **v0.6.0**
+- added experimental **events** format; this might be used for unified
+ cucumber formats, but it should not be widely adopted yet, since it is highly
+ likely that the specification will change.
+- added the **RunWithOptions** method, which makes it easy to run godog from
+ **TestMain** without needing to simulate flag arguments. These options
+ also allow configuring the output writer.
+- added flag **-o, --output=runner.binary** which only compiles the test
+ runner executable, but does not execute it.
+- **FlagSet** initialization now takes an io.Writer as the output for help
+ text. It was not showing nice colors on Windows before. The
+ **--no-colors** option only applies to test run output.
+
+**2016-06-14** - **v0.5.0**
+- godog now uses **go tool compile** and **go tool link** to support
+ vendor directory dependencies. It also compiles the test executable the same
+ way as the standard **go test** utility. With this change, only go
+ versions **1.5** and later are supported.
+
+**2016-06-01**
+- parse flags in the main command, to show version and help without needing
+ to compile the test package and buildable go sources.
+
+**2016-05-28**
+- show a nicely formatted func name and file path for called steps
+
+**2016-05-26**
+- pack the gherkin dependency in a subpackage to prevent compatibility
+ conflicts in the future. If you upgraded recently, you will probably need to
+ reference gherkin as `github.com/DATA-DOG/godog/gherkin` instead.
+
+**2016-05-25**
+- refactored the test suite build tooling to use the standard **go test**
+ tool, which allows compiling a package with the godog runner script in an
+ idiomatic **go** way. It also supports all the usual build environment options.
+- **godog.Run** now returns an **int** exit status. It was not returning
+ anything before, so there are no compatibility breaks.
+
+**2016-03-04**
+- added **junit** compatible output formatter, which prints **xml**
+ results to **os.Stdout**
+- fixed #14, which skipped printing background steps when there was a
+ scenario outline in the feature.
+
+**2015-07-03**
+- changed **godog.Suite** from an interface to a struct. Context registration should be updated accordingly. The reason
+for the change: since it exports the same methods and there is no need to mock a function in tests, there is no
+obvious reason to keep an interface.
+- in order to support running suites concurrently, the application entry point needed to be refactored. The **Run** method
+is now a func of the godog package which initializes and runs the suite (or several suites). The **New** method is removed. This
+change made godog a little cleaner.
+- renamed the **RegisterFormatter** func to **Format** to be more consistent.
+
diff --git a/vendor/github.com/cucumber/godog/CONTRIBUTING.md b/vendor/github.com/cucumber/godog/CONTRIBUTING.md
new file mode 100644
index 000000000..c21ee4263
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/CONTRIBUTING.md
@@ -0,0 +1,28 @@
+# Welcome 💖
+
+Before anything else, thank you for taking some of your precious time to help this project move forward. ❤️
+
+If you're new to open source and feeling a bit nervous 😳, we understand! We recommend watching [this excellent guide](https://egghead.io/talks/git-how-to-make-your-first-open-source-contribution)
+to give you a grounding in some of the basic concepts. You could also watch [this talk](https://www.youtube.com/watch?v=tuSk6dMoTIs) from our very own wonderful [Marit van Dijk](https://github.com/mlvandijk) on her experiences contributing to Cucumber.
+
+We want you to feel safe to make mistakes and ask questions. If anything in this guide or anywhere else in the codebase doesn't make sense to you, please let us know! It's through your feedback that we can make this codebase more welcoming, so we'll be glad to hear your thoughts.
+
+You can chat with us in the `#committers` channel in our [community Discord](https://cucumber.io/docs/community/get-in-touch/#discord), or feel free to [raise an issue] if you're experiencing any friction trying to make your contribution.
+
+## Setup
+
+To get your development environment set up, you'll need to [install Go]. We're currently using version 1.17 for development.
+
+Once that's done, try running the tests:
+
+ make test
+
+If everything passes, you're ready to hack!
+
+[install go]: https://golang.org/doc/install
+[community Discord]: https://cucumber.io/community#discord
+[raise an issue]: https://github.com/cucumber/godog/issues/new/choose
+
+## Changing dependencies
+
+If dependencies have changed, you will also need to update the _examples module. `go mod tidy` should be sufficient.
\ No newline at end of file
diff --git a/vendor/github.com/cucumber/godog/LICENSE b/vendor/github.com/cucumber/godog/LICENSE
new file mode 100644
index 000000000..97dcbd65f
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) SmartBear
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cucumber/godog/Makefile b/vendor/github.com/cucumber/godog/Makefile
new file mode 100644
index 000000000..06c95c4e0
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/Makefile
@@ -0,0 +1,77 @@
+.PHONY: test gherkin bump cover
+
+VERS ?= $(shell git symbolic-ref -q --short HEAD || git describe --tags --exact-match)
+
+GO_MAJOR_VERSION = $(shell go version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f1)
+GO_MINOR_VERSION = $(shell go version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f2)
+MINIMUM_SUPPORTED_GO_MAJOR_VERSION = 1
+MINIMUM_SUPPORTED_GO_MINOR_VERSION = 16
+GO_VERSION_VALIDATION_ERR_MSG = Go version $(GO_MAJOR_VERSION).$(GO_MINOR_VERSION) is not supported, please update to at least $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION).$(MINIMUM_SUPPORTED_GO_MINOR_VERSION)
+
+.PHONY: check-go-version
+check-go-version:
+ @if [ $(GO_MAJOR_VERSION) -gt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \
+ exit 0 ;\
+ elif [ $(GO_MAJOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \
+ echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\
+ exit 1; \
+ elif [ $(GO_MINOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MINOR_VERSION) ] ; then \
+ echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\
+ exit 1; \
+ fi
+
+test: check-go-version
+ @echo "running all tests"
+ @go fmt ./...
+ @go run honnef.co/go/tools/cmd/staticcheck@v0.5.1 github.com/cucumber/godog
+ @go run honnef.co/go/tools/cmd/staticcheck@v0.5.1 github.com/cucumber/godog/cmd/godog
+ go vet ./...
+ go test -race ./...
+ go run ./cmd/godog -f progress -c 4
+
+gherkin:
+ @if [ -z "$(VERS)" ]; then echo "Provide gherkin version like: 'VERS=commit-hash'"; exit 1; fi
+ @rm -rf gherkin
+ @mkdir gherkin
+ @curl -s -L https://github.com/cucumber/gherkin-go/tarball/$(VERS) | tar -C gherkin -zx --strip-components 1
+ @rm -rf gherkin/{.travis.yml,.gitignore,*_test.go,gherkin-generate*,*.razor,*.jq,Makefile,CONTRIBUTING.md}
+
+bump:
+ @if [ -z "$(VERSION)" ]; then echo "Provide version like: 'VERSION=$(VERS) make bump'"; exit 1; fi
+ @echo "bumping version from: $(VERS) to $(VERSION)"
+ @sed -i.bak 's/$(VERS)/$(VERSION)/g' godog.go
+ @sed -i.bak 's/$(VERS)/$(VERSION)/g' _examples/api/features/version.feature
+ @find . -name '*.bak' | xargs rm
+
+cover:
+ go test -race -coverprofile=coverage.txt
+ go tool cover -html=coverage.txt
+ rm coverage.txt
+
+ARTIFACT_DIR := _artifacts
+
+# To upload artifacts for the current version;
+# execute: make upload
+#
+# Check https://github.com/tcnksm/ghr for usage of ghr
+upload: artifacts
+ ghr -replace $(VERS) $(ARTIFACT_DIR)
+
+# To build artifacts for the current version;
+# execute: make artifacts
+artifacts:
+ rm -rf $(ARTIFACT_DIR)
+ mkdir $(ARTIFACT_DIR)
+
+ $(call _build,darwin,amd64)
+ $(call _build,linux,amd64)
+ $(call _build,linux,arm64)
+
+define _build
+ mkdir $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2
+ env GOOS=$1 GOARCH=$2 go build -ldflags "-X github.com/cucumber/godog.Version=$(VERS)" -o $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2/godog ./cmd/godog
+ cp README.md $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2/README.md
+ cp LICENSE $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2/LICENSE
+ cd $(ARTIFACT_DIR) && tar -c --use-compress-program="pigz --fast" -f godog-$(VERS)-$1-$2.tar.gz godog-$(VERS)-$1-$2 && cd ..
+ rm -rf $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2
+endef
diff --git a/vendor/github.com/cucumber/godog/README.md b/vendor/github.com/cucumber/godog/README.md
new file mode 100644
index 000000000..dceacf167
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/README.md
@@ -0,0 +1,583 @@
+[](https://vshymanskyy.github.io/StandWithUkraine)
+[](https://github.com/cucumber/godog/actions?query=branch%3Amain+workflow%3Atest)
+[](https://pkg.go.dev/github.com/cucumber/godog)
+[](https://codecov.io/gh/cucumber/godog)
+[](https://oselvar.com/github/cucumber/oselvar-github-metrics/main/cucumber/godog)
+[](https://oselvar.com/github/cucumber/oselvar-github-metrics/main/cucumber/godog)
+
+# Godog
+
+
+
+**The API is likely to change a few times before we reach 1.0.0**
+
+Please read the full README; you may find it very useful. And do not forget to peek into the [Release Notes](https://github.com/cucumber/godog/blob/master/release-notes) and the [CHANGELOG](https://github.com/cucumber/godog/blob/master/CHANGELOG.md) from time to time.
+
+Package godog is the official Cucumber BDD framework for Golang. It merges specification and test documentation into one cohesive whole, using Gherkin-formatted scenarios in the format of Given, When, Then.
+
+The project was inspired by [behat][behat] and [cucumber][cucumber].
+
+## Why Godog/Cucumber
+
+### A single source of truth
+
+Godog merges specification and test documentation into one cohesive whole.
+
+### Living documentation
+
+Because they're automatically tested by Godog, your specifications are
+always up to date.
+
+### Focus on the customer
+
+Business and IT don't always understand each other. Godog's executable specifications encourage closer collaboration, helping teams keep the business goal in mind at all times.
+
+### Less rework
+
+When automated testing is this much fun, teams can easily protect themselves from costly regressions.
+
+### Read more
+- [Behaviour-Driven Development](https://cucumber.io/docs/bdd/)
+- [Gherkin Reference](https://cucumber.io/docs/gherkin/reference/)
+
+## Contributions
+
+Godog is a community driven Open Source Project within the Cucumber organization. We [welcome contributions from everyone](https://cucumber.io/blog/open-source/tackling-structural-racism-(and-sexism)-in-open-so/), and we're ready to support you if you have the enthusiasm to contribute.
+
+See the [contributing guide] for more detail on how to get started.
+
+See the [releasing guide] for release flow details.
+
+## Getting help
+
+We have a [community Discord](https://cucumber.io/docs/community/get-in-touch/#discord) where you can chat with other users, developers, and BDD practitioners.
+
+## Examples
+
+You can find a few examples [here](/_examples).
+
+**Note** that if you want to execute any of the examples and have the Git repository checked out in the `$GOPATH`, you need to set `GO111MODULE=off`. See [this issue](https://github.com/cucumber/godog/issues/344) for reference.
+
+### Godogs
+
+The following example can be [found here](/_examples/godogs).
+
+#### Step 1 - Setup a go module
+
+Create a new go module named **godogs** in your go workspace by running `mkdir godogs`
+
+From now on, use **godogs** as your working directory by running `cd godogs`
+
+Initiate the go module inside the **godogs** directory by running `go mod init godogs`
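+
+In other words, the setup is:
+
+```sh
+mkdir godogs
+cd godogs
+go mod init godogs
+```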
+
+#### Step 2 - Create gherkin feature
+
+Imagine we have a **godog cart** to serve godogs for lunch.
+
+First of all, we describe our feature in plain text:
+
+``` gherkin
+Feature: eat godogs
+ In order to be happy
+ As a hungry gopher
+ I need to be able to eat godogs
+
+ Scenario: Eat 5 out of 12
+ Given there are 12 godogs
+ When I eat 5
+ Then there should be 7 remaining
+```
+
+Run `vim features/godogs.feature`, add the text above in the vim editor, and save the file.
+
+#### Step 3 - Create godog step definitions
+
+**NOTE:** Same as **go test**, godog respects package-level isolation. All your step definitions should be in your tested package root directory. In this case: **godogs**.
+
+Create and copy the step definitions below into a new file by running `vim godogs_test.go`:
+``` go
+package main
+
+import "github.com/cucumber/godog"
+
+func iEat(arg1 int) error {
+ return godog.ErrPending
+}
+
+func thereAreGodogs(arg1 int) error {
+ return godog.ErrPending
+}
+
+func thereShouldBeRemaining(arg1 int) error {
+ return godog.ErrPending
+}
+
+func InitializeScenario(ctx *godog.ScenarioContext) {
+ ctx.Step(`^there are (\d+) godogs$`, thereAreGodogs)
+ ctx.Step(`^I eat (\d+)$`, iEat)
+ ctx.Step(`^there should be (\d+) remaining$`, thereShouldBeRemaining)
+}
+```
+
+Alternatively, you can also specify the keyword (Given, When, Then...) when creating the step definitions:
+``` go
+func InitializeScenario(ctx *godog.ScenarioContext) {
+ ctx.Given(`^there are (\d+) godogs$`, thereAreGodogs)
+ ctx.When(`^I eat (\d+)$`, iEat)
+ ctx.Then(`^there should be (\d+) remaining$`, thereShouldBeRemaining)
+}
+```
+
+Our module should now look like this:
+```
+godogs
+- features
+ - godogs.feature
+- go.mod
+- go.sum
+- godogs_test.go
+```
+
+Run `go test` in the **godogs** directory to run the steps you have defined. You should now see that the scenario runs
+with a warning stating there are no tests to run.
+```
+testing: warning: no tests to run
+PASS
+ok godogs 0.225s
+```
+
+By adding some logic to these steps, you will be able to thoroughly test the feature you just defined.
+
+#### Step 4 - Create the main program to test
+
+Let's keep it simple by only tracking the number of available **godogs** for now.
+
+Create and copy the code below into a new file by running `vim godogs.go`
+```go
+package main
+
+// Godogs available to eat
+var Godogs int
+
+func main() { /* usual main func */ }
+```
+
+Our module should now look like this:
+```
+godogs
+- features
+ - godogs.feature
+- go.mod
+- go.sum
+- godogs.go
+- godogs_test.go
+```
+
+#### Step 5 - Add some logic to the step definitions
+
+Now let's implement our step definitions to test our feature requirements.
+
+Replace the contents of `godogs_test.go` with the code below by running `vim godogs_test.go`.
+
+```go
+package main
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/cucumber/godog"
+)
+
+// godogsCtxKey is the key used to store the available godogs in the context.Context.
+type godogsCtxKey struct{}
+
+func thereAreGodogs(ctx context.Context, available int) (context.Context, error) {
+ return context.WithValue(ctx, godogsCtxKey{}, available), nil
+}
+
+func iEat(ctx context.Context, num int) (context.Context, error) {
+ available, ok := ctx.Value(godogsCtxKey{}).(int)
+ if !ok {
+ return ctx, errors.New("there are no godogs available")
+ }
+
+ if available < num {
+ return ctx, fmt.Errorf("you cannot eat %d godogs, there are %d available", num, available)
+ }
+
+ available -= num
+
+ return context.WithValue(ctx, godogsCtxKey{}, available), nil
+}
+
+func thereShouldBeRemaining(ctx context.Context, remaining int) error {
+ available, ok := ctx.Value(godogsCtxKey{}).(int)
+ if !ok {
+ return errors.New("there are no godogs available")
+ }
+
+ if available != remaining {
+ return fmt.Errorf("expected %d godogs to be remaining, but there is %d", remaining, available)
+ }
+
+ return nil
+}
+
+func TestFeatures(t *testing.T) {
+ suite := godog.TestSuite{
+ ScenarioInitializer: InitializeScenario,
+ Options: &godog.Options{
+ Format: "pretty",
+ Paths: []string{"features"},
+ TestingT: t, // Testing instance that will run subtests.
+ },
+ }
+
+ if suite.Run() != 0 {
+ t.Fatal("non-zero status returned, failed to run feature tests")
+ }
+}
+
+func InitializeScenario(sc *godog.ScenarioContext) {
+ sc.Step(`^there are (\d+) godogs$`, thereAreGodogs)
+ sc.Step(`^I eat (\d+)$`, iEat)
+ sc.Step(`^there should be (\d+) remaining$`, thereShouldBeRemaining)
+}
+```
+
+In this example, we are using `context.Context` to pass state between the steps.
+Every scenario starts with an empty context, and then steps and hooks can add relevant information to it.
+The instrumented context is chained through the steps and hooks and is safe to use when multiple scenarios are running concurrently.
+
+When you run godog again with `go test -v godogs_test.go`, you should see a passing run:
+```
+=== RUN TestFeatures
+Feature: eat godogs
+ In order to be happy
+ As a hungry gopher
+ I need to be able to eat godogs
+=== RUN TestFeatures/Eat_5_out_of_12
+
+ Scenario: Eat 5 out of 12 # features/godogs.feature:6
+ Given there are 12 godogs # godog_test.go:15 -> command-line-arguments.thereAreGodogs
+ When I eat 5 # godog_test.go:19 -> command-line-arguments.iEat
+ Then there should be 7 remaining # godog_test.go:34 -> command-line-arguments.thereShouldBeRemaining
+
+1 scenarios (1 passed)
+3 steps (3 passed)
+279.917µs
+--- PASS: TestFeatures (0.00s)
+ --- PASS: TestFeatures/Eat_5_out_of_12 (0.00s)
+PASS
+ok command-line-arguments 0.164s
+```
+
+You may hook into the `ScenarioContext` **Before** event in order to reset or pre-seed the application state before each scenario.
+You may hook into more events, like `sc.StepContext()` **After** to print all state in case of an error,
+or **BeforeSuite** to prepare a database.
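+
+For instance, a minimal **Before** hook (a sketch reusing the `godogsCtxKey` from the example above; `ScenarioContext.Before` is the contextualized hook introduced in v0.12.0) could pre-seed the godog count:
+
+```go
+func InitializeScenario(sc *godog.ScenarioContext) {
+	sc.Before(func(ctx context.Context, s *godog.Scenario) (context.Context, error) {
+		// Start every scenario with zero godogs available.
+		return context.WithValue(ctx, godogsCtxKey{}, 0), nil
+	})
+
+	sc.Step(`^there are (\d+) godogs$`, thereAreGodogs)
+	sc.Step(`^I eat (\d+)$`, iEat)
+	sc.Step(`^there should be (\d+) remaining$`, thereShouldBeRemaining)
+}
+```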
+
+By now, you should have figured out how to use **godog**. One more piece of advice: make steps orthogonal, small, and simple to read for the intended user.
+Whether that user is a non-technical website user or an API developer who understands a little more technical context, the steps should target that user.
+
+When steps are orthogonal and small, you can combine them just like you do with Unix tools. Look for ways to simplify or remove steps that can be composed from others.
+
+`TestFeatures` acts as a regular Go test, so you can leverage your IDE facilities to run and debug it.
+
+### Attachments
+
+An example showing how to make attachments (aka embeddings) to the results can be found in [_examples/attachments](/_examples/attachments/).
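+
+As a minimal sketch (assuming the attachments API added in v0.15.0; `capture` is a hypothetical helper), a step can attach data to its result with `godog.Attach`:
+
+```go
+func iTakeAScreenshot(ctx context.Context) (context.Context, error) {
+	png := capture() // hypothetical helper returning image bytes
+	return godog.Attach(ctx, godog.Attachment{
+		Body:      png,
+		FileName:  "screenshot.png",
+		MediaType: "image/png",
+	}), nil
+}
+```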
+
+## Code of Conduct
+
+Everyone interacting in this codebase and issue tracker is expected to follow the Cucumber [code of conduct](https://github.com/cucumber/cucumber/blob/master/CODE_OF_CONDUCT.md).
+
+## References and Tutorials
+
+- [cucumber-html-reporter](https://github.com/gkushang/cucumber-html-reporter)
+  may be used to generate **html** reports together with the **cucumber** output formatter. See the [following docker image](https://github.com/myie/cucumber-html-reporter) for usage details.
+- [how to use godog by semaphoreci](https://semaphoreci.com/community/tutorials/how-to-use-godog-for-behavior-driven-development-in-go)
+- see [examples](https://github.com/cucumber/godog/tree/master/_examples)
+- see extension [AssistDog](https://github.com/hellomd/assistdog),
+ which may have useful **gherkin.DataTable** transformations or comparison methods for assertions.
+
+## Documentation
+
+See [pkg documentation][godoc] for general API details.
+See **[Circle Config](/.circleci/config.yml)** for supported **go** versions.
+See `godog -h` for general command options.
+
+See implementation examples:
+
+- [rest API server](/_examples/api)
+- [rest API with Database](/_examples/db)
+- [godogs](/_examples/godogs)
+
+## FAQ
+
+### Running Godog with go test
+
+You may integrate running **godog** in your **go test** command.
+
+#### Subtests of *testing.T
+
+You can run the test suite using Go [Subtests](https://pkg.go.dev/testing#hdr-Subtests_and_Sub_benchmarks).
+In this case it is not necessary to have the **godog** command installed. See the following example.
+
+```go
+package main_test
+
+import (
+ "testing"
+
+ "github.com/cucumber/godog"
+)
+
+func TestFeatures(t *testing.T) {
+ suite := godog.TestSuite{
+ ScenarioInitializer: func(s *godog.ScenarioContext) {
+ // Add step definitions here.
+ },
+ Options: &godog.Options{
+ Format: "pretty",
+ Paths: []string{"features"},
+ TestingT: t, // Testing instance that will run subtests.
+ },
+ }
+
+ if suite.Run() != 0 {
+ t.Fatal("non-zero status returned, failed to run feature tests")
+ }
+}
+```
+
+Then you can run the suite.
+```
+go test -test.v -test.run ^TestFeatures$
+```
+
+Or a particular scenario.
+```
+go test -test.v -test.run ^TestFeatures$/^my_scenario$
+```
+
+#### TestMain
+
+You can run the test suite using the Go [TestMain](https://golang.org/pkg/testing/#hdr-Main) func, available since **go 1.4**.
+In this case it is not necessary to have the **godog** command installed. See the following examples.
+
+The first example binds **godog** flags with the prefix `godog.` in order to prevent flag collisions.
+
+```go
+package main
+
+import (
+ "os"
+ "testing"
+
+ "github.com/cucumber/godog"
+ "github.com/cucumber/godog/colors"
+ "github.com/spf13/pflag" // godog v0.11.0 and later
+)
+
+var opts = godog.Options{
+ Output: colors.Colored(os.Stdout),
+ Format: "progress", // can define default values
+}
+
+func init() {
+ godog.BindFlags("godog.", pflag.CommandLine, &opts) // godog v0.10.0 and earlier
+ godog.BindCommandLineFlags("godog.", &opts) // godog v0.11.0 and later
+}
+
+func TestMain(m *testing.M) {
+ pflag.Parse()
+ opts.Paths = pflag.Args()
+
+ status := godog.TestSuite{
+ Name: "godogs",
+ TestSuiteInitializer: InitializeTestSuite,
+ ScenarioInitializer: InitializeScenario,
+ Options: &opts,
+ }.Run()
+
+ // Optional: Run `testing` package's logic besides godog.
+ if st := m.Run(); st > status {
+ status = st
+ }
+
+ os.Exit(status)
+}
+```
+
+Then you may run tests by specifying flags in order to filter features.
+
+```
+go test -v --godog.random --godog.tags=wip
+go test -v --godog.format=pretty --godog.random -race -coverprofile=coverage.txt -covermode=atomic
+```
+
+The following example does not bind godog flags; instead, it manually configures the needed options.
+
+```go
+func TestMain(m *testing.M) {
+ opts := godog.Options{
+ Format: "progress",
+ Paths: []string{"features"},
+ Randomize: time.Now().UTC().UnixNano(), // randomize scenario execution order
+ }
+
+ status := godog.TestSuite{
+ Name: "godogs",
+ TestSuiteInitializer: InitializeTestSuite,
+ ScenarioInitializer: InitializeScenario,
+ Options: &opts,
+ }.Run()
+
+ // Optional: Run `testing` package's logic besides godog.
+ if st := m.Run(); st > status {
+ status = st
+ }
+
+ os.Exit(status)
+}
+```
+
+You can even go one step further and reuse **go test** flags, like **verbose** mode, in order to switch the godog **format**. See the following example:
+
+```go
+func TestMain(m *testing.M) {
+ format := "progress"
+ for _, arg := range os.Args[1:] {
+ if arg == "-test.v=true" { // go test transforms -v option
+ format = "pretty"
+ break
+ }
+ }
+
+ opts := godog.Options{
+ Format: format,
+ Paths: []string{"features"},
+ }
+
+ status := godog.TestSuite{
+ Name: "godogs",
+ TestSuiteInitializer: InitializeTestSuite,
+ ScenarioInitializer: InitializeScenario,
+ Options: &opts,
+ }.Run()
+
+ // Optional: Run `testing` package's logic besides godog.
+ if st := m.Run(); st > status {
+ status = st
+ }
+
+ os.Exit(status)
+}
+```
+
+Now, when running `go test -v`, it will use the **pretty** format.
+
+### Tags
+
+If you want to filter scenarios by tags, you can use the `-t=<expression>` or `--tags=<expression>` option, where `<expression>` is one of the following (see the sketch after this list):
+
+- `@wip` - run all scenarios with wip tag
+- `~@wip` - exclude all scenarios with wip tag
+- `@wip && ~@new` - run wip scenarios, but exclude new
+- `@wip,@undone` - run wip or undone scenarios
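+
+The same expressions also work programmatically via the `Tags` field of `godog.Options` (a minimal sketch):
+
+```go
+opts := godog.Options{
+	Format: "pretty",
+	Paths:  []string{"features"},
+	Tags:   "@wip && ~@new", // run wip scenarios, but exclude new
+}
+```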
+
+### Using assertion packages like testify with Godog
+A more extensive example can be [found here](/_examples/assert-godogs).
+
+```go
+func thereShouldBeRemaining(ctx context.Context, remaining int) error {
+ assert.Equal(
+ godog.T(ctx), Godogs, remaining,
+ "Expected %d godogs to be remaining, but there is %d", remaining, Godogs,
+ )
+ return nil
+}
+```
+
+### Embeds
+
+If you're looking to compile your test binary in advance of running, you can compile the feature files into the binary via `go:embed`:
+
+```go
+import "embed"
+
+//go:embed features/*
+var features embed.FS
+
+var opts = godog.Options{
+ Paths: []string{"features"},
+ FS: features,
+}
+```
+
+Now the test binary can be compiled with all feature files embedded, and can be run independently of the feature files:
+
+```sh
+> go test -c ./test/integration/integration_test.go
+> mv integration.test /some/random/dir
+> cd /some/random/dir
+> ./integration.test
+```
+
+**NOTE:** `godog.Options.FS` accepts any `fs.FS`, so custom filesystem loaders can be used.
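+
+As a minimal sketch, any `fs.FS` value can stand in for the embedded filesystem; here the standard library's `testing/fstest` package (an illustrative choice, not something godog requires) provides an in-memory one:
+
+```go
+opts := godog.Options{
+	Paths: []string{"features"},
+	FS: fstest.MapFS{
+		"features/example.feature": &fstest.MapFile{
+			Data: []byte("Feature: example\n"),
+		},
+	},
+}
+```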
+
+## CLI Mode
+
+**NOTE:** The [`godog` CLI has been deprecated](https://github.com/cucumber/godog/discussions/478). It is recommended to use `go test` instead.
+
+Another way to use `godog` is to run it in CLI mode.
+
+In this mode, the `godog` CLI uses `go` under the hood to compile and run your test suite.
+
+**Godog** does not interfere with the standard **go test** command's behavior. You can leverage both frameworks to functionally test your application while maintaining all test related source code in **_test.go** files.
+
+**Godog** acts similarly to the **go test** command, using the go compiler and linker to produce a test executable. Godog contexts need to be exported the same way as **Test** functions are for go tests. Note that the **godog** command tool uses the `go` executable to locate the compiler and linker.
+
+### Install
+```
+go install github.com/cucumber/godog/cmd/godog@latest
+```
+Adding `@v0.12.0` will install v0.12.0 specifically instead of master.
+
+With `go` versions prior to 1.17, use `go get github.com/cucumber/godog/cmd/godog@v0.12.0`.
+When running within the `$GOPATH`, you would also need to set `GO111MODULE=on`, like this:
+```
+GO111MODULE=on go get github.com/cucumber/godog/cmd/godog@v0.12.0
+```
+
+### Configure common options for godog CLI
+
+There are no global options or configuration files. Alias your common or project-based commands: `alias godog-wip="godog --format=progress --tags=@wip"`
+
+## Concurrency
+
+When concurrency is configured in options, godog will execute the scenarios concurrently, which is supported by all supplied formatters.
+
+In order to support concurrency well, you should reset the state of and isolate each scenario; scenarios should not share any state. It is suggested to run the suite concurrently to make sure there is no state corruption or race conditions in the application.
+
+It is also useful to randomize the order of scenario execution, which you can do with the `--random` command-line option or the `godog.Options.Randomize` setting.
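+
+A minimal options sketch, assuming the `TestMain` setup from earlier (all field names are those of `godog.Options`):
+
+```go
+opts := godog.Options{
+	Format:      "progress", // progress output is safe with concurrency
+	Paths:       []string{"features"},
+	Concurrency: 4,                           // run up to 4 scenarios at once
+	Randomize:   time.Now().UTC().UnixNano(), // shuffle execution order
+}
+```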
+
+### Building your own custom formatter
+A simple example can be [found here](/_examples/custom-formatter).
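+
+As a minimal sketch of the registration hook (reusing the exported base formatter purely for brevity; a real formatter would implement `godog.Formatter` itself):
+
+```go
+func init() {
+	godog.Format("mine", "My custom formatter.", func(suite string, out io.Writer) godog.Formatter {
+		return godog.NewBaseFmt(suite, out) // wrap or replace with your own Formatter
+	})
+}
+```
+
+Running with `--format=mine` (or `--godog.format=mine` under the flag binding shown earlier) then selects it.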
+
+## License
+**Godog** and **Gherkin** are licensed under the [MIT license][license] and developed as part of the [cucumber project][cucumber].
+
+[godoc]: https://pkg.go.dev/github.com/cucumber/godog "Documentation on godog"
+[golang]: https://golang.org/ "GO programming language"
+[behat]: http://docs.behat.org/ "Behavior driven development framework for PHP"
+[cucumber]: https://cucumber.io/ "Behavior driven development framework"
+[license]: https://en.wikipedia.org/wiki/MIT_License "The MIT license"
+[contributing guide]: https://github.com/cucumber/godog/blob/main/CONTRIBUTING.md
+[releasing guide]: https://github.com/cucumber/godog/blob/main/RELEASING.md
+[community Discord]: https://cucumber.io/community#discord
+
+
+
diff --git a/vendor/github.com/cucumber/godog/RELEASING.md b/vendor/github.com/cucumber/godog/RELEASING.md
new file mode 100644
index 000000000..cba243657
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/RELEASING.md
@@ -0,0 +1,67 @@
+# Releasing Guidelines for Cucumber Godog
+
+This document provides guidelines for releasing new versions of Cucumber Godog. Follow these steps to ensure a smooth and consistent release process.
+
+## Versioning
+
+Cucumber Godog follows [Semantic Versioning]. Version numbers are in the format `MAJOR.MINOR.PATCH`.
+
+### Current (for v0.MINOR.PATCH)
+
+- **MINOR**: Incompatible API changes.
+- **PATCH**: Backward-compatible new features and bug fixes.
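+
+For example, under the current scheme, moving from v0.12.x to v0.13.0 signals incompatible API changes, while moving from v0.12.5 to v0.12.6 may add backward-compatible features or bug fixes.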
+
+### After v1.X.X release
+
+- **MAJOR**: Incompatible API changes.
+- **MINOR**: Backward-compatible new features.
+- **PATCH**: Backward-compatible bug fixes.
+
+## Release Process
+
+1. **Update Changelog:**
+   - Open `CHANGELOG.md` and add an entry for the upcoming release, formatted according to the principles of [Keep A CHANGELOG].
+ - Include details about new features, enhancements, and bug fixes.
+
+2. **Run Tests:**
+ - Run the test suite to ensure all existing features are working as expected.
+
+3. **Manual Testing for Backwards Compatibility:**
+ - Manually test the new release with external libraries that depend on Cucumber Godog.
+ - Look for any potential backwards compatibility issues, especially with widely-used libraries.
+ - Address any identified issues before proceeding.
+
+4. **Create Release on GitHub:**
+ - Go to the [Releases] page on GitHub.
+ - Click on "Draft a new release."
+   - Set the tag version to the new tag `vMAJOR.MINOR.PATCH`.
+   - Title the release using the version number (e.g., "vMAJOR.MINOR.PATCH").
+   - Click "Generate release notes."
+
+5. **Publish Release:**
+ - Click "Publish release" to make the release public.
+
+6. **Announce the Release:**
+ - Make an announcement on relevant communication channels (e.g., [community Discord]) about the new release.
+
+## Additional Considerations
+
+- **Documentation:**
+ - Update the project documentation on the [website], if applicable.
+
+- **Deprecation Notices:**
+ - If any features are deprecated, clearly document them in the release notes and provide guidance on migration.
+
+- **Compatibility:**
+ - Clearly state any compatibility requirements or changes in the release notes.
+
+- **Feedback:**
+ - Encourage users to provide feedback and report any issues with the new release.
+
+Following these guidelines, including manual testing with external libraries, helps ensure a thorough release process for Cucumber Godog, allowing potential backwards compatibility issues to be detected and resolved before the release is tagged.
+
+[community Discord]: https://cucumber.io/community#discord
+[website]: https://cucumber.github.io/godog/
+[Releases]: https://github.com/cucumber/godog/releases
+[Semantic Versioning]: http://semver.org
+[Keep A CHANGELOG]: http://keepachangelog.com
\ No newline at end of file
diff --git a/vendor/github.com/cucumber/godog/codecov.yml b/vendor/github.com/cucumber/godog/codecov.yml
new file mode 100644
index 000000000..1418fc73d
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/codecov.yml
@@ -0,0 +1,8 @@
+coverage:
+ status:
+ project:
+ default:
+ threshold: 0.5%
+ patch:
+ default:
+ threshold: 0.5%
diff --git a/vendor/github.com/cucumber/godog/colors/ansi_others.go b/vendor/github.com/cucumber/godog/colors/ansi_others.go
new file mode 100644
index 000000000..6a166079f
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/colors/ansi_others.go
@@ -0,0 +1,19 @@
+// Copyright 2014 shiena Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+// +build !windows
+
+package colors
+
+import "io"
+
+type ansiColorWriter struct {
+ w io.Writer
+ mode outputMode
+}
+
+func (cw *ansiColorWriter) Write(p []byte) (int, error) {
+ return cw.w.Write(p)
+}
diff --git a/vendor/github.com/cucumber/godog/colors/ansi_windows.go b/vendor/github.com/cucumber/godog/colors/ansi_windows.go
new file mode 100644
index 000000000..8a92c8223
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/colors/ansi_windows.go
@@ -0,0 +1,418 @@
+// Copyright 2014 shiena Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package colors
+
+import (
+ "bytes"
+ "io"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+type csiState int
+
+const (
+ outsideCsiCode csiState = iota
+ firstCsiCode
+ secondCsiCode
+)
+
+type parseResult int
+
+const (
+ noConsole parseResult = iota
+ changedColor
+ unknown
+)
+
+type ansiColorWriter struct {
+ w io.Writer
+ mode outputMode
+ state csiState
+ paramStartBuf bytes.Buffer
+ paramBuf bytes.Buffer
+}
+
+const (
+	firstCsiChar  byte = '\x1b'
+	secondCsiChar byte = '['
+	separatorChar byte = ';'
+	sgrCode       byte = 'm'
+)
+
+const (
+ foregroundBlue = uint16(0x0001)
+ foregroundGreen = uint16(0x0002)
+ foregroundRed = uint16(0x0004)
+ foregroundIntensity = uint16(0x0008)
+ backgroundBlue = uint16(0x0010)
+ backgroundGreen = uint16(0x0020)
+ backgroundRed = uint16(0x0040)
+ backgroundIntensity = uint16(0x0080)
+ underscore = uint16(0x8000)
+
+ foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity
+ backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity
+)
+
+const (
+ ansiReset = "0"
+ ansiIntensityOn = "1"
+ ansiIntensityOff = "21"
+ ansiUnderlineOn = "4"
+ ansiUnderlineOff = "24"
+ ansiBlinkOn = "5"
+ ansiBlinkOff = "25"
+
+ ansiForegroundBlack = "30"
+ ansiForegroundRed = "31"
+ ansiForegroundGreen = "32"
+ ansiForegroundYellow = "33"
+ ansiForegroundBlue = "34"
+ ansiForegroundMagenta = "35"
+ ansiForegroundCyan = "36"
+ ansiForegroundWhite = "37"
+ ansiForegroundDefault = "39"
+
+ ansiBackgroundBlack = "40"
+ ansiBackgroundRed = "41"
+ ansiBackgroundGreen = "42"
+ ansiBackgroundYellow = "43"
+ ansiBackgroundBlue = "44"
+ ansiBackgroundMagenta = "45"
+ ansiBackgroundCyan = "46"
+ ansiBackgroundWhite = "47"
+ ansiBackgroundDefault = "49"
+
+ ansiLightForegroundGray = "90"
+ ansiLightForegroundRed = "91"
+ ansiLightForegroundGreen = "92"
+ ansiLightForegroundYellow = "93"
+ ansiLightForegroundBlue = "94"
+ ansiLightForegroundMagenta = "95"
+ ansiLightForegroundCyan = "96"
+ ansiLightForegroundWhite = "97"
+
+ ansiLightBackgroundGray = "100"
+ ansiLightBackgroundRed = "101"
+ ansiLightBackgroundGreen = "102"
+ ansiLightBackgroundYellow = "103"
+ ansiLightBackgroundBlue = "104"
+ ansiLightBackgroundMagenta = "105"
+ ansiLightBackgroundCyan = "106"
+ ansiLightBackgroundWhite = "107"
+)
+
+type drawType int
+
+const (
+ foreground drawType = iota
+ background
+)
+
+type winColor struct {
+ code uint16
+ drawType drawType
+}
+
+var colorMap = map[string]winColor{
+ ansiForegroundBlack: {0, foreground},
+ ansiForegroundRed: {foregroundRed, foreground},
+ ansiForegroundGreen: {foregroundGreen, foreground},
+ ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground},
+ ansiForegroundBlue: {foregroundBlue, foreground},
+ ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground},
+ ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground},
+ ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
+ ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
+
+ ansiBackgroundBlack: {0, background},
+ ansiBackgroundRed: {backgroundRed, background},
+ ansiBackgroundGreen: {backgroundGreen, background},
+ ansiBackgroundYellow: {backgroundRed | backgroundGreen, background},
+ ansiBackgroundBlue: {backgroundBlue, background},
+ ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background},
+ ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background},
+ ansiBackgroundWhite: {backgroundRed | backgroundGreen | backgroundBlue, background},
+ ansiBackgroundDefault: {0, background},
+
+ ansiLightForegroundGray: {foregroundIntensity, foreground},
+ ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground},
+ ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground},
+ ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground},
+ ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground},
+ ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground},
+ ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground},
+ ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground},
+
+ ansiLightBackgroundGray: {backgroundIntensity, background},
+ ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background},
+ ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background},
+ ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background},
+ ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background},
+ ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background},
+ ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background},
+ ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background},
+}
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ defaultAttr *textAttributes
+)
+
+func init() {
+ screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
+ if screenInfo != nil {
+ colorMap[ansiForegroundDefault] = winColor{
+ screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue),
+ foreground,
+ }
+ colorMap[ansiBackgroundDefault] = winColor{
+ screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue),
+ background,
+ }
+ defaultAttr = convertTextAttr(screenInfo.WAttributes)
+ }
+}
+
+type coord struct {
+ X, Y int16
+}
+
+type smallRect struct {
+ Left, Top, Right, Bottom int16
+}
+
+type consoleScreenBufferInfo struct {
+ DwSize coord
+ DwCursorPosition coord
+ WAttributes uint16
+ SrWindow smallRect
+ DwMaximumWindowSize coord
+}
+
+func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo {
+ var csbi consoleScreenBufferInfo
+ ret, _, _ := procGetConsoleScreenBufferInfo.Call(
+ hConsoleOutput,
+ uintptr(unsafe.Pointer(&csbi)))
+ if ret == 0 {
+ return nil
+ }
+ return &csbi
+}
+
+func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool {
+ ret, _, _ := procSetConsoleTextAttribute.Call(
+ hConsoleOutput,
+ uintptr(wAttributes))
+ return ret != 0
+}
+
+type textAttributes struct {
+ foregroundColor uint16
+ backgroundColor uint16
+ foregroundIntensity uint16
+ backgroundIntensity uint16
+ underscore uint16
+ otherAttributes uint16
+}
+
+func convertTextAttr(winAttr uint16) *textAttributes {
+ fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue)
+ bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue)
+ fgIntensity := winAttr & foregroundIntensity
+ bgIntensity := winAttr & backgroundIntensity
+ underline := winAttr & underscore
+ otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore)
+ return &textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes}
+}
+
+func convertWinAttr(textAttr *textAttributes) uint16 {
+ var winAttr uint16
+ winAttr |= textAttr.foregroundColor
+ winAttr |= textAttr.backgroundColor
+ winAttr |= textAttr.foregroundIntensity
+ winAttr |= textAttr.backgroundIntensity
+ winAttr |= textAttr.underscore
+ winAttr |= textAttr.otherAttributes
+ return winAttr
+}
+
+func changeColor(param []byte) parseResult {
+ screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
+ if screenInfo == nil {
+ return noConsole
+ }
+
+ winAttr := convertTextAttr(screenInfo.WAttributes)
+ strParam := string(param)
+ if len(strParam) <= 0 {
+ strParam = "0"
+ }
+ csiParam := strings.Split(strParam, string(separatorChar))
+ for _, p := range csiParam {
+ c, ok := colorMap[p]
+ switch {
+ case !ok:
+ switch p {
+ case ansiReset:
+ winAttr.foregroundColor = defaultAttr.foregroundColor
+ winAttr.backgroundColor = defaultAttr.backgroundColor
+ winAttr.foregroundIntensity = defaultAttr.foregroundIntensity
+ winAttr.backgroundIntensity = defaultAttr.backgroundIntensity
+ winAttr.underscore = 0
+ winAttr.otherAttributes = 0
+ case ansiIntensityOn:
+ winAttr.foregroundIntensity = foregroundIntensity
+ case ansiIntensityOff:
+ winAttr.foregroundIntensity = 0
+ case ansiUnderlineOn:
+ winAttr.underscore = underscore
+ case ansiUnderlineOff:
+ winAttr.underscore = 0
+ case ansiBlinkOn:
+ winAttr.backgroundIntensity = backgroundIntensity
+ case ansiBlinkOff:
+ winAttr.backgroundIntensity = 0
+ default:
+ // unknown code
+ }
+ case c.drawType == foreground:
+ winAttr.foregroundColor = c.code
+ case c.drawType == background:
+ winAttr.backgroundColor = c.code
+ }
+ }
+ winTextAttribute := convertWinAttr(winAttr)
+ setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute)
+
+ return changedColor
+}
+
+func parseEscapeSequence(command byte, param []byte) parseResult {
+ if defaultAttr == nil {
+ return noConsole
+ }
+
+ switch command {
+ case sgrCode:
+ return changeColor(param)
+ default:
+ return unknown
+ }
+}
+
+func (cw *ansiColorWriter) flushBuffer() (int, error) {
+ return cw.flushTo(cw.w)
+}
+
+func (cw *ansiColorWriter) resetBuffer() (int, error) {
+ return cw.flushTo(nil)
+}
+
+func (cw *ansiColorWriter) flushTo(w io.Writer) (int, error) {
+ var n1, n2 int
+ var err error
+
+ startBytes := cw.paramStartBuf.Bytes()
+ cw.paramStartBuf.Reset()
+ if w != nil {
+ n1, err = cw.w.Write(startBytes)
+ if err != nil {
+ return n1, err
+ }
+ } else {
+ n1 = len(startBytes)
+ }
+ paramBytes := cw.paramBuf.Bytes()
+ cw.paramBuf.Reset()
+ if w != nil {
+ n2, err = cw.w.Write(paramBytes)
+ if err != nil {
+ return n1 + n2, err
+ }
+ } else {
+ n2 = len(paramBytes)
+ }
+ return n1 + n2, nil
+}
+
+func isParameterChar(b byte) bool {
+ return ('0' <= b && b <= '9') || b == separatorChar
+}
+
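+// Write scans p for ANSI CSI sequences; SGR (color) parameter lists are
+// translated into Windows console text attributes via kernel32, while the
+// surrounding bytes are written through to the underlying writer.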
+func (cw *ansiColorWriter) Write(p []byte) (int, error) {
+ r, nw, first, last := 0, 0, 0, 0
+ if cw.mode != discardNonColorEscSeq {
+ cw.state = outsideCsiCode
+ cw.resetBuffer()
+ }
+
+ var err error
+ for i, ch := range p {
+ switch cw.state {
+ case outsideCsiCode:
+ if ch == firstCsiChar {
+ cw.paramStartBuf.WriteByte(ch)
+ cw.state = firstCsiCode
+ }
+ case firstCsiCode:
+ switch ch {
+ case firstCsiChar:
+ cw.paramStartBuf.WriteByte(ch)
+			case secondCsiChar:
+ cw.paramStartBuf.WriteByte(ch)
+ cw.state = secondCsiCode
+ last = i - 1
+ default:
+ cw.resetBuffer()
+ cw.state = outsideCsiCode
+ }
+ case secondCsiCode:
+ if isParameterChar(ch) {
+ cw.paramBuf.WriteByte(ch)
+ } else {
+ nw, err = cw.w.Write(p[first:last])
+ r += nw
+ if err != nil {
+ return r, err
+ }
+ first = i + 1
+ result := parseEscapeSequence(ch, cw.paramBuf.Bytes())
+ if result == noConsole || (cw.mode == outputNonColorEscSeq && result == unknown) {
+ cw.paramBuf.WriteByte(ch)
+ nw, err := cw.flushBuffer()
+ if err != nil {
+ return r, err
+ }
+ r += nw
+ } else {
+ n, _ := cw.resetBuffer()
+ // Add one more to the size of the buffer for the last ch
+ r += n + 1
+ }
+
+ cw.state = outsideCsiCode
+ }
+ default:
+ cw.state = outsideCsiCode
+ }
+ }
+
+ if cw.mode != discardNonColorEscSeq || cw.state == outsideCsiCode {
+ nw, err = cw.w.Write(p[first:])
+ r += nw
+ }
+
+ return r, err
+}
diff --git a/vendor/github.com/cucumber/godog/colors/colors.go b/vendor/github.com/cucumber/godog/colors/colors.go
new file mode 100644
index 000000000..be7722e95
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/colors/colors.go
@@ -0,0 +1,68 @@
+package colors
+
+import (
+ "fmt"
+ "strings"
+)
+
+const ansiEscape = "\x1b"
+
+// a color code type
+type color int
+
+// some ansi colors
+const (
+ black color = iota + 30
+ red
+ green
+ yellow
+ blue // unused
+ magenta // unused
+ cyan
+ white
+)
+
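+// colorize wraps s in an ANSI SGR escape sequence that switches to color
+// c and resets the attributes afterwards.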
+func colorize(s interface{}, c color) string {
+ return fmt.Sprintf("%s[%dm%v%s[0m", ansiEscape, c, s, ansiEscape)
+}
+
+// ColorFunc is a helper type to create colorized strings.
+type ColorFunc func(interface{}) string
+
+// Bold will accept a ColorFunc and return a new ColorFunc
+// that will make the string bold.
+func Bold(fn ColorFunc) ColorFunc {
+ return ColorFunc(func(input interface{}) string {
+ return strings.Replace(fn(input), ansiEscape+"[", ansiEscape+"[1;", 1)
+ })
+}
+
+// Green will accept an interface and return a colorized green string.
+func Green(s interface{}) string {
+ return colorize(s, green)
+}
+
+// Red will accept an interface and return a colorized red string.
+func Red(s interface{}) string {
+ return colorize(s, red)
+}
+
+// Cyan will accept an interface and return a colorized cyan string.
+func Cyan(s interface{}) string {
+ return colorize(s, cyan)
+}
+
+// Black will accept an interface and return a colorized black string.
+func Black(s interface{}) string {
+ return colorize(s, black)
+}
+
+// Yellow will accept an interface and return a colorized yellow string.
+func Yellow(s interface{}) string {
+ return colorize(s, yellow)
+}
+
+// White will accept an interface and return a colorized white string.
+func White(s interface{}) string {
+ return colorize(s, white)
+}
diff --git a/vendor/github.com/cucumber/godog/colors/no_colors.go b/vendor/github.com/cucumber/godog/colors/no_colors.go
new file mode 100644
index 000000000..2eeb80243
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/colors/no_colors.go
@@ -0,0 +1,59 @@
+package colors
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+type noColors struct {
+ out io.Writer
+ lastbuf bytes.Buffer
+}
+
+// Uncolored will accept an io.Writer and return a
+// new io.Writer that won't include colors.
+func Uncolored(w io.Writer) io.Writer {
+ return &noColors{out: w}
+}
+
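+// Write copies data to the underlying writer while stripping ANSI escape
+// sequences; bytes belonging to an incomplete sequence are held in lastbuf
+// and excluded from the reported byte count.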
+func (w *noColors) Write(data []byte) (n int, err error) {
+ er := bytes.NewBuffer(data)
+loop:
+ for {
+ c1, _, err := er.ReadRune()
+ if err != nil {
+ break loop
+ }
+ if c1 != 0x1b {
+ fmt.Fprint(w.out, string(c1))
+ continue
+ }
+ c2, _, err := er.ReadRune()
+ if err != nil {
+ w.lastbuf.WriteRune(c1)
+ break loop
+ }
+ if c2 != 0x5b {
+ w.lastbuf.WriteRune(c1)
+ w.lastbuf.WriteRune(c2)
+ continue
+ }
+
+ var buf bytes.Buffer
+ for {
+ c, _, err := er.ReadRune()
+ if err != nil {
+ w.lastbuf.WriteRune(c1)
+ w.lastbuf.WriteRune(c2)
+ w.lastbuf.Write(buf.Bytes())
+ break loop
+ }
+ if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+ break
+ }
+ buf.Write([]byte(string(c)))
+ }
+ }
+ return len(data) - w.lastbuf.Len(), nil
+}
diff --git a/vendor/github.com/cucumber/godog/colors/writer.go b/vendor/github.com/cucumber/godog/colors/writer.go
new file mode 100644
index 000000000..469c7a5ed
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/colors/writer.go
@@ -0,0 +1,41 @@
+// Copyright 2014 shiena Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package colors
+
+import "io"
+
+type outputMode int
+
+// discardNonColorEscSeq supports color escape sequences that are divided
+// across writes, but non-color escape sequences are not output.
+// Use outputNonColorEscSeq if you want to output non-color escape
+// sequences such as those used by ncurses; however, it does not support
+// divided color escape sequences.
+const (
+ _ outputMode = iota
+ discardNonColorEscSeq
+ outputNonColorEscSeq // unused
+)
+
+// Colored creates and initializes a new ansiColorWriter
+// that wraps the given io.Writer w.
+// On Windows consoles it changes the foreground and background
+// colors of the text according to ANSI escape sequences; on other
+// systems it writes all text to w unchanged.
+func Colored(w io.Writer) io.Writer {
+ return createModeAnsiColorWriter(w, discardNonColorEscSeq)
+}
+
+// createModeAnsiColorWriter creates and initializes a new ansiColorWriter
+// with the specified outputMode.
+func createModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer {
+ if _, ok := w.(*ansiColorWriter); !ok {
+ return &ansiColorWriter{
+ w: w,
+ mode: mode,
+ }
+ }
+ return w
+}
diff --git a/vendor/github.com/cucumber/godog/flags.go b/vendor/github.com/cucumber/godog/flags.go
new file mode 100644
index 000000000..45efbfec7
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/flags.go
@@ -0,0 +1,255 @@
+package godog
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/cucumber/godog/colors"
+ "github.com/cucumber/godog/internal/utils"
+)
+
+// repeats a space n times
+var s = utils.S
+
+var descFeaturesArgument = "Optional feature(s) to run. Can be:\n" +
+ s(4) + "- dir " + colors.Yellow("(features/)") + "\n" +
+ s(4) + "- feature " + colors.Yellow("(*.feature)") + "\n" +
+ s(4) + "- scenario at specific line " + colors.Yellow("(*.feature:10)") + "\n" +
+ "If no feature paths are listed, suite tries " + colors.Yellow("features") + " path by default.\n" +
+ "Multiple comma-separated values can be provided.\n"
+
+var descConcurrencyOption = "Run the test suite with concurrency level:\n" +
+ s(4) + "- " + colors.Yellow(`= 1`) + ": supports all types of formats.\n" +
+ s(4) + "- " + colors.Yellow(`>= 2`) + ": only supports " + colors.Yellow("progress") + ". Note, that\n" +
+ s(4) + "your context needs to support parallel execution."
+
+var descTagsOption = "Filter scenarios by tags. Expression can be:\n" +
+ s(4) + "- " + colors.Yellow(`"@wip"`) + ": run all scenarios with wip tag\n" +
+ s(4) + "- " + colors.Yellow(`"~@wip"`) + ": exclude all scenarios with wip tag\n" +
+ s(4) + "- " + colors.Yellow(`"@wip && ~@new"`) + ": run wip scenarios, but exclude new\n" +
+ s(4) + "- " + colors.Yellow(`"@wip,@undone"`) + ": run wip or undone scenarios"
+
+var descRandomOption = "Randomly shuffle the scenario execution order.\n" +
+ "Specify SEED to reproduce the shuffling from a previous run.\n" +
+ s(4) + `e.g. ` + colors.Yellow(`--random`) + " or " + colors.Yellow(`--random=5738`)
+
+// FlagSet allows flags to be managed by an external suite runner.
+// It builds a flag.FlagSet with godog flags bound.
+//
+// Deprecated:
+func FlagSet(opt *Options) *flag.FlagSet {
+ set := flag.NewFlagSet("godog", flag.ExitOnError)
+ BindFlags("", set, opt)
+ set.Usage = usage(set, opt.Output)
+ return set
+}
+
+// BindFlags binds godog flags to the given flag set, prefixed
+// by the given prefix, without overriding usage.
+func BindFlags(prefix string, set *flag.FlagSet, opt *Options) {
+ set.Usage = usage(set, set.Output())
+
+ descFormatOption := "How to format tests output. Built-in formats:\n"
+
+ type fm struct {
+ name string
+ desc string
+ }
+ var fms []fm
+ for name, desc := range AvailableFormatters() {
+ fms = append(fms, fm{
+ name: name,
+ desc: desc,
+ })
+ }
+ sort.Slice(fms, func(i, j int) bool {
+ return fms[i].name < fms[j].name
+ })
+
+ for _, fm := range fms {
+ descFormatOption += s(4) + "- " + colors.Yellow(fm.name) + ": " + fm.desc + "\n"
+ }
+
+ descFormatOption = strings.TrimSpace(descFormatOption)
+
+ // override flag defaults if any corresponding properties were supplied on the incoming `opt`
+ defFormatOption := "pretty"
+ if opt.Format != "" {
+ defFormatOption = opt.Format
+ }
+
+ defTagsOption := ""
+ if opt.Tags != "" {
+ defTagsOption = opt.Tags
+ }
+
+ defConcurrencyOption := 1
+ if opt.Concurrency != 0 {
+ defConcurrencyOption = opt.Concurrency
+ }
+
+ defShowStepDefinitions := false
+ if opt.ShowStepDefinitions {
+ defShowStepDefinitions = opt.ShowStepDefinitions
+ }
+
+ defStopOnFailure := false
+ if opt.StopOnFailure {
+ defStopOnFailure = opt.StopOnFailure
+ }
+
+ defStrict := false
+ if opt.Strict {
+ defStrict = opt.Strict
+ }
+
+ defNoColors := false
+ if opt.NoColors {
+ defNoColors = opt.NoColors
+ }
+
+ set.StringVar(&opt.Format, prefix+"format", defFormatOption, descFormatOption)
+ set.StringVar(&opt.Format, prefix+"f", defFormatOption, descFormatOption)
+ set.StringVar(&opt.Tags, prefix+"tags", defTagsOption, descTagsOption)
+ set.StringVar(&opt.Tags, prefix+"t", defTagsOption, descTagsOption)
+ set.IntVar(&opt.Concurrency, prefix+"concurrency", defConcurrencyOption, descConcurrencyOption)
+ set.IntVar(&opt.Concurrency, prefix+"c", defConcurrencyOption, descConcurrencyOption)
+ set.BoolVar(&opt.ShowStepDefinitions, prefix+"definitions", defShowStepDefinitions, "Print all available step definitions.")
+ set.BoolVar(&opt.ShowStepDefinitions, prefix+"d", defShowStepDefinitions, "Print all available step definitions.")
+ set.BoolVar(&opt.StopOnFailure, prefix+"stop-on-failure", defStopOnFailure, "Stop processing on first failed scenario.")
+ set.BoolVar(&opt.Strict, prefix+"strict", defStrict, "Fail suite when there are pending or undefined or ambiguous steps.")
+ set.BoolVar(&opt.NoColors, prefix+"no-colors", defNoColors, "Disable ansi colors.")
+ set.Var(&randomSeed{&opt.Randomize}, prefix+"random", descRandomOption)
+ set.BoolVar(&opt.ShowHelp, "godog.help", false, "Show usage help.")
+ set.Func(prefix+"paths", descFeaturesArgument, func(paths string) error {
+ if paths != "" {
+ opt.Paths = strings.Split(paths, ",")
+ }
+
+ return nil
+ })
+}
+
+type flagged struct {
+ short, long, descr, dflt string
+}
+
+func (f *flagged) name() string {
+ var name string
+ switch {
+ case len(f.short) > 0 && len(f.long) > 0:
+ name = fmt.Sprintf("-%s, --%s", f.short, f.long)
+ case len(f.long) > 0:
+ name = fmt.Sprintf("--%s", f.long)
+ case len(f.short) > 0:
+ name = fmt.Sprintf("-%s", f.short)
+ }
+
+ if f.long == "random" {
+		// `random` is special in that we will later assign a seed randomly
+		// if the user specifies `--random` without providing one,
+ // so mask the "default" value here to avoid UI confusion about
+ // what the value will end up being.
+ name += "[=SEED]"
+ } else if f.dflt != "true" && f.dflt != "false" {
+ name += "=" + f.dflt
+ }
+ return name
+}
+
+func usage(set *flag.FlagSet, w io.Writer) func() {
+ return func() {
+ var list []*flagged
+ var longest int
+ set.VisitAll(func(f *flag.Flag) {
+ var fl *flagged
+ for _, flg := range list {
+ if flg.descr == f.Usage {
+ fl = flg
+ break
+ }
+ }
+		if fl == nil {
+ fl = &flagged{
+ dflt: f.DefValue,
+ descr: f.Usage,
+ }
+ list = append(list, fl)
+ }
+ if len(f.Name) > 2 {
+ fl.long = f.Name
+ } else {
+ fl.short = f.Name
+ }
+ })
+
+ for _, f := range list {
+ if len(f.name()) > longest {
+ longest = len(f.name())
+ }
+ }
+
+ // prints an option or argument with a description, or only description
+ opt := func(name, desc string) string {
+ var ret []string
+ lines := strings.Split(desc, "\n")
+ ret = append(ret, s(2)+colors.Green(name)+s(longest+2-len(name))+lines[0])
+ if len(lines) > 1 {
+ for _, ln := range lines[1:] {
+ ret = append(ret, s(2)+s(longest+2)+ln)
+ }
+ }
+ return strings.Join(ret, "\n")
+ }
+
+ // --- GENERAL ---
+ fmt.Fprintln(w, colors.Yellow("Usage:"))
+ fmt.Fprintf(w, s(2)+"go test [options]\n\n")
+
+ // --- OPTIONS ---
+ fmt.Fprintln(w, colors.Yellow("Options:"))
+ for _, f := range list {
+ fmt.Fprintln(w, opt(f.name(), f.descr))
+ }
+ fmt.Fprintln(w, "")
+ }
+}
+
+// randomSeed implements `flag.Value`, see https://golang.org/pkg/flag/#Value
+type randomSeed struct {
+ ref *int64
+}
+
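+// Set parses the flag value. The flag package passes "true" for a bare
+// --random (see IsBoolFlag), which triggers a freshly generated seed;
+// an explicit integer value is used as the seed directly.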
+func (rs *randomSeed) Set(s string) error {
+ if s == "true" {
+ *rs.ref = makeRandomSeed()
+ return nil
+ }
+
+ if s == "false" {
+ *rs.ref = 0
+ return nil
+ }
+
+ i, err := strconv.ParseInt(s, 10, 64)
+ *rs.ref = i
+ return err
+}
+
+func (rs *randomSeed) String() string {
+ if rs.ref == nil {
+ return "0"
+ }
+ return strconv.FormatInt(*rs.ref, 10)
+}
+
+// If a Value has an IsBoolFlag() bool method returning true, the command-line
+// parser makes -name equivalent to -name=true rather than using the next
+// command-line argument.
+func (rs *randomSeed) IsBoolFlag() bool {
+ return *rs.ref == 0
+}
diff --git a/vendor/github.com/cucumber/godog/flags_v0110.go b/vendor/github.com/cucumber/godog/flags_v0110.go
new file mode 100644
index 000000000..eddf0279d
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/flags_v0110.go
@@ -0,0 +1,33 @@
+package godog
+
+import (
+ "errors"
+ "flag"
+ "math/rand"
+ "time"
+
+ "github.com/spf13/pflag"
+
+ "github.com/cucumber/godog/internal/flags"
+)
+
+// makeRandomSeed assigns a convenient pseudo-random seed value.
+// The resulting seed will be between `1-99999` for later ease of specification.
+func makeRandomSeed() int64 {
+ return rand.New(rand.NewSource(time.Now().UTC().UnixNano())).Int63n(99998) + 1
+}
+
+func flagSet(opt *Options) *pflag.FlagSet {
+ set := pflag.NewFlagSet("godog", pflag.ExitOnError)
+ flags.BindRunCmdFlags("", set, opt)
+ pflag.ErrHelp = errors.New("godog: help requested")
+ return set
+}
+
+// BindCommandLineFlags binds godog flags to the command line flag set,
+// prefixed by the given prefix, without overriding usage.
+func BindCommandLineFlags(prefix string, opts *Options) {
+ flagSet := pflag.CommandLine
+ flags.BindRunCmdFlags(prefix, flagSet, opts)
+ pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+}
diff --git a/vendor/github.com/cucumber/godog/fmt.go b/vendor/github.com/cucumber/godog/fmt.go
new file mode 100644
index 000000000..f30f9f895
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/fmt.go
@@ -0,0 +1,124 @@
+package godog
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/cucumber/godog/colors"
+ "github.com/cucumber/godog/formatters"
+ internal_fmt "github.com/cucumber/godog/internal/formatters"
+ "github.com/cucumber/godog/internal/models"
+ "github.com/cucumber/godog/internal/storage"
+)
+
+// FindFmt searches the available registered formatters
+// and returns the FormatterFunc matching the given
+// format name, or nil otherwise.
+func FindFmt(name string) FormatterFunc {
+ return formatters.FindFmt(name)
+}
+
+// Format registers a feature suite output
+// formatter by given name, description and
+// FormatterFunc constructor function, to initialize
+// formatter with the output recorder.
+func Format(name, description string, f FormatterFunc) {
+ formatters.Format(name, description, f)
+}
+
+// AvailableFormatters gives a map of all
+// formatters registered with their name as key
+// and description as value
+func AvailableFormatters() map[string]string {
+ return formatters.AvailableFormatters()
+}
+
+// Formatter is an interface for feature runner
+// output summary presentation.
+//
+// New formatters may be created to represent
+// suite results in different ways. These new
+// formatters need to be registered with a
+// godog.Format function call.
+type Formatter = formatters.Formatter
+
+type storageFormatter interface {
+ SetStorage(*storage.Storage)
+}
+
+// FormatterFunc builds a formatter with given
+// suite name and io.Writer to record output
+type FormatterFunc = formatters.FormatterFunc
+
+func printStepDefinitions(steps []*models.StepDefinition, w io.Writer) {
+ var longest int
+ for _, def := range steps {
+ n := utf8.RuneCountInString(def.Expr.String())
+ if longest < n {
+ longest = n
+ }
+ }
+
+ for _, def := range steps {
+ n := utf8.RuneCountInString(def.Expr.String())
+ location := internal_fmt.DefinitionID(def)
+ spaces := strings.Repeat(" ", longest-n)
+ fmt.Fprintln(w,
+ colors.Yellow(def.Expr.String())+spaces,
+ colors.Bold(colors.Black)("# "+location))
+ }
+
+ if len(steps) == 0 {
+ fmt.Fprintln(w, "there were no contexts registered, could not find any step definition..")
+ }
+}
+
+// NewBaseFmt creates a new base formatter.
+func NewBaseFmt(suite string, out io.Writer) *BaseFmt {
+ return internal_fmt.NewBase(suite, out)
+}
+
+// NewProgressFmt creates a new progress formatter.
+func NewProgressFmt(suite string, out io.Writer) *ProgressFmt {
+ return internal_fmt.NewProgress(suite, out)
+}
+
+// NewPrettyFmt creates a new pretty formatter.
+func NewPrettyFmt(suite string, out io.Writer) *PrettyFmt {
+ return &PrettyFmt{Base: NewBaseFmt(suite, out)}
+}
+
+// NewEventsFmt creates a new event streaming formatter.
+func NewEventsFmt(suite string, out io.Writer) *EventsFmt {
+ return &EventsFmt{Base: NewBaseFmt(suite, out)}
+}
+
+// NewCukeFmt creates a new Cucumber JSON formatter.
+func NewCukeFmt(suite string, out io.Writer) *CukeFmt {
+ return &CukeFmt{Base: NewBaseFmt(suite, out)}
+}
+
+// NewJUnitFmt creates a new JUnit formatter.
+func NewJUnitFmt(suite string, out io.Writer) *JUnitFmt {
+ return &JUnitFmt{Base: NewBaseFmt(suite, out)}
+}
+
+// BaseFmt exports Base formatter.
+type BaseFmt = internal_fmt.Base
+
+// ProgressFmt exports Progress formatter.
+type ProgressFmt = internal_fmt.Progress
+
+// PrettyFmt exports Pretty formatter.
+type PrettyFmt = internal_fmt.Pretty
+
+// EventsFmt exports Events formatter.
+type EventsFmt = internal_fmt.Events
+
+// CukeFmt exports Cucumber JSON formatter.
+type CukeFmt = internal_fmt.Cuke
+
+// JUnitFmt exports JUnit formatter.
+type JUnitFmt = internal_fmt.JUnit
diff --git a/vendor/github.com/cucumber/godog/formatters/fmt.go b/vendor/github.com/cucumber/godog/formatters/fmt.go
new file mode 100644
index 000000000..973cf11b8
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/formatters/fmt.go
@@ -0,0 +1,108 @@
+package formatters
+
+import (
+ "io"
+ "regexp"
+
+ messages "github.com/cucumber/messages/go/v21"
+)
+
+type registeredFormatter struct {
+ name string
+ description string
+ fmt FormatterFunc
+}
+
+var registeredFormatters []*registeredFormatter
+
+// FindFmt searches the available registered formatters
+// and returns the FormatterFunc matching the given
+// format name, or nil otherwise.
+func FindFmt(name string) FormatterFunc {
+ for _, el := range registeredFormatters {
+ if el.name == name {
+ return el.fmt
+ }
+ }
+
+ return nil
+}
+
+// Format registers a feature suite output
+// formatter by given name, description and
+// FormatterFunc constructor function, to initialize
+// formatter with the output recorder.
+func Format(name, description string, f FormatterFunc) {
+	registeredFormatters = append(registeredFormatters, &registeredFormatter{
+ name: name,
+ fmt: f,
+ description: description,
+ })
+}
+
+// AvailableFormatters gives a map of all
+// formatters registered with their name as key
+// and description as value
+func AvailableFormatters() map[string]string {
+ fmts := make(map[string]string, len(registeredFormatters))
+
+ for _, f := range registeredFormatters {
+ fmts[f.name] = f.description
+ }
+
+ return fmts
+}
+
+// Formatter is an interface for feature runner
+// output summary presentation.
+//
+// New formatters may be created to represent
+// suite results in different ways. These new
+// formatters need to be registered with a
+// godog.Format function call.
+type Formatter interface {
+ TestRunStarted()
+ Feature(*messages.GherkinDocument, string, []byte)
+ Pickle(*messages.Pickle)
+ Defined(*messages.Pickle, *messages.PickleStep, *StepDefinition)
+ Failed(*messages.Pickle, *messages.PickleStep, *StepDefinition, error)
+ Passed(*messages.Pickle, *messages.PickleStep, *StepDefinition)
+ Skipped(*messages.Pickle, *messages.PickleStep, *StepDefinition)
+ Undefined(*messages.Pickle, *messages.PickleStep, *StepDefinition)
+ Pending(*messages.Pickle, *messages.PickleStep, *StepDefinition)
+ Ambiguous(*messages.Pickle, *messages.PickleStep, *StepDefinition, error)
+ Summary()
+}
+
+// FlushFormatter is a `Formatter` but can be flushed.
+type FlushFormatter interface {
+ Formatter
+ Flush()
+}
+
+// FormatterFunc builds a formatter with given
+// suite name and io.Writer to record output
+type FormatterFunc func(string, io.Writer) Formatter
+
+// StepDefinition is a registered step definition.
+// It contains a step Handler and the regexp which
+// is used to match a step, as well as the args
+// matched by the last executed step.
+//
+// This structure is passed to the formatter
+// when a step is matched and has either failed
+// or succeeded.
+type StepDefinition struct {
+ Expr *regexp.Regexp
+ Handler interface{}
+ Keyword Keyword
+}
+
+type Keyword int64
+
+const (
+ Given Keyword = iota
+ When
+ Then
+ None
+)
diff --git a/vendor/github.com/cucumber/godog/godog.go b/vendor/github.com/cucumber/godog/godog.go
new file mode 100644
index 000000000..dda501471
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/godog.go
@@ -0,0 +1,43 @@
+/*
+Package godog is the official Cucumber BDD framework for Golang; it merges specification
+and test documentation into one cohesive whole.
+
+Godog does not interfere with the standard "go test" command and its behavior.
+You can leverage both frameworks to functionally test your application while
+maintaining all test related source code in *_test.go files.
+
+Godog acts similarly to the go test command. It uses the go
+compiler and linker tools to produce a test executable. Godog
+contexts need to be exported the same way as Test functions are for go test.
+
+For example, imagine you're about to create the famous UNIX ls command.
+Before you begin, you describe how the feature should work; see the example below.
+
+Example:
+
+ Feature: ls
+ In order to see the directory structure
+ As a UNIX user
+ I need to be able to list the current directory's contents
+
+ Scenario:
+ Given I am in a directory "test"
+ And I have a file named "foo"
+ And I have a file named "bar"
+ When I run ls
+ Then I should get output:
+ """
+ bar
+ foo
+ """
+
+Now, wouldn't it be cool if something could read this sentence and use it to actually
+run a test against the ls command? Hey, that's exactly what this package does!
+As you'll see, Godog is easy to learn, quick to use, and will put the fun back into tests.
+
+Godog was inspired by Behat and Cucumber; the above description is taken from their documentation.
+*/
+package godog
+
+// Version of package - based on Semantic Versioning 2.0.0 http://semver.org/
+var Version = "v0.0.0-dev"
diff --git a/vendor/github.com/cucumber/godog/internal/builder/ast.go b/vendor/github.com/cucumber/godog/internal/builder/ast.go
new file mode 100644
index 000000000..c4f82407c
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/builder/ast.go
@@ -0,0 +1,31 @@
+package builder
+
+import "go/ast"
+
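+// astContexts collects the names of top-level functions in f whose
+// parameters include a *selectName or *godog.selectName value, i.e. the
+// godog context initializers to register.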
+func astContexts(f *ast.File, selectName string) []string {
+ var contexts []string
+ for _, d := range f.Decls {
+ switch fun := d.(type) {
+ case *ast.FuncDecl:
+ for _, param := range fun.Type.Params.List {
+ switch expr := param.Type.(type) {
+ case *ast.StarExpr:
+ switch x := expr.X.(type) {
+ case *ast.Ident:
+ if x.Name == selectName {
+ contexts = append(contexts, fun.Name.Name)
+ }
+ case *ast.SelectorExpr:
+ switch t := x.X.(type) {
+ case *ast.Ident:
+ if t.Name == "godog" && x.Sel.Name == selectName {
+ contexts = append(contexts, fun.Name.Name)
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return contexts
+}
diff --git a/vendor/github.com/cucumber/godog/internal/builder/builder.go b/vendor/github.com/cucumber/godog/internal/builder/builder.go
new file mode 100644
index 000000000..4cd4928f5
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/builder/builder.go
@@ -0,0 +1,490 @@
+package builder
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "text/template"
+ "time"
+ "unicode"
+)
+
+var (
+ tooldir = findToolDir()
+ compiler = filepath.Join(tooldir, "compile")
+ linker = filepath.Join(tooldir, "link")
+ gopaths = filepath.SplitList(build.Default.GOPATH)
+ godogImportPath = "github.com/cucumber/godog"
+
+ // godep
+ runnerTemplate = template.Must(template.New("testmain").Parse(`package main
+
+import (
+ "github.com/cucumber/godog"
+ {{if or .TestSuiteContexts .ScenarioContexts}}_test "{{.ImportPath}}"{{end}}
+ {{if or .XTestSuiteContexts .XScenarioContexts}}_xtest "{{.ImportPath}}_test"{{end}}
+ {{if or .XTestSuiteContexts .XScenarioContexts}}"testing/internal/testdeps"{{end}}
+ "os"
+)
+
+{{if or .XTestSuiteContexts .XScenarioContexts}}
+func init() {
+ testdeps.ImportPath = "{{.ImportPath}}"
+}
+{{end}}
+
+func main() {
+ status := godog.TestSuite{
+ Name: "{{ .Name }}",
+ TestSuiteInitializer: func (ctx *godog.TestSuiteContext) {
+ os.Setenv("GODOG_TESTED_PACKAGE", "{{.ImportPath}}")
+ {{range .TestSuiteContexts}}
+ _test.{{ . }}(ctx)
+ {{end}}
+ {{range .XTestSuiteContexts}}
+ _xtest.{{ . }}(ctx)
+ {{end}}
+ },
+ ScenarioInitializer: func (ctx *godog.ScenarioContext) {
+ {{range .ScenarioContexts}}
+ _test.{{ . }}(ctx)
+ {{end}}
+ {{range .XScenarioContexts}}
+ _xtest.{{ . }}(ctx)
+ {{end}}
+ },
+ }.Run()
+
+ os.Exit(status)
+}`))
+
+ // temp file for import
+ tempFileTemplate = template.Must(template.New("temp").Parse(`package {{.Name}}
+
+import "github.com/cucumber/godog"
+
+var _ = godog.Version
+`))
+)
+
+// Build creates a test package, like the go test command does, at the
+// given target path. If there are no go files in the tested directory,
+// it simply builds a godog executable to scan features.
+//
+// If there are go test files, it first builds a test
+// package with the standard go test command.
+//
+// Finally it generates a godog suite executable which
+// registers exported godog contexts from the test files
+// of the tested package.
+//
+// Returns an error if the suite executable could not be built.
+func Build(bin string) error {
+ abs, err := filepath.Abs(".")
+ if err != nil {
+ return err
+ }
+
+	// we allow the package to be nil, e.g. when godog is run
+	// in a directory that contains only feature files
+ pkg := importPackage(abs)
+ src, err := buildTestMain(pkg)
+ if err != nil {
+ return err
+ }
+
+ // may need to produce temp file for godog dependency
+ srcTemp, err := buildTempFile(pkg)
+ if err != nil {
+ return err
+ }
+
+ if srcTemp != nil {
+		// @TODO: in case of modules we cannot build it ourselves, we need this hacky option
+ pathTemp := filepath.Join(abs, "godog_dependency_file_test.go")
+ err = ioutil.WriteFile(pathTemp, srcTemp, 0644)
+ if err != nil {
+ return err
+ }
+ defer os.Remove(pathTemp)
+ }
+
+ workdir := ""
+ testdir := workdir
+
+ // build and compile the tested package.
+ // generated test executable will be removed
+ // since we do not need it for godog suite.
+ // we also print back the temp WORK directory
+ // go has built. We will reuse it for our suite workdir.
+ temp := fmt.Sprintf(filepath.Join("%s", "temp-%d.test"), os.TempDir(), time.Now().UnixNano())
+ if os.Getenv("GO111MODULE") != "off" {
+ modTidyOutput, err := exec.Command("go", "mod", "tidy").CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("failed to tidy modules in tested package: %s, reason: %v, output: %s", abs, err, string(modTidyOutput))
+ }
+ }
+ testOutput, err := exec.Command("go", "test", "-c", "-work", "-o", temp).CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("failed to compile tested package: %s, reason: %v, output: %s", abs, err, string(testOutput))
+ }
+ defer os.Remove(temp)
+
+ // extract go-build temporary directory as our workdir
+ linesOut := strings.Split(strings.TrimSpace(string(testOutput)), "\n")
+	// it may have some compilation warnings in the output, but these are not
+	// considered to be errors, since the command exit status is 0
+ for _, ln := range linesOut {
+ if !strings.HasPrefix(ln, "WORK=") {
+ continue
+ }
+ workdir = strings.Replace(ln, "WORK=", "", 1)
+ break
+ }
+
+ if strings.Contains(string(testOutput), "[no test files]") {
+ return fmt.Errorf("incorrect project structure: no test files found")
+ }
+
+ // may not locate it in output
+ if workdir == testdir {
+ return fmt.Errorf("expected WORK dir path to be present in output: %s", string(testOutput))
+ }
+
+ // check whether workdir exists
+ stats, err := os.Stat(workdir)
+ if os.IsNotExist(err) {
+ return fmt.Errorf("expected WORK dir: %s to be available", workdir)
+ }
+
+ if !stats.IsDir() {
+ return fmt.Errorf("expected WORK dir: %s to be directory", workdir)
+ }
+ testdir = filepath.Join(workdir, "b001")
+ defer os.RemoveAll(workdir)
+
+ // replace _testmain.go file with our own
+ testmain := filepath.Join(testdir, "_testmain.go")
+ err = ioutil.WriteFile(testmain, src, 0644)
+ if err != nil {
+ return err
+ }
+
+ // godog package may be vendored and may need importmap
+ vendored := maybeVendoredGodog()
+
+ // compile godog testmain package archive
+ // we do not depend on CGO so a lot of checks are not necessary
+ linkerCfg := filepath.Join(testdir, "importcfg.link")
+ compilerCfg := linkerCfg
+
+ if vendored != nil {
+ data, err := ioutil.ReadFile(linkerCfg)
+ if err != nil {
+ return err
+ }
+
+ data = append(data, []byte(fmt.Sprintf("importmap %s=%s\n", godogImportPath, vendored.ImportPath))...)
+ compilerCfg = filepath.Join(testdir, "importcfg")
+
+ err = ioutil.WriteFile(compilerCfg, data, 0644)
+ if err != nil {
+ return err
+ }
+ }
+
+ testMainPkgOut := filepath.Join(testdir, "main.a")
+ args := []string{
+ "-o", testMainPkgOut,
+ "-importcfg", compilerCfg,
+ "-p", "main",
+ "-complete",
+ }
+
+ if err := filterImportCfg(compilerCfg); err != nil {
+ return err
+ }
+
+ args = append(args, "-pack", testmain)
+ cmd := exec.Command(compiler, args...)
+ cmd.Env = os.Environ()
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("failed to compile testmain package: %v - output: %s", err, string(out))
+ }
+
+ // link test suite executable
+ args = []string{
+ "-o", bin,
+ "-importcfg", linkerCfg,
+ "-buildmode=exe",
+ }
+ args = append(args, testMainPkgOut)
+ cmd = exec.Command(linker, args...)
+ cmd.Env = os.Environ()
+
+ out, err = cmd.CombinedOutput()
+ if err != nil {
+ msg := `failed to link test executable:
+ reason: %s
+ command: %s`
+ return fmt.Errorf(msg, string(out), linker+" '"+strings.Join(args, "' '")+"'")
+ }
+
+ return nil
+}
+
+// filterImportCfg strips unsupported lines from imports configuration.
+func filterImportCfg(path string) error {
+ orig, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("failed to read %s: %w", path, err)
+ }
+
+ res := ""
+ for _, l := range strings.Split(string(orig), "\n") {
+ if !strings.HasPrefix(l, "modinfo") {
+ res += l + "\n"
+ }
+ }
+ err = ioutil.WriteFile(path, []byte(res), 0600)
+ if err != nil {
+ return fmt.Errorf("failed to write %s: %w", path, err)
+ }
+
+ return nil
+}
+
+func maybeVendoredGodog() *build.Package {
+ dir, err := filepath.Abs(".")
+ if err != nil {
+ return nil
+ }
+
+ for _, gopath := range gopaths {
+ gopath = filepath.Join(gopath, "src")
+ for strings.HasPrefix(dir, gopath) && dir != gopath {
+ pkg, err := build.ImportDir(filepath.Join(dir, "vendor", godogImportPath), 0)
+ if err != nil {
+ dir = filepath.Dir(dir)
+ continue
+ }
+ return pkg
+ }
+ }
+ return nil
+}
+
+func normaliseLocalImportPath(dir string) string {
+ return path.Join("_", strings.Map(makeImportValid, filepath.ToSlash(dir)))
+}
+func importPackage(dir string) *build.Package {
+ pkg, _ := build.ImportDir(dir, 0)
+
+ // normalize import path for local import packages
+ // taken from go source code
+ // see: https://github.com/golang/go/blob/go1.7rc5/src/cmd/go/pkg.go#L279
+ if pkg != nil && pkg.ImportPath == "." {
+ pkg.ImportPath = normaliseLocalImportPath(dir)
+ }
+
+ return pkg
+}
+
+// from go src
+func makeImportValid(r rune) rune {
+ // Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport.
+ const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+ if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+ return '_'
+ }
+ return r
+}
+
+// build temporary file content if godog
+// package is not present in currently tested package
+func buildTempFile(pkg *build.Package) ([]byte, error) {
+ shouldBuild := true
+ var name string
+ if pkg != nil {
+ name = pkg.Name
+ all := pkg.Imports
+ all = append(all, pkg.TestImports...)
+ all = append(all, pkg.XTestImports...)
+ for _, imp := range all {
+ if imp == godogImportPath {
+ shouldBuild = false
+ break
+ }
+ }
+
+		// maybe we are testing the godog package on its own
+ if name == "godog" {
+ if parseImport(pkg.ImportPath, pkg.Root) == godogImportPath {
+ shouldBuild = false
+ }
+ }
+ }
+
+ if name == "" {
+ name = "main"
+ }
+
+ if !shouldBuild {
+ return nil, nil
+ }
+
+ data := struct{ Name string }{name}
+ var buf bytes.Buffer
+ if err := tempFileTemplate.Execute(&buf, data); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// buildTestMain if given package is valid
+// it scans test files for contexts
+// and produces a testmain source code.
+func buildTestMain(pkg *build.Package) ([]byte, error) {
+ var (
+ ctxs, xctxs contexts
+ err error
+ name = "main"
+ importPath string
+ )
+
+ if nil != pkg {
+ if ctxs, err = processPackageTestFiles(pkg.TestGoFiles); err != nil {
+ return nil, err
+ }
+
+ if xctxs, err = processPackageTestFiles(pkg.XTestGoFiles); err != nil {
+ return nil, err
+ }
+
+ importPath = parseImport(pkg.ImportPath, pkg.Root)
+ name = pkg.Name
+ } else {
+ name = "main"
+ }
+
+ data := struct {
+ Name string
+ ImportPath string
+ TestSuiteContexts []string
+ ScenarioContexts []string
+ XTestSuiteContexts []string
+ XScenarioContexts []string
+ }{
+ Name: name,
+ ImportPath: importPath,
+ TestSuiteContexts: ctxs.testSuiteCtxs,
+ ScenarioContexts: ctxs.scenarioCtxs,
+ XTestSuiteContexts: xctxs.testSuiteCtxs,
+ XScenarioContexts: xctxs.scenarioCtxs,
+ }
+
+ var buf bytes.Buffer
+ if err = runnerTemplate.Execute(&buf, data); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// parseImport parses the import path to deal with go module.
+func parseImport(rawPath, rootPath string) string {
+ // with go > 1.11 and go module enabled out of the GOPATH,
+ // the import path begins with an underscore and the GOPATH is unknown on build.
+ if rootPath != "" {
+ // go < 1.11 or it's a module inside the GOPATH
+ return rawPath
+ }
+ // for module support, query the module import path
+ cmd := exec.Command("go", "list", "-m", "-json")
+ out, err := cmd.StdoutPipe()
+ if err != nil {
+ // Unable to read stdout
+ return rawPath
+ }
+ if cmd.Start() != nil {
+		// not using modules
+ return rawPath
+ }
+ var mod struct {
+ Dir string `json:"Dir"`
+ Path string `json:"Path"`
+ }
+ if json.NewDecoder(out).Decode(&mod) != nil {
+ // Unexpected result
+ return rawPath
+ }
+ if cmd.Wait() != nil {
+ return rawPath
+ }
+ // Concatenates the module path with the current sub-folders if needed
+ return mod.Path + filepath.ToSlash(strings.TrimPrefix(rawPath, normaliseLocalImportPath(mod.Dir)))
+}
+
+type contexts struct {
+ deprecatedFeatureCtxs []string
+ testSuiteCtxs []string
+ scenarioCtxs []string
+}
+
+func (ctxs contexts) validate() error {
+ var allCtxs []string
+ allCtxs = append(allCtxs, ctxs.deprecatedFeatureCtxs...)
+ allCtxs = append(allCtxs, ctxs.testSuiteCtxs...)
+ allCtxs = append(allCtxs, ctxs.scenarioCtxs...)
+
+ var failed []string
+ for _, ctx := range allCtxs {
+ runes := []rune(ctx)
+ if unicode.IsLower(runes[0]) {
+ expected := append([]rune{unicode.ToUpper(runes[0])}, runes[1:]...)
+ failed = append(failed, fmt.Sprintf("%s - should be: %s", ctx, string(expected)))
+ }
+ }
+
+ if len(failed) > 0 {
+ return fmt.Errorf("godog contexts must be exported:\n\t%s", strings.Join(failed, "\n\t"))
+ }
+
+ return nil
+}
+
+// processPackageTestFiles runs through ast of each test
+// file pack and looks for godog suite contexts to register
+// on run
+func processPackageTestFiles(packs ...[]string) (ctxs contexts, _ error) {
+ fset := token.NewFileSet()
+ for _, pack := range packs {
+ for _, testFile := range pack {
+ node, err := parser.ParseFile(fset, testFile, nil, 0)
+ if err != nil {
+ return ctxs, err
+ }
+
+ ctxs.testSuiteCtxs = append(ctxs.testSuiteCtxs, astContexts(node, "TestSuiteContext")...)
+ ctxs.scenarioCtxs = append(ctxs.scenarioCtxs, astContexts(node, "ScenarioContext")...)
+ }
+ }
+
+ return ctxs, ctxs.validate()
+}
+
+func findToolDir() string {
+	// prefer the tool dir reported by the go command; if the
+	// command fails, fall back to the build.ToolDir default
+	if out, err := exec.Command("go", "env", "GOTOOLDIR").Output(); err == nil {
+		return filepath.Clean(strings.TrimSpace(string(out)))
+	}
+	return filepath.Clean(build.ToolDir)
+}
diff --git a/vendor/github.com/cucumber/godog/internal/flags/flags.go b/vendor/github.com/cucumber/godog/internal/flags/flags.go
new file mode 100644
index 000000000..1bd67e591
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/flags/flags.go
@@ -0,0 +1,49 @@
+package flags
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// BindRunCmdFlags is an internal func to bind run subcommand flags.
+func BindRunCmdFlags(prefix string, flagSet *pflag.FlagSet, opts *Options) {
+ if opts.Concurrency == 0 {
+ opts.Concurrency = 1
+ }
+
+ if opts.Format == "" {
+ opts.Format = "pretty"
+ }
+
+ flagSet.BoolVar(&opts.NoColors, prefix+"no-colors", opts.NoColors, "disable ansi colors")
+ flagSet.IntVarP(&opts.Concurrency, prefix+"concurrency", "c", opts.Concurrency, "run the test suite with concurrency")
+ flagSet.StringVarP(&opts.Tags, prefix+"tags", "t", opts.Tags, `filter scenarios by tags, expression can be:
+ "@wip" run all scenarios with wip tag
+ "~@wip" exclude all scenarios with wip tag
+ "@wip && ~@new" run wip scenarios, but exclude new
+ "@wip,@undone" run wip or undone scenarios`)
+ flagSet.StringVarP(&opts.Format, prefix+"format", "f", opts.Format, `will write a report according to the selected formatter
+
+usage:
+ -f <formatter>
+ will use the formatter and write the report on stdout
+ -f <formatter>:<file_path>
+ will use the formatter and write the report to the file path
+
+built-in formatters are:
+ progress prints a character per step
+ cucumber produces a Cucumber JSON report
+ events produces JSON event stream, based on spec: 0.1.0
+ junit produces JUnit compatible XML report
+ pretty prints every feature with runtime statuses
+ `)
+
+ flagSet.BoolVarP(&opts.ShowStepDefinitions, prefix+"definitions", "d", opts.ShowStepDefinitions, "print all available step definitions")
+ flagSet.BoolVar(&opts.StopOnFailure, prefix+"stop-on-failure", opts.StopOnFailure, "stop processing on first failed scenario")
+ flagSet.BoolVar(&opts.Strict, prefix+"strict", opts.Strict, "fail suite when there are pending or undefined or ambiguous steps")
+
+ flagSet.Int64Var(&opts.Randomize, prefix+"random", opts.Randomize, `randomly shuffle the scenario execution order
+ --random
+specify SEED to reproduce the shuffling from a previous run
+ --random=5738`)
+ flagSet.Lookup(prefix + "random").NoOptDefVal = "-1"
+}
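+
+// A minimal usage sketch (hypothetical flag-set name and arguments):
+//
+//	var opts Options
+//	fs := pflag.NewFlagSet("godog", pflag.ExitOnError)
+//	BindRunCmdFlags("", fs, &opts)
+//	_ = fs.Parse([]string{"--format", "progress", "--concurrency", "2"})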
diff --git a/vendor/github.com/cucumber/godog/internal/flags/options.go b/vendor/github.com/cucumber/godog/internal/flags/options.go
new file mode 100644
index 000000000..40acea652
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/flags/options.go
@@ -0,0 +1,88 @@
+package flags
+
+import (
+ "context"
+ "io"
+ "io/fs"
+ "testing"
+)
+
+// Options are suite run options;
+// flags are mapped to these options.
+//
+// It can also be used together with godog.RunWithOptions
+// to run a test suite from Go source directly.
+//
+// See the flags for more details.
+type Options struct {
+ // Print step definitions found and exit
+ ShowStepDefinitions bool
+
+ // Randomize, if not `0`, will be used to run scenarios in a random order.
+ //
+ // Randomizing scenario order is especially helpful for detecting
+ // situations where you have state leaking between scenarios, which can
+ // cause flickering or fragile tests.
+ //
+ // The default value of `0` means "do not randomize".
+ //
+ // The magic value of `-1` means "pick a random seed for me", and godog will
+ // assign a seed on its own during the `RunWithOptions` phase, similar to if
+ // you specified `--random` on the command line.
+ //
+ // Any other value will be used as the random seed for shuffling. Re-using the
+ // same seed will allow you to reproduce the shuffle order of a previous run
+ // to isolate an error condition.
+ Randomize int64
+
+ // Stops on the first failure
+ StopOnFailure bool
+
+ // Fail suite when there are pending or undefined or ambiguous steps
+ Strict bool
+
+ // Forces ansi color stripping
+ NoColors bool
+
+ // Various filters for scenarios parsed
+ // from feature files
+ Tags string
+
+ // Dialect to be used to parse feature files. If not set, default to "en".
+ Dialect string
+
+ // The formatter name
+ Format string
+
+ // Concurrency rate; not all formatters accept this
+ Concurrency int
+
+ // All feature file paths
+ Paths []string
+
+ // Where it should print formatter output
+ Output io.Writer
+
+ // DefaultContext is used as initial context instead of context.Background().
+ DefaultContext context.Context
+
+ // TestingT runs scenarios as subtests.
+ TestingT *testing.T
+
+ // FeatureContents allows passing in each feature manually
+ // where the contents of each feature is stored as a byte slice
+ // in a map entry
+ FeatureContents []Feature
+
+ // FS allows passing in an `fs.FS` to read features from, such as an `embed.FS`
+ // or os.DirFS(string).
+ FS fs.FS
+
+ // ShowHelp enables suite to show CLI flags usage help and exit.
+ ShowHelp bool
+}
+
+type Feature struct {
+ Name string
+ Contents []byte
+}
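+
+// A minimal sketch of filling Options directly (values are illustrative):
+//
+//	opts := Options{
+//		Format:      "pretty",
+//		Concurrency: 1,
+//		Paths:       []string{"features"},
+//		Randomize:   -1, // ask godog to pick a seed
+//	}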
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt.go
new file mode 100644
index 000000000..5530c0c24
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt.go
@@ -0,0 +1,104 @@
+package formatters
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+
+ messages "github.com/cucumber/messages/go/v21"
+
+ "github.com/cucumber/godog/colors"
+ "github.com/cucumber/godog/internal/models"
+ "github.com/cucumber/godog/internal/utils"
+)
+
+var (
+ red = colors.Red
+ redb = colors.Bold(colors.Red)
+ green = colors.Green
+ blackb = colors.Bold(colors.Black)
+ yellow = colors.Yellow
+ cyan = colors.Cyan
+ cyanb = colors.Bold(colors.Cyan)
+ whiteb = colors.Bold(colors.White)
+)
+
+// repeats a space n times
+var s = utils.S
+
+var (
+ passed = models.Passed
+ failed = models.Failed
+ skipped = models.Skipped
+ undefined = models.Undefined
+ pending = models.Pending
+ ambiguous = models.Ambiguous
+)
+
+type sortFeaturesByName []*models.Feature
+
+func (s sortFeaturesByName) Len() int { return len(s) }
+func (s sortFeaturesByName) Less(i, j int) bool { return s[i].Feature.Name < s[j].Feature.Name }
+func (s sortFeaturesByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+type sortPicklesByID []*messages.Pickle
+
+func (s sortPicklesByID) Len() int { return len(s) }
+func (s sortPicklesByID) Less(i, j int) bool {
+ iID := mustConvertStringToInt(s[i].Id)
+ jID := mustConvertStringToInt(s[j].Id)
+ return iID < jID
+}
+func (s sortPicklesByID) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+type sortPickleStepResultsByPickleStepID []models.PickleStepResult
+
+func (s sortPickleStepResultsByPickleStepID) Len() int { return len(s) }
+func (s sortPickleStepResultsByPickleStepID) Less(i, j int) bool {
+ iID := mustConvertStringToInt(s[i].PickleStepID)
+ jID := mustConvertStringToInt(s[j].PickleStepID)
+ return iID < jID
+}
+func (s sortPickleStepResultsByPickleStepID) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func mustConvertStringToInt(s string) int {
+ i, err := strconv.Atoi(s)
+ if err != nil {
+ panic(err)
+ }
+
+ return i
+}
+
+// DefinitionID returns a step definition identifier in the form "file:line -> function".
+func DefinitionID(sd *models.StepDefinition) string {
+ ptr := sd.HandlerValue.Pointer()
+ f := runtime.FuncForPC(ptr)
+ dir := filepath.Dir(sd.File)
+ fn := strings.Replace(f.Name(), dir, "", -1)
+ var parts []string
+ for _, gr := range matchFuncDefRef.FindAllStringSubmatch(fn, -1) {
+ parts = append(parts, strings.Trim(gr[1], "_."))
+ }
+ if len(parts) > 0 {
+ // case when suite is a structure with methods
+ fn = strings.Join(parts, ".")
+ } else {
+ // case when steps are just plain funcs
+ fn = strings.Trim(fn, "_.")
+ }
+
+ if pkg := os.Getenv("GODOG_TESTED_PACKAGE"); len(pkg) > 0 {
+ fn = strings.Replace(fn, pkg, "", 1)
+ fn = strings.TrimLeft(fn, ".")
+ fn = strings.Replace(fn, "..", ".", -1)
+ }
+
+ return fmt.Sprintf("%s:%d -> %s", filepath.Base(sd.File), sd.Line, fn)
+}
+
+var matchFuncDefRef = regexp.MustCompile(`\(([^\)]+)\)`)
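+
+// For illustration (hypothetical names), DefinitionID output takes the
+// shape "steps_test.go:12 -> iEatApples": the base file name and line of
+// the definition, followed by the trimmed function or method name.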
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_base.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_base.go
new file mode 100644
index 000000000..607a1c065
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_base.go
@@ -0,0 +1,272 @@
+package formatters
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+
+ messages "github.com/cucumber/messages/go/v21"
+
+ "github.com/cucumber/godog/colors"
+ "github.com/cucumber/godog/formatters"
+ "github.com/cucumber/godog/internal/models"
+ "github.com/cucumber/godog/internal/storage"
+ "github.com/cucumber/godog/internal/utils"
+)
+
+// BaseFormatterFunc implements the FormatterFunc for the base formatter.
+func BaseFormatterFunc(suite string, out io.Writer) formatters.Formatter {
+ return NewBase(suite, out)
+}
+
+// NewBase creates a new base formatter.
+func NewBase(suite string, out io.Writer) *Base {
+ return &Base{
+ suiteName: suite,
+ indent: 2,
+ out: out,
+ Lock: new(sync.Mutex),
+ }
+}
+
+// Base is a base formatter.
+type Base struct {
+ suiteName string
+ out io.Writer
+ indent int
+
+ Storage *storage.Storage
+ Lock *sync.Mutex
+}
+
+// SetStorage assigns gherkin data storage.
+func (f *Base) SetStorage(st *storage.Storage) {
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.Storage = st
+}
+
+// TestRunStarted is triggered on test start.
+func (f *Base) TestRunStarted() {}
+
+// Feature receives gherkin document.
+func (f *Base) Feature(*messages.GherkinDocument, string, []byte) {}
+
+// Pickle receives scenario.
+func (f *Base) Pickle(*messages.Pickle) {}
+
+// Defined receives step definition.
+func (f *Base) Defined(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) {
+}
+
+// Passed captures passed step.
+func (f *Base) Passed(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) {}
+
+// Skipped captures skipped step.
+func (f *Base) Skipped(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) {
+}
+
+// Undefined captures undefined step.
+func (f *Base) Undefined(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) {
+}
+
+// Failed captures failed step.
+func (f *Base) Failed(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition, error) {
+}
+
+// Pending captures pending step.
+func (f *Base) Pending(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) {
+}
+
+// Ambiguous captures ambiguous step.
+func (f *Base) Ambiguous(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition, error) {
+}
+
+// Summary renders summary information.
+func (f *Base) Summary() {
+ var totalSc, passedSc, undefinedSc int
+ var totalSt, passedSt, failedSt, skippedSt, pendingSt, undefinedSt, ambiguousSt int
+
+ pickleResults := f.Storage.MustGetPickleResults()
+ for _, pr := range pickleResults {
+ var prStatus models.StepResultStatus
+ totalSc++
+
+ pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleID(pr.PickleID)
+
+ if len(pickleStepResults) == 0 {
+ prStatus = undefined
+ }
+
+ for _, sr := range pickleStepResults {
+ totalSt++
+
+ switch sr.Status {
+ case passed:
+ passedSt++
+ case failed:
+ prStatus = failed
+ failedSt++
+ case ambiguous:
+ prStatus = ambiguous
+ ambiguousSt++
+ case skipped:
+ skippedSt++
+ case undefined:
+ prStatus = undefined
+ undefinedSt++
+ case pending:
+ prStatus = pending
+ pendingSt++
+ }
+ }
+
+ if prStatus == passed {
+ passedSc++
+ } else if prStatus == undefined {
+ undefinedSc++
+ }
+ }
+
+ var steps, parts, scenarios []string
+ if passedSt > 0 {
+ steps = append(steps, green(fmt.Sprintf("%d passed", passedSt)))
+ }
+ if failedSt > 0 {
+ parts = append(parts, red(fmt.Sprintf("%d failed", failedSt)))
+ steps = append(steps, red(fmt.Sprintf("%d failed", failedSt)))
+ }
+ if pendingSt > 0 {
+ parts = append(parts, yellow(fmt.Sprintf("%d pending", pendingSt)))
+ steps = append(steps, yellow(fmt.Sprintf("%d pending", pendingSt)))
+ }
+ if ambiguousSt > 0 {
+ parts = append(parts, yellow(fmt.Sprintf("%d ambiguous", ambiguousSt)))
+ steps = append(steps, yellow(fmt.Sprintf("%d ambiguous", ambiguousSt)))
+ }
+ if undefinedSt > 0 {
+ parts = append(parts, yellow(fmt.Sprintf("%d undefined", undefinedSc)))
+ steps = append(steps, yellow(fmt.Sprintf("%d undefined", undefinedSt)))
+ } else if undefinedSc > 0 {
+ // there may be some scenarios without steps
+ parts = append(parts, yellow(fmt.Sprintf("%d undefined", undefinedSc)))
+ }
+ if skippedSt > 0 {
+ steps = append(steps, cyan(fmt.Sprintf("%d skipped", skippedSt)))
+ }
+ if passedSc > 0 {
+ scenarios = append(scenarios, green(fmt.Sprintf("%d passed", passedSc)))
+ }
+ scenarios = append(scenarios, parts...)
+
+ testRunStartedAt := f.Storage.MustGetTestRunStarted().StartedAt
+ elapsed := utils.TimeNowFunc().Sub(testRunStartedAt)
+
+ fmt.Fprintln(f.out, "")
+
+ if totalSc == 0 {
+ fmt.Fprintln(f.out, "No scenarios")
+ } else {
+ fmt.Fprintf(f.out, "%d scenarios (%s)\n", totalSc, strings.Join(scenarios, ", "))
+ }
+
+ if totalSt == 0 {
+ fmt.Fprintln(f.out, "No steps")
+ } else {
+ fmt.Fprintf(f.out, "%d steps (%s)\n", totalSt, strings.Join(steps, ", "))
+ }
+
+ elapsedString := elapsed.String()
+ if elapsed.Nanoseconds() == 0 {
+ // Go 1.5 and 1.6 print 0 instead of 0s if the duration is zero.
+ elapsedString = "0s"
+ }
+ fmt.Fprintln(f.out, elapsedString)
+
+ // prints used randomization seed
+ seed, err := strconv.ParseInt(os.Getenv("GODOG_SEED"), 10, 64)
+ if err == nil && seed != 0 {
+ fmt.Fprintln(f.out, "")
+ fmt.Fprintln(f.out, "Randomized with seed:", colors.Yellow(seed))
+ }
+
+ if text := f.Snippets(); text != "" {
+ fmt.Fprintln(f.out, "")
+ fmt.Fprintln(f.out, yellow("You can implement step definitions for undefined steps with these snippets:"))
+ fmt.Fprintln(f.out, yellow(text))
+ }
+}
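+
+// For illustration, a typical summary printed above looks like
+// (counts and timing are run-dependent):
+//
+//	2 scenarios (1 passed, 1 undefined)
+//	5 steps (3 passed, 1 undefined, 1 skipped)
+//	12.345ms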
+
+// Snippets returns code suggestions for undefined steps.
+func (f *Base) Snippets() string {
+ undefinedStepResults := f.Storage.MustGetPickleStepResultsByStatus(undefined)
+ if len(undefinedStepResults) == 0 {
+ return ""
+ }
+
+ var index int
+ var snips []undefinedSnippet
+ // build snippets
+ for _, u := range undefinedStepResults {
+ pickleStep := f.Storage.MustGetPickleStep(u.PickleStepID)
+
+ steps := []string{pickleStep.Text}
+ arg := pickleStep.Argument
+ if u.Def != nil {
+ steps = u.Def.Undefined
+ arg = nil
+ }
+ for _, step := range steps {
+ expr := snippetExprCleanup.ReplaceAllString(step, "\\$1")
+ expr = snippetNumbers.ReplaceAllString(expr, "(\\d+)")
+ expr = snippetExprQuoted.ReplaceAllString(expr, "$1\"([^\"]*)\"$2")
+ expr = "^" + strings.TrimSpace(expr) + "$"
+
+ name := snippetNumbers.ReplaceAllString(step, " ")
+ name = snippetExprQuoted.ReplaceAllString(name, " ")
+ name = strings.TrimSpace(snippetMethodName.ReplaceAllString(name, ""))
+ var words []string
+ for i, w := range strings.Split(name, " ") {
+ switch {
+ case i != 0:
+ w = strings.Title(w)
+ case len(w) > 0:
+ w = string(unicode.ToLower(rune(w[0]))) + w[1:]
+ }
+ words = append(words, w)
+ }
+ name = strings.Join(words, "")
+ if len(name) == 0 {
+ index++
+ name = fmt.Sprintf("StepDefinitioninition%d", index)
+ }
+
+ var found bool
+ for _, snip := range snips {
+ if snip.Expr == expr {
+ found = true
+ break
+ }
+ }
+ if !found {
+ snips = append(snips, undefinedSnippet{Method: name, Expr: expr, argument: arg})
+ }
+ }
+ }
+
+ sort.Sort(snippetSortByMethod(snips))
+
+ var buf bytes.Buffer
+ if err := undefinedSnippetsTpl.Execute(&buf, snips); err != nil {
+ panic(err)
+ }
+ // there may be trailing spaces
+ return strings.Replace(buf.String(), " \n", "\n", -1)
+}
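+
+// For illustration, an undefined step like `I eat 5 godogs` yields a
+// snippet along these lines (exact output depends on the template):
+//
+//	func iEatGodogs(arg1 int) error {
+//		return godog.ErrPending
+//	}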
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_cucumber.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_cucumber.go
new file mode 100644
index 000000000..31380c975
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_cucumber.go
@@ -0,0 +1,326 @@
+package formatters
+
+/*
+ The specification for the formatting originated from https://www.relishapp.com/cucumber/cucumber/docs/formatters/json-output-formatter.
+ I found that documentation was misleading or outdated. To validate the formatting I created a Ruby Cucumber test harness and ran the
+ same feature files through godog and the Ruby Cucumber.
+
+ The docstrings in the cucumber.feature represent the cucumber output for those same feature definitions.
+
+ I did note that comments in Ruby could appear at just about any level, in particular Feature, Scenario and Step. In godog I
+ could only find comments under the Feature data structure.
+*/
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+
+ "github.com/cucumber/godog/formatters"
+ "github.com/cucumber/godog/internal/models"
+ messages "github.com/cucumber/messages/go/v21"
+)
+
+func init() {
+ formatters.Format("cucumber", "Produces cucumber JSON format output.", CucumberFormatterFunc)
+}
+
+// CucumberFormatterFunc implements the FormatterFunc for the cucumber formatter
+func CucumberFormatterFunc(suite string, out io.Writer) formatters.Formatter {
+ return &Cuke{Base: NewBase(suite, out)}
+}
+
+// Cuke is a formatter that renders test results as Cucumber JSON.
+type Cuke struct {
+ *Base
+}
+
+// Summary renders test result as Cucumber JSON.
+func (f *Cuke) Summary() {
+ features := f.Storage.MustGetFeatures()
+
+ res := f.buildCukeFeatures(features)
+
+ dat, err := json.MarshalIndent(res, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Fprintf(f.out, "%s\n", string(dat))
+}
+
+func (f *Cuke) buildCukeFeatures(features []*models.Feature) (res []CukeFeatureJSON) {
+ sort.Sort(sortFeaturesByName(features))
+
+ res = make([]CukeFeatureJSON, len(features))
+
+ for idx, feat := range features {
+ cukeFeature := buildCukeFeature(feat)
+
+ pickles := f.Storage.MustGetPickles(feat.Uri)
+ sort.Sort(sortPicklesByID(pickles))
+
+ cukeFeature.Elements = f.buildCukeElements(pickles)
+
+ for jdx, elem := range cukeFeature.Elements {
+ elem.ID = cukeFeature.ID + ";" + makeCukeID(elem.Name) + elem.ID
+ elem.Tags = append(cukeFeature.Tags, elem.Tags...)
+ cukeFeature.Elements[jdx] = elem
+ }
+
+ res[idx] = cukeFeature
+ }
+
+ return res
+}
+
+func (f *Cuke) buildCukeElements(pickles []*messages.Pickle) (res []cukeElement) {
+ res = make([]cukeElement, len(pickles))
+
+ for idx, pickle := range pickles {
+ pickleResult := f.Storage.MustGetPickleResult(pickle.Id)
+ pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleID(pickle.Id)
+
+ cukeElement := f.buildCukeElement(pickle)
+
+ stepStartedAt := pickleResult.StartedAt
+
+ cukeElement.Steps = make([]cukeStep, len(pickleStepResults))
+ sort.Sort(sortPickleStepResultsByPickleStepID(pickleStepResults))
+
+ for jdx, stepResult := range pickleStepResults {
+ cukeStep := f.buildCukeStep(pickle, stepResult)
+
+ stepResultFinishedAt := stepResult.FinishedAt
+ d := int(stepResultFinishedAt.Sub(stepStartedAt).Nanoseconds())
+ stepStartedAt = stepResultFinishedAt
+
+ cukeStep.Result.Duration = &d
+ if stepResult.Status == undefined ||
+ stepResult.Status == pending ||
+ stepResult.Status == skipped ||
+ stepResult.Status == ambiguous {
+ cukeStep.Result.Duration = nil
+ }
+
+ cukeElement.Steps[jdx] = cukeStep
+ }
+
+ res[idx] = cukeElement
+ }
+
+ return res
+}
+
+type cukeComment struct {
+ Value string `json:"value"`
+ Line int `json:"line"`
+}
+
+type cukeDocstring struct {
+ Value string `json:"value"`
+ ContentType string `json:"content_type"`
+ Line int `json:"line"`
+}
+
+type cukeTag struct {
+ Name string `json:"name"`
+ Line int `json:"line"`
+}
+
+type cukeResult struct {
+ Status string `json:"status"`
+ Error string `json:"error_message,omitempty"`
+ Duration *int `json:"duration,omitempty"`
+}
+
+type cukeMatch struct {
+ Location string `json:"location"`
+}
+
+type cukeEmbedding struct {
+ Name string `json:"name"`
+ MimeType string `json:"mime_type"`
+ Data string `json:"data"`
+}
+
+type cukeStep struct {
+ Keyword string `json:"keyword"`
+ Name string `json:"name"`
+ Line int `json:"line"`
+ Docstring *cukeDocstring `json:"doc_string,omitempty"`
+ Match cukeMatch `json:"match"`
+ Result cukeResult `json:"result"`
+ DataTable []*cukeDataTableRow `json:"rows,omitempty"`
+ Embeddings []cukeEmbedding `json:"embeddings,omitempty"`
+}
+
+type cukeDataTableRow struct {
+ Cells []string `json:"cells"`
+}
+
+type cukeElement struct {
+ ID string `json:"id"`
+ Keyword string `json:"keyword"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Line int `json:"line"`
+ Type string `json:"type"`
+ Tags []cukeTag `json:"tags,omitempty"`
+ Steps []cukeStep `json:"steps,omitempty"`
+}
+
+// CukeFeatureJSON represents a single feature in the Cucumber JSON output.
+type CukeFeatureJSON struct {
+ URI string `json:"uri"`
+ ID string `json:"id"`
+ Keyword string `json:"keyword"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Line int `json:"line"`
+ Comments []cukeComment `json:"comments,omitempty"`
+ Tags []cukeTag `json:"tags,omitempty"`
+ Elements []cukeElement `json:"elements,omitempty"`
+}
+
+func buildCukeFeature(feat *models.Feature) CukeFeatureJSON {
+ cukeFeature := CukeFeatureJSON{
+ URI: feat.Uri,
+ ID: makeCukeID(feat.Feature.Name),
+ Keyword: feat.Feature.Keyword,
+ Name: feat.Feature.Name,
+ Description: feat.Feature.Description,
+ Line: int(feat.Feature.Location.Line),
+ Comments: make([]cukeComment, len(feat.Comments)),
+ Tags: make([]cukeTag, len(feat.Feature.Tags)),
+ }
+
+ for idx, element := range feat.Feature.Tags {
+ cukeFeature.Tags[idx].Line = int(element.Location.Line)
+ cukeFeature.Tags[idx].Name = element.Name
+ }
+
+ for idx, comment := range feat.Comments {
+ cukeFeature.Comments[idx].Value = strings.TrimSpace(comment.Text)
+ cukeFeature.Comments[idx].Line = int(comment.Location.Line)
+ }
+
+ return cukeFeature
+}
+
+func (f *Cuke) buildCukeElement(pickle *messages.Pickle) (cukeElement cukeElement) {
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+ scenario := feature.FindScenario(pickle.AstNodeIds[0])
+
+ cukeElement.Name = pickle.Name
+ cukeElement.Line = int(scenario.Location.Line)
+ cukeElement.Description = scenario.Description
+ cukeElement.Keyword = scenario.Keyword
+ cukeElement.Type = "scenario"
+
+ cukeElement.Tags = make([]cukeTag, len(scenario.Tags))
+ for idx, element := range scenario.Tags {
+ cukeElement.Tags[idx].Line = int(element.Location.Line)
+ cukeElement.Tags[idx].Name = element.Name
+ }
+
+ if len(pickle.AstNodeIds) == 1 {
+ return
+ }
+
+ example, _ := feature.FindExample(pickle.AstNodeIds[1])
+
+ for _, tag := range example.Tags {
+ tag := cukeTag{Line: int(tag.Location.Line), Name: tag.Name}
+ cukeElement.Tags = append(cukeElement.Tags, tag)
+ }
+
+ examples := scenario.Examples
+ if len(examples) > 0 {
+ rowID := pickle.AstNodeIds[1]
+
+ for _, example := range examples {
+ for idx, row := range example.TableBody {
+ if rowID == row.Id {
+ cukeElement.ID += fmt.Sprintf(";%s;%d", makeCukeID(example.Name), idx+2)
+ cukeElement.Line = int(row.Location.Line)
+ }
+ }
+ }
+ }
+
+ return cukeElement
+}
+
+func (f *Cuke) buildCukeStep(pickle *messages.Pickle, stepResult models.PickleStepResult) (cukeStep cukeStep) {
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+ pickleStep := f.Storage.MustGetPickleStep(stepResult.PickleStepID)
+ step := feature.FindStep(pickleStep.AstNodeIds[0])
+
+ line := step.Location.Line
+
+ cukeStep.Name = pickleStep.Text
+ cukeStep.Line = int(line)
+ cukeStep.Keyword = step.Keyword
+
+ arg := pickleStep.Argument
+
+ if arg != nil {
+ if arg.DocString != nil && step.DocString != nil {
+ cukeStep.Docstring = &cukeDocstring{}
+ cukeStep.Docstring.ContentType = strings.TrimSpace(arg.DocString.MediaType)
+ if step.DocString.Location != nil {
+ cukeStep.Docstring.Line = int(step.DocString.Location.Line)
+ }
+ cukeStep.Docstring.Value = arg.DocString.Content
+ }
+
+ if arg.DataTable != nil {
+ cukeStep.DataTable = make([]*cukeDataTableRow, len(arg.DataTable.Rows))
+ for i, row := range arg.DataTable.Rows {
+ cells := make([]string, len(row.Cells))
+ for j, cell := range row.Cells {
+ cells[j] = cell.Value
+ }
+ cukeStep.DataTable[i] = &cukeDataTableRow{Cells: cells}
+ }
+ }
+ }
+
+ if stepResult.Def != nil {
+ cukeStep.Match.Location = strings.Split(DefinitionID(stepResult.Def), " ")[0]
+ }
+
+ cukeStep.Result.Status = stepResult.Status.String()
+ if stepResult.Err != nil {
+ cukeStep.Result.Error = stepResult.Err.Error()
+ }
+
+ if stepResult.Status == undefined || stepResult.Status == pending || stepResult.Status == ambiguous {
+ cukeStep.Match.Location = fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line)
+ }
+
+ if stepResult.Attachments != nil {
+ attachments := []cukeEmbedding{}
+
+ for _, a := range stepResult.Attachments {
+ attachments = append(attachments, cukeEmbedding{
+ Name: a.Name,
+ Data: base64.StdEncoding.EncodeToString(a.Data),
+ MimeType: a.MimeType,
+ })
+ }
+
+ if len(attachments) > 0 {
+ cukeStep.Embeddings = attachments
+ }
+ }
+ return cukeStep
+}
+
+func makeCukeID(name string) string {
+ return strings.Replace(strings.ToLower(name), " ", "-", -1)
+}
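+
+// For example, makeCukeID("Eat godogs") returns "eat-godogs"; a scenario
+// element ID then composes as "eat-godogs;starving", with an additional
+// example-name and row-number suffix for outline rows (names illustrative).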
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_events.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_events.go
new file mode 100644
index 000000000..c5ffcb50e
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_events.go
@@ -0,0 +1,346 @@
+package formatters
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/cucumber/godog/formatters"
+ "github.com/cucumber/godog/internal/utils"
+ messages "github.com/cucumber/messages/go/v21"
+)
+
+const nanoSec = 1000000
+const spec = "0.1.0"
+
+func init() {
+ formatters.Format("events", fmt.Sprintf("Produces JSON event stream, based on spec: %s.", spec), EventsFormatterFunc)
+}
+
+// EventsFormatterFunc implements the FormatterFunc for the events formatter
+func EventsFormatterFunc(suite string, out io.Writer) formatters.Formatter {
+ return &Events{Base: NewBase(suite, out)}
+}
+
+// Events - Events formatter
+type Events struct {
+ *Base
+}
+
+func (f *Events) event(ev interface{}) {
+ data, err := json.Marshal(ev)
+ if err != nil {
+ panic(fmt.Sprintf("failed to marshal stream event: %+v - %v", ev, err))
+ }
+ fmt.Fprintln(f.out, string(data))
+}
+
+// Pickle receives scenario.
+func (f *Events) Pickle(pickle *messages.Pickle) {
+ f.Base.Pickle(pickle)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.event(&struct {
+ Event string `json:"event"`
+ Location string `json:"location"`
+ Timestamp int64 `json:"timestamp"`
+ }{
+ "TestCaseStarted",
+ f.scenarioLocation(pickle),
+ utils.TimeNowFunc().UnixNano() / nanoSec,
+ })
+
+ if len(pickle.Steps) == 0 {
+ // @TODO: is the status undefined or passed when there are
+ // no steps for this scenario?
+ f.event(&struct {
+ Event string `json:"event"`
+ Location string `json:"location"`
+ Timestamp int64 `json:"timestamp"`
+ Status string `json:"status"`
+ }{
+ "TestCaseFinished",
+ f.scenarioLocation(pickle),
+ utils.TimeNowFunc().UnixNano() / nanoSec,
+ "undefined",
+ })
+ }
+}
+
+// TestRunStarted is triggered on test start.
+func (f *Events) TestRunStarted() {
+ f.Base.TestRunStarted()
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.event(&struct {
+ Event string `json:"event"`
+ Version string `json:"version"`
+ Timestamp int64 `json:"timestamp"`
+ Suite string `json:"suite"`
+ }{
+ "TestRunStarted",
+ spec,
+ utils.TimeNowFunc().UnixNano() / nanoSec,
+ f.suiteName,
+ })
+}
+
+// Feature receives gherkin document.
+func (f *Events) Feature(ft *messages.GherkinDocument, p string, c []byte) {
+ f.Base.Feature(ft, p, c)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.event(&struct {
+ Event string `json:"event"`
+ Location string `json:"location"`
+ Source string `json:"source"`
+ }{
+ "TestSource",
+ fmt.Sprintf("%s:%d", p, ft.Feature.Location.Line),
+ string(c),
+ })
+}
+
+// Summary pushes summary information to JSON stream.
+func (f *Events) Summary() {
+ // @TODO: determine status
+ status := passed
+
+ if len(f.Storage.MustGetPickleStepResultsByStatus(failed)) > 0 {
+ status = failed
+ } else if len(f.Storage.MustGetPickleStepResultsByStatus(passed)) == 0 {
+ if len(f.Storage.MustGetPickleStepResultsByStatus(undefined)) > len(f.Storage.MustGetPickleStepResultsByStatus(pending)) {
+ status = undefined
+ } else {
+ status = pending
+ }
+ }
+
+ snips := f.Snippets()
+ if len(snips) > 0 {
+ snips = "You can implement step definitions for undefined steps with these snippets:\n" + snips
+ }
+
+ f.event(&struct {
+ Event string `json:"event"`
+ Status string `json:"status"`
+ Timestamp int64 `json:"timestamp"`
+ Snippets string `json:"snippets"`
+ Memory string `json:"memory"`
+ }{
+ "TestRunFinished",
+ status.String(),
+ utils.TimeNowFunc().UnixNano() / nanoSec,
+ snips,
+ "", // @TODO not sure that could be correctly implemented
+ })
+}
+
+func (f *Events) step(pickle *messages.Pickle, pickleStep *messages.PickleStep) {
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+ pickleStepResult := f.Storage.MustGetPickleStepResult(pickleStep.Id)
+ step := feature.FindStep(pickleStep.AstNodeIds[0])
+
+ var errMsg string
+ if pickleStepResult.Err != nil {
+ errMsg = pickleStepResult.Err.Error()
+ }
+
+ if pickleStepResult.Attachments != nil {
+ for _, attachment := range pickleStepResult.Attachments {
+
+ f.event(&struct {
+ Event string `json:"event"`
+ Location string `json:"location"`
+ Timestamp int64 `json:"timestamp"`
+ ContentEncoding string `json:"contentEncoding"`
+ FileName string `json:"fileName"`
+ MimeType string `json:"mimeType"`
+ Body string `json:"body"`
+ }{
+ "Attachment",
+ fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line),
+ utils.TimeNowFunc().UnixNano() / nanoSec,
+ messages.AttachmentContentEncoding_BASE64.String(),
+ attachment.Name,
+ attachment.MimeType,
+ string(attachment.Data),
+ })
+
+ }
+ }
+
+ f.event(&struct {
+ Event string `json:"event"`
+ Location string `json:"location"`
+ Timestamp int64 `json:"timestamp"`
+ Status string `json:"status"`
+ Summary string `json:"summary,omitempty"`
+ }{
+ "TestStepFinished",
+ fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line),
+ utils.TimeNowFunc().UnixNano() / nanoSec,
+ pickleStepResult.Status.String(),
+ errMsg,
+ })
+
+ if isLastStep(pickle, pickleStep) {
+ var status string
+
+ pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleID(pickle.Id)
+ for _, stepResult := range pickleStepResults {
+ switch stepResult.Status {
+ case passed, failed, undefined, pending, ambiguous:
+ status = stepResult.Status.String()
+ }
+ }
+
+ f.event(&struct {
+ Event string `json:"event"`
+ Location string `json:"location"`
+ Timestamp int64 `json:"timestamp"`
+ Status string `json:"status"`
+ }{
+ "TestCaseFinished",
+ f.scenarioLocation(pickle),
+ utils.TimeNowFunc().UnixNano() / nanoSec,
+ status,
+ })
+ }
+}
+
+// Defined receives step definition.
+func (f *Events) Defined(pickle *messages.Pickle, pickleStep *messages.PickleStep, def *formatters.StepDefinition) {
+ f.Base.Defined(pickle, pickleStep, def)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+ step := feature.FindStep(pickleStep.AstNodeIds[0])
+
+ if def != nil {
+ matchedDef := f.Storage.MustGetStepDefintionMatch(pickleStep.AstNodeIds[0])
+
+ m := def.Expr.FindStringSubmatchIndex(pickleStep.Text)[2:]
+ var args [][2]int
+ for i := 0; i < len(m)/2; i++ {
+ pair := m[i*2 : i*2+2]
+ var idxs [2]int
+ idxs[0] = pair[0]
+ idxs[1] = pair[1]
+ args = append(args, idxs)
+ }
+
+ if len(args) == 0 {
+ args = make([][2]int, 0)
+ }
+
+ f.event(&struct {
+ Event string `json:"event"`
+ Location string `json:"location"`
+ DefID string `json:"definition_id"`
+ Args [][2]int `json:"arguments"`
+ }{
+ "StepDefinitionFound",
+ fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line),
+ DefinitionID(matchedDef),
+ args,
+ })
+ }
+
+ f.event(&struct {
+ Event string `json:"event"`
+ Location string `json:"location"`
+ Timestamp int64 `json:"timestamp"`
+ }{
+ "TestStepStarted",
+ fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line),
+ utils.TimeNowFunc().UnixNano() / nanoSec,
+ })
+}
+
+// Passed captures passed step.
+func (f *Events) Passed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Passed(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(pickle, step)
+}
+
+// Skipped captures skipped step.
+func (f *Events) Skipped(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Skipped(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(pickle, step)
+}
+
+// Undefined captures undefined step.
+func (f *Events) Undefined(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Undefined(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(pickle, step)
+}
+
+// Failed captures failed step.
+func (f *Events) Failed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) {
+ f.Base.Failed(pickle, step, match, err)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(pickle, step)
+}
+
+// Pending captures pending step.
+func (f *Events) Pending(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Pending(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(pickle, step)
+}
+
+// Ambiguous captures ambiguous step.
+func (f *Events) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) {
+ f.Base.Ambiguous(pickle, step, match, err)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(pickle, step)
+}
+
+func (f *Events) scenarioLocation(pickle *messages.Pickle) string {
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+ scenario := feature.FindScenario(pickle.AstNodeIds[0])
+
+ line := scenario.Location.Line
+ if len(pickle.AstNodeIds) == 2 {
+ _, row := feature.FindExample(pickle.AstNodeIds[1])
+ line = row.Location.Line
+ }
+
+ return fmt.Sprintf("%s:%d", pickle.Uri, line)
+}
+
+func isLastStep(pickle *messages.Pickle, step *messages.PickleStep) bool {
+ return pickle.Steps[len(pickle.Steps)-1].Id == step.Id
+}
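+
+// For illustration, each emitted event is one JSON line, e.g.
+// (timestamp and suite name hypothetical):
+//
+//	{"event":"TestRunStarted","version":"0.1.0","timestamp":1700000000000,"suite":"godog"}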
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_flushwrap.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_flushwrap.go
new file mode 100644
index 000000000..129b06210
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_flushwrap.go
@@ -0,0 +1,108 @@
+package formatters
+
+import (
+ "sync"
+
+ "github.com/cucumber/godog/formatters"
+ messages "github.com/cucumber/messages/go/v21"
+)
+
+// WrapOnFlush wraps a `formatters.Formatter` in a `formatters.FlushFormatter`,
+// which only forwards events when `Flush` is called.
+func WrapOnFlush(fmt formatters.Formatter) formatters.FlushFormatter {
+ return &onFlushFormatter{
+ fmt: fmt,
+ fns: make([]func(), 0),
+ mu: &sync.Mutex{},
+ }
+}
+
+type onFlushFormatter struct {
+ fmt formatters.Formatter
+ fns []func()
+ mu *sync.Mutex
+}
+
+func (o *onFlushFormatter) Pickle(pickle *messages.Pickle) {
+ o.fns = append(o.fns, func() {
+ o.fmt.Pickle(pickle)
+ })
+}
+
+func (o *onFlushFormatter) Passed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) {
+ o.fns = append(o.fns, func() {
+ o.fmt.Passed(pickle, step, definition)
+ })
+}
+
+// Ambiguous implements formatters.Formatter.
+func (o *onFlushFormatter) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) {
+ o.fns = append(o.fns, func() {
+ o.fmt.Ambiguous(pickle, step, definition, err)
+ })
+}
+
+// Defined implements formatters.Formatter.
+func (o *onFlushFormatter) Defined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) {
+ o.fns = append(o.fns, func() {
+ o.fmt.Defined(pickle, step, definition)
+ })
+}
+
+// Failed implements formatters.Formatter.
+func (o *onFlushFormatter) Failed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) {
+ o.fns = append(o.fns, func() {
+ o.fmt.Failed(pickle, step, definition, err)
+ })
+}
+
+// Feature implements formatters.Formatter.
+func (o *onFlushFormatter) Feature(pickle *messages.GherkinDocument, p string, c []byte) {
+ o.fns = append(o.fns, func() {
+ o.fmt.Feature(pickle, p, c)
+ })
+}
+
+// Pending implements formatters.Formatter.
+func (o *onFlushFormatter) Pending(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) {
+ o.fns = append(o.fns, func() {
+ o.fmt.Pending(pickle, step, definition)
+ })
+}
+
+// Skipped implements formatters.Formatter.
+func (o *onFlushFormatter) Skipped(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) {
+ o.fns = append(o.fns, func() {
+ o.fmt.Skipped(pickle, step, definition)
+ })
+}
+
+// Summary implements formatters.Formatter.
+func (o *onFlushFormatter) Summary() {
+ o.fns = append(o.fns, func() {
+ o.fmt.Summary()
+ })
+}
+
+// TestRunStarted implements formatters.Formatter.
+func (o *onFlushFormatter) TestRunStarted() {
+ o.fns = append(o.fns, func() {
+ o.fmt.TestRunStarted()
+ })
+}
+
+// Undefined implements formatters.Formatter.
+func (o *onFlushFormatter) Undefined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) {
+ o.fns = append(o.fns, func() {
+ o.fmt.Undefined(pickle, step, definition)
+ })
+}
+
+// Flush the logs.
+func (o *onFlushFormatter) Flush() {
+ o.mu.Lock()
+ defer o.mu.Unlock()
+ for _, fn := range o.fns {
+ fn()
+ }
+}
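+
+// A minimal usage sketch (writer choice illustrative): buffer a
+// formatter's events and replay them in order once the run settles:
+//
+//	f := WrapOnFlush(BaseFormatterFunc("suite", os.Stdout))
+//	// ... formatter callbacks are recorded, not executed ...
+//	f.Flush() // executes all recorded callbacks in order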
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_junit.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_junit.go
new file mode 100644
index 000000000..85acabe2e
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_junit.go
@@ -0,0 +1,246 @@
+package formatters
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/cucumber/godog/formatters"
+ "github.com/cucumber/godog/internal/models"
+ "github.com/cucumber/godog/internal/utils"
+)
+
+func init() {
+ formatters.Format("junit", "Prints junit compatible xml to stdout", JUnitFormatterFunc)
+}
+
+// JUnitFormatterFunc implements the FormatterFunc for the junit formatter
+func JUnitFormatterFunc(suite string, out io.Writer) formatters.Formatter {
+ return &JUnit{Base: NewBase(suite, out)}
+}
+
+// JUnit renders test results in JUnit format.
+type JUnit struct {
+ *Base
+}
+
+// Summary renders summary information.
+func (f *JUnit) Summary() {
+ suite := f.buildJUNITPackageSuite()
+
+ _, err := io.WriteString(f.out, xml.Header)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "failed to write junit string:", err)
+ }
+
+ enc := xml.NewEncoder(f.out)
+ enc.Indent("", s(2))
+ if err = enc.Encode(suite); err != nil {
+ fmt.Fprintln(os.Stderr, "failed to write junit xml:", err)
+ }
+}
+
+func junitTimeDuration(from, to time.Time) string {
+ return strconv.FormatFloat(to.Sub(from).Seconds(), 'f', -1, 64)
+}
+
+// getPickleResult deals with the fact that if there's no result due to 'StopOnFirstFailure' being
+// set, MustGetPickleResult panics.
+func (f *JUnit) getPickleResult(pickleID string) (res *models.PickleResult) {
+ defer func() {
+ if r := recover(); r != nil {
+ res = nil
+ }
+ }()
+ pr := f.Storage.MustGetPickleResult(pickleID)
+ res = &pr
+ return
+}
+
+func (f *JUnit) getPickleStepResult(stepID string) (res *models.PickleStepResult) {
+ defer func() {
+ if r := recover(); r != nil {
+ res = nil
+ }
+ }()
+ psr := f.Storage.MustGetPickleStepResult(stepID)
+ res = &psr
+ return
+}
+
+func (f *JUnit) getPickleStepResultsByPickleID(pickleID string) (res []models.PickleStepResult) {
+ defer func() {
+ if r := recover(); r != nil {
+ res = nil
+ }
+ }()
+ res = f.Storage.MustGetPickleStepResultsByPickleID(pickleID)
+ return
+}
+
+func (f *JUnit) buildJUNITPackageSuite() JunitPackageSuite {
+ features := f.Storage.MustGetFeatures()
+ sort.Sort(sortFeaturesByName(features))
+
+ testRunStartedAt := f.Storage.MustGetTestRunStarted().StartedAt
+
+ suite := JunitPackageSuite{
+ Name: f.suiteName,
+ TestSuites: make([]*junitTestSuite, len(features)),
+ Time: junitTimeDuration(testRunStartedAt, utils.TimeNowFunc()),
+ }
+
+ for idx, feature := range features {
+ pickles := f.Storage.MustGetPickles(feature.Uri)
+ sort.Sort(sortPicklesByID(pickles))
+
+ ts := junitTestSuite{
+ Name: feature.Feature.Name,
+ TestCases: make([]*junitTestCase, len(pickles)),
+ }
+
+ var testcaseNames = make(map[string]int)
+ for _, pickle := range pickles {
+ testcaseNames[pickle.Name] = testcaseNames[pickle.Name] + 1
+ }
+
+ firstPickleStartedAt := testRunStartedAt
+ lastPickleFinishedAt := testRunStartedAt
+
+ var outlineNo = make(map[string]int)
+ for idx, pickle := range pickles {
+ tc := junitTestCase{}
+ tc.Name = pickle.Name
+ if testcaseNames[tc.Name] > 1 {
+ outlineNo[tc.Name] = outlineNo[tc.Name] + 1
+ tc.Name += fmt.Sprintf(" #%d", outlineNo[tc.Name])
+ }
+
+ pickleResult := f.getPickleResult(pickle.Id)
+ if pickleResult == nil {
+ tc.Status = skipped.String()
+ } else {
+ if idx == 0 {
+ firstPickleStartedAt = pickleResult.StartedAt
+ }
+ lastPickleFinishedAt = pickleResult.StartedAt
+ }
+
+ if len(pickle.Steps) > 0 {
+ lastStep := pickle.Steps[len(pickle.Steps)-1]
+ if lastPickleStepResult := f.getPickleStepResult(lastStep.Id); lastPickleStepResult != nil {
+ lastPickleFinishedAt = lastPickleStepResult.FinishedAt
+ }
+ }
+
+ if pickleResult != nil {
+ tc.Time = junitTimeDuration(pickleResult.StartedAt, lastPickleFinishedAt)
+ }
+
+ ts.Tests++
+ suite.Tests++
+
+ pickleStepResults := f.getPickleStepResultsByPickleID(pickle.Id)
+ for _, stepResult := range pickleStepResults {
+ pickleStep := f.Storage.MustGetPickleStep(stepResult.PickleStepID)
+
+ switch stepResult.Status {
+ case passed:
+ tc.Status = passed.String()
+ case failed:
+ tc.Status = failed.String()
+ tc.Failure = &junitFailure{
+ Message: fmt.Sprintf("Step %s: %s", pickleStep.Text, stepResult.Err),
+ }
+ case ambiguous:
+ tc.Status = ambiguous.String()
+ tc.Error = append(tc.Error, &junitError{
+ Type: "ambiguous",
+ Message: fmt.Sprintf("Step %s", pickleStep.Text),
+ })
+ case skipped:
+ tc.Error = append(tc.Error, &junitError{
+ Type: "skipped",
+ Message: fmt.Sprintf("Step %s", pickleStep.Text),
+ })
+ case undefined:
+ tc.Status = undefined.String()
+ tc.Error = append(tc.Error, &junitError{
+ Type: "undefined",
+ Message: fmt.Sprintf("Step %s", pickleStep.Text),
+ })
+ case pending:
+ tc.Status = pending.String()
+ tc.Error = append(tc.Error, &junitError{
+ Type: "pending",
+ Message: fmt.Sprintf("Step %s: TODO: write pending definition", pickleStep.Text),
+ })
+ }
+ }
+
+ switch tc.Status {
+ case failed.String():
+ ts.Failures++
+ suite.Failures++
+ case undefined.String(), pending.String():
+ ts.Errors++
+ suite.Errors++
+ }
+
+ ts.TestCases[idx] = &tc
+ }
+
+ ts.Time = junitTimeDuration(firstPickleStartedAt, lastPickleFinishedAt)
+
+ suite.TestSuites[idx] = &ts
+ }
+
+ return suite
+}
+
+type junitFailure struct {
+ Message string `xml:"message,attr"`
+ Type string `xml:"type,attr,omitempty"`
+}
+
+type junitError struct {
+ XMLName xml.Name `xml:"error,omitempty"`
+ Message string `xml:"message,attr"`
+ Type string `xml:"type,attr"`
+}
+
+type junitTestCase struct {
+ XMLName xml.Name `xml:"testcase"`
+ Name string `xml:"name,attr"`
+ Status string `xml:"status,attr"`
+ Time string `xml:"time,attr"`
+ Failure *junitFailure `xml:"failure,omitempty"`
+ Error []*junitError
+}
+
+type junitTestSuite struct {
+ XMLName xml.Name `xml:"testsuite"`
+ Name string `xml:"name,attr"`
+ Tests int `xml:"tests,attr"`
+ Skipped int `xml:"skipped,attr"`
+ Failures int `xml:"failures,attr"`
+ Errors int `xml:"errors,attr"`
+ Time string `xml:"time,attr"`
+ TestCases []*junitTestCase
+}
+
+// JunitPackageSuite is the root <testsuites> element of the JUnit XML report.
+type JunitPackageSuite struct {
+ XMLName xml.Name `xml:"testsuites"`
+ Name string `xml:"name,attr"`
+ Tests int `xml:"tests,attr"`
+ Skipped int `xml:"skipped,attr"`
+ Failures int `xml:"failures,attr"`
+ Errors int `xml:"errors,attr"`
+ Time string `xml:"time,attr"`
+ TestSuites []*junitTestSuite
+}
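+
+// For illustration, the rendered XML has this shape (values hypothetical):
+//
+//	<testsuites name="godog" tests="2" skipped="0" failures="1" errors="0" time="0.42">
+//	  <testsuite name="Eat godogs" tests="2" skipped="0" failures="1" errors="0" time="0.40">
+//	    <testcase name="Starving" status="failed" time="0.10">
+//	      <failure message="Step I eat 5: expected 7"></failure>
+//	    </testcase>
+//	  </testsuite>
+//	</testsuites>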
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_multi.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_multi.go
new file mode 100644
index 000000000..001c99809
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_multi.go
@@ -0,0 +1,139 @@
+package formatters
+
+import (
+ "io"
+
+ "github.com/cucumber/godog/formatters"
+ "github.com/cucumber/godog/internal/storage"
+ messages "github.com/cucumber/messages/go/v21"
+)
+
+// MultiFormatter passes test progress to multiple formatters.
+type MultiFormatter struct {
+ formatters []formatter
+ repeater repeater
+}
+
+type formatter struct {
+ fmt formatters.FormatterFunc
+ out io.Writer
+}
+
+type repeater []formatters.Formatter
+
+type storageFormatter interface {
+ SetStorage(s *storage.Storage)
+}
+
+// SetStorage passes storage to all added formatters.
+func (r repeater) SetStorage(s *storage.Storage) {
+ for _, f := range r {
+ if ss, ok := f.(storageFormatter); ok {
+ ss.SetStorage(s)
+ }
+ }
+}
+
+// TestRunStarted triggers TestRunStarted for all added formatters.
+func (r repeater) TestRunStarted() {
+ for _, f := range r {
+ f.TestRunStarted()
+ }
+}
+
+// Feature triggers Feature for all added formatters.
+func (r repeater) Feature(document *messages.GherkinDocument, s string, bytes []byte) {
+ for _, f := range r {
+ f.Feature(document, s, bytes)
+ }
+}
+
+// Pickle triggers Pickle for all added formatters.
+func (r repeater) Pickle(pickle *messages.Pickle) {
+ for _, f := range r {
+ f.Pickle(pickle)
+ }
+}
+
+// Defined triggers Defined for all added formatters.
+func (r repeater) Defined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) {
+ for _, f := range r {
+ f.Defined(pickle, step, definition)
+ }
+}
+
+// Failed triggers Failed for all added formatters.
+func (r repeater) Failed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) {
+ for _, f := range r {
+ f.Failed(pickle, step, definition, err)
+ }
+}
+
+// Passed triggers Passed for all added formatters.
+func (r repeater) Passed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) {
+ for _, f := range r {
+ f.Passed(pickle, step, definition)
+ }
+}
+
+// Skipped triggers Skipped for all added formatters.
+func (r repeater) Skipped(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) {
+ for _, f := range r {
+ f.Skipped(pickle, step, definition)
+ }
+}
+
+// Undefined triggers Undefined for all added formatters.
+func (r repeater) Undefined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) {
+ for _, f := range r {
+ f.Undefined(pickle, step, definition)
+ }
+}
+
+// Pending triggers Pending for all added formatters.
+func (r repeater) Pending(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) {
+ for _, f := range r {
+ f.Pending(pickle, step, definition)
+ }
+}
+
+// Ambiguous triggers Ambiguous for all added formatters.
+func (r repeater) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) {
+ for _, f := range r {
+ f.Ambiguous(pickle, step, definition, err)
+ }
+}
+
+// Summary triggers Summary for all added formatters.
+func (r repeater) Summary() {
+ for _, f := range r {
+ f.Summary()
+ }
+}
+
+// Add adds formatter with output writer.
+func (m *MultiFormatter) Add(name string, out io.Writer) {
+ f := formatters.FindFmt(name)
+ if f == nil {
+ panic("formatter not found: " + name)
+ }
+
+ m.formatters = append(m.formatters, formatter{
+ fmt: f,
+ out: out,
+ })
+}
+
+// FormatterFunc implements the FormatterFunc for the multi formatter.
+func (m *MultiFormatter) FormatterFunc(suite string, out io.Writer) formatters.Formatter {
+ for _, f := range m.formatters {
+ out := out
+ if f.out != nil {
+ out = f.out
+ }
+
+ m.repeater = append(m.repeater, f.fmt(suite, out))
+ }
+
+ return m.repeater
+}
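+
+// A minimal usage sketch (output targets illustrative):
+//
+//	var m MultiFormatter
+//	m.Add("pretty", nil)    // nil falls back to the suite writer
+//	m.Add("junit", xmlFile) // xmlFile is a hypothetical io.Writer
+//	ff := m.FormatterFunc   // pass as the suite's FormatterFunc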
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_pretty.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_pretty.go
new file mode 100644
index 000000000..76d733793
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_pretty.go
@@ -0,0 +1,586 @@
+package formatters
+
+import (
+ "fmt"
+ "io"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+
+ messages "github.com/cucumber/messages/go/v21"
+
+ "github.com/cucumber/godog/colors"
+ "github.com/cucumber/godog/formatters"
+ "github.com/cucumber/godog/internal/models"
+)
+
+func init() {
+ formatters.Format("pretty", "Prints every feature with runtime statuses.", PrettyFormatterFunc)
+}
+
+// PrettyFormatterFunc implements the FormatterFunc for the pretty formatter
+func PrettyFormatterFunc(suite string, out io.Writer) formatters.Formatter {
+ return &Pretty{Base: NewBase(suite, out)}
+}
+
+var outlinePlaceholderRegexp = regexp.MustCompile("<[^>]+>")
+
+// Pretty is a formatter for readable output.
+type Pretty struct {
+ *Base
+ firstFeature *bool
+}
+
+// TestRunStarted is triggered on test start.
+func (f *Pretty) TestRunStarted() {
+ f.Base.TestRunStarted()
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ firstFeature := true
+ f.firstFeature = &firstFeature
+}
+
+// Feature receives gherkin document.
+func (f *Pretty) Feature(gd *messages.GherkinDocument, p string, c []byte) {
+ f.Lock.Lock()
+ if !*f.firstFeature {
+ fmt.Fprintln(f.out, "")
+ }
+
+ *f.firstFeature = false
+ f.Lock.Unlock()
+
+ f.Base.Feature(gd, p, c)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.printFeature(gd.Feature)
+}
+
+// Pickle takes a gherkin node for formatting.
+func (f *Pretty) Pickle(pickle *messages.Pickle) {
+ f.Base.Pickle(pickle)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ if len(pickle.Steps) == 0 {
+ f.printUndefinedPickle(pickle)
+ return
+ }
+}
+
+// Passed captures passed step.
+func (f *Pretty) Passed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Passed(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.printStep(pickle, step)
+}
+
+// Skipped captures skipped step.
+func (f *Pretty) Skipped(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Skipped(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.printStep(pickle, step)
+}
+
+// Undefined captures undefined step.
+func (f *Pretty) Undefined(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Undefined(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.printStep(pickle, step)
+}
+
+// Failed captures failed step.
+func (f *Pretty) Failed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) {
+ f.Base.Failed(pickle, step, match, err)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.printStep(pickle, step)
+}
+
+// Ambiguous captures ambiguous step.
+func (f *Pretty) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) {
+ f.Base.Ambiguous(pickle, step, match, err)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.printStep(pickle, step)
+}
+
+// Pending captures pending step.
+func (f *Pretty) Pending(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Pending(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.printStep(pickle, step)
+}
+
+func (f *Pretty) printFeature(feature *messages.Feature) {
+ fmt.Fprintln(f.out, keywordAndName(feature.Keyword, feature.Name))
+ if strings.TrimSpace(feature.Description) != "" {
+ for _, line := range strings.Split(feature.Description, "\n") {
+ fmt.Fprintln(f.out, s(f.indent)+strings.TrimSpace(line))
+ }
+ }
+}
+
+func keywordAndName(keyword, name string) string {
+ title := whiteb(keyword + ":")
+ if len(name) > 0 {
+ title += " " + name
+ }
+ return title
+}
+
+func (f *Pretty) scenarioLengths(pickle *messages.Pickle) (scenarioHeaderLength int, maxLength int) {
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+ astScenario := feature.FindScenario(pickle.AstNodeIds[0])
+ astBackground := feature.FindBackground(pickle.AstNodeIds[0])
+
+ scenarioHeaderLength = f.lengthPickle(astScenario.Keyword, astScenario.Name)
+ maxLength = f.longestStep(astScenario.Steps, scenarioHeaderLength)
+
+ if astBackground != nil {
+ maxLength = f.longestStep(astBackground.Steps, maxLength)
+ }
+
+ return scenarioHeaderLength, maxLength
+}
+
+func (f *Pretty) printScenarioHeader(pickle *messages.Pickle, astScenario *messages.Scenario, spaceFilling int) {
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+ text := s(f.indent) + keywordAndName(astScenario.Keyword, astScenario.Name)
+ text += s(spaceFilling) + line(feature.Uri, astScenario.Location)
+ fmt.Fprintln(f.out, "\n"+text)
+}
+
+func (f *Pretty) printUndefinedPickle(pickle *messages.Pickle) {
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+ astScenario := feature.FindScenario(pickle.AstNodeIds[0])
+ astBackground := feature.FindBackground(pickle.AstNodeIds[0])
+
+ scenarioHeaderLength, maxLength := f.scenarioLengths(pickle)
+
+ if astBackground != nil {
+ fmt.Fprintln(f.out, "\n"+s(f.indent)+keywordAndName(astBackground.Keyword, astBackground.Name))
+ for _, step := range astBackground.Steps {
+ text := s(f.indent*2) + cyan(strings.TrimSpace(step.Keyword)) + " " + cyan(step.Text)
+ fmt.Fprintln(f.out, text)
+ }
+ }
+
+ // do not print scenario headers and examples multiple times
+ if len(astScenario.Examples) > 0 {
+ exampleTable, exampleRow := feature.FindExample(pickle.AstNodeIds[1])
+ firstExampleRow := exampleTable.TableBody[0].Id == exampleRow.Id
+ firstExamplesTable := astScenario.Examples[0].Location.Line == exampleTable.Location.Line
+
+ if !(firstExamplesTable && firstExampleRow) {
+ return
+ }
+ }
+
+ f.printScenarioHeader(pickle, astScenario, maxLength-scenarioHeaderLength)
+
+ for _, examples := range astScenario.Examples {
+ max := longestExampleRow(examples, cyan, cyan)
+
+ fmt.Fprintln(f.out, "")
+ fmt.Fprintln(f.out, s(f.indent*2)+keywordAndName(examples.Keyword, examples.Name))
+
+ f.printTableHeader(examples.TableHeader, max)
+
+ for _, row := range examples.TableBody {
+ f.printTableRow(row, max, cyan)
+ }
+ }
+}
+
+// Summary renders summary information.
+func (f *Pretty) Summary() {
+ failedStepResults := f.Storage.MustGetPickleStepResultsByStatus(failed)
+ if len(failedStepResults) > 0 {
+ fmt.Fprintln(f.out, "\n--- "+red("Failed steps:")+"\n")
+
+ sort.Sort(sortPickleStepResultsByPickleStepID(failedStepResults))
+
+ for _, fail := range failedStepResults {
+ pickle := f.Storage.MustGetPickle(fail.PickleID)
+ pickleStep := f.Storage.MustGetPickleStep(fail.PickleStepID)
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+
+ astScenario := feature.FindScenario(pickle.AstNodeIds[0])
+ scenarioDesc := fmt.Sprintf("%s: %s", astScenario.Keyword, pickle.Name)
+
+ astStep := feature.FindStep(pickleStep.AstNodeIds[0])
+ stepDesc := strings.TrimSpace(astStep.Keyword) + " " + pickleStep.Text
+
+ fmt.Fprintln(f.out, s(f.indent)+red(scenarioDesc)+line(feature.Uri, astScenario.Location))
+ fmt.Fprintln(f.out, s(f.indent*2)+red(stepDesc)+line(feature.Uri, astStep.Location))
+ fmt.Fprintln(f.out, s(f.indent*3)+red("Error: ")+redb(fmt.Sprintf("%+v", fail.Err))+"\n")
+ }
+ }
+
+ f.Base.Summary()
+}
+
+func (f *Pretty) printOutlineExample(pickle *messages.Pickle, step *messages.PickleStep, backgroundSteps int) {
+ var errorMsg string
+ var clr = green
+
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+ astScenario := feature.FindScenario(pickle.AstNodeIds[0])
+ scenarioHeaderLength, maxLength := f.scenarioLengths(pickle)
+
+ exampleTable, exampleRow := feature.FindExample(pickle.AstNodeIds[1])
+ printExampleHeader := exampleTable.TableBody[0].Id == exampleRow.Id
+ firstExamplesTable := astScenario.Examples[0].Location.Line == exampleTable.Location.Line
+
+ pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleIDUntilStep(pickle.Id, step.Id)
+
+ firstExecutedScenarioStep := len(pickleStepResults) == backgroundSteps+1
+ if firstExamplesTable && printExampleHeader && firstExecutedScenarioStep {
+ f.printScenarioHeader(pickle, astScenario, maxLength-scenarioHeaderLength)
+ }
+
+ if len(exampleTable.TableBody) == 0 {
+ // do not print empty examples
+ return
+ }
+
+ lastStep := len(pickleStepResults) == len(pickle.Steps)
+ if !lastStep {
+ // do not print examples unless all steps have finished
+ return
+ }
+
+ for _, result := range pickleStepResults {
+ // determine example row status
+ switch {
+ case result.Status == failed:
+ errorMsg = result.Err.Error()
+ clr = result.Status.Color()
+ case result.Status == ambiguous:
+ errorMsg = result.Err.Error()
+ clr = result.Status.Color()
+ case result.Status == undefined || result.Status == pending:
+ clr = result.Status.Color()
+ case result.Status == skipped && clr == nil:
+ clr = cyan
+ }
+
+ if firstExamplesTable && printExampleHeader {
+ // in first example, we need to print steps
+
+ pickleStep := f.Storage.MustGetPickleStep(result.PickleStepID)
+ astStep := feature.FindStep(pickleStep.AstNodeIds[0])
+
+ var text = ""
+ if result.Def != nil {
+ if m := outlinePlaceholderRegexp.FindAllStringIndex(astStep.Text, -1); len(m) > 0 {
+ var pos int
+ for i := 0; i < len(m); i++ {
+ pair := m[i]
+ text += cyan(astStep.Text[pos:pair[0]])
+ text += cyanb(astStep.Text[pair[0]:pair[1]])
+ pos = pair[1]
+ }
+ text += cyan(astStep.Text[pos:])
+ } else {
+ text = cyan(astStep.Text)
+ }
+
+ _, maxLength := f.scenarioLengths(pickle)
+ stepLength := f.lengthPickleStep(astStep.Keyword, astStep.Text)
+
+ text += s(maxLength - stepLength)
+ text += " " + blackb("# "+DefinitionID(result.Def))
+ }
+
+ // print the step outline
+ fmt.Fprintln(f.out, s(f.indent*2)+cyan(strings.TrimSpace(astStep.Keyword))+" "+text)
+
+ if pickleStep.Argument != nil {
+ if table := pickleStep.Argument.DataTable; table != nil {
+ f.printTable(table, cyan)
+ }
+
+ if docString := astStep.DocString; docString != nil {
+ f.printDocString(docString)
+ }
+ }
+ }
+ }
+
+ max := longestExampleRow(exampleTable, clr, cyan)
+
+ // an example table header
+ if printExampleHeader {
+ fmt.Fprintln(f.out, "")
+ fmt.Fprintln(f.out, s(f.indent*2)+keywordAndName(exampleTable.Keyword, exampleTable.Name))
+
+ f.printTableHeader(exampleTable.TableHeader, max)
+ }
+
+ f.printTableRow(exampleRow, max, clr)
+
+ if errorMsg != "" {
+ fmt.Fprintln(f.out, s(f.indent*4)+redb(errorMsg))
+ }
+}
+
+func (f *Pretty) printTableRow(row *messages.TableRow, max []int, clr colors.ColorFunc) {
+ cells := make([]string, len(row.Cells))
+
+ for i, cell := range row.Cells {
+ val := clr(cell.Value)
+ ln := utf8.RuneCountInString(val)
+ cells[i] = val + s(max[i]-ln)
+ }
+
+ fmt.Fprintln(f.out, s(f.indent*3)+"| "+strings.Join(cells, " | ")+" |")
+}
+
+func (f *Pretty) printTableHeader(row *messages.TableRow, max []int) {
+ f.printTableRow(row, max, cyan)
+}
+
+func isFirstScenarioInRule(rule *messages.Rule, scenario *messages.Scenario) bool {
+ if rule == nil || scenario == nil {
+ return false
+ }
+ var firstScenario *messages.Scenario
+ for _, c := range rule.Children {
+ if c.Scenario != nil {
+ firstScenario = c.Scenario
+ break
+ }
+ }
+ return firstScenario != nil && firstScenario.Id == scenario.Id
+}
+
+func isFirstPickleAndNoRule(feature *models.Feature, pickle *messages.Pickle, rule *messages.Rule) bool {
+ if rule != nil {
+ return false
+ }
+ return feature.Pickles[0].Id == pickle.Id
+}
+
+func (f *Pretty) printStep(pickle *messages.Pickle, pickleStep *messages.PickleStep) {
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+ astBackground := feature.FindBackground(pickle.AstNodeIds[0])
+ astScenario := feature.FindScenario(pickle.AstNodeIds[0])
+ astRule := feature.FindRule(pickle.AstNodeIds[0])
+ astStep := feature.FindStep(pickleStep.AstNodeIds[0])
+
+ var astBackgroundStep bool
+ var firstExecutedBackgroundStep bool
+ var backgroundSteps int
+
+ if astBackground != nil {
+ backgroundSteps = len(astBackground.Steps)
+
+ for idx, step := range astBackground.Steps {
+ if step.Id == pickleStep.AstNodeIds[0] {
+ astBackgroundStep = true
+ firstExecutedBackgroundStep = idx == 0
+ break
+ }
+ }
+ }
+
+ firstPickle := isFirstPickleAndNoRule(feature, pickle, astRule) || isFirstScenarioInRule(astRule, astScenario)
+
+ if astBackgroundStep && !firstPickle {
+ return
+ }
+
+ if astBackgroundStep && firstExecutedBackgroundStep {
+ fmt.Fprintln(f.out, "\n"+s(f.indent)+keywordAndName(astBackground.Keyword, astBackground.Name))
+ }
+
+ if !astBackgroundStep && len(astScenario.Examples) > 0 {
+ f.printOutlineExample(pickle, pickleStep, backgroundSteps)
+ return
+ }
+
+ scenarioHeaderLength, maxLength := f.scenarioLengths(pickle)
+ stepLength := f.lengthPickleStep(astStep.Keyword, pickleStep.Text)
+
+ firstExecutedScenarioStep := astScenario.Steps[0].Id == pickleStep.AstNodeIds[0]
+ if !astBackgroundStep && firstExecutedScenarioStep {
+ f.printScenarioHeader(pickle, astScenario, maxLength-scenarioHeaderLength)
+ }
+
+ pickleStepResult := f.Storage.MustGetPickleStepResult(pickleStep.Id)
+ text := s(f.indent*2) + pickleStepResult.Status.Color()(strings.TrimSpace(astStep.Keyword)) + " " + pickleStepResult.Status.Color()(pickleStep.Text)
+ if pickleStepResult.Def != nil {
+ text += s(maxLength - stepLength + 1)
+ text += blackb("# " + DefinitionID(pickleStepResult.Def))
+ }
+ fmt.Fprintln(f.out, text)
+
+ if pickleStep.Argument != nil {
+ if table := pickleStep.Argument.DataTable; table != nil {
+ f.printTable(table, cyan)
+ }
+
+ if docString := astStep.DocString; docString != nil {
+ f.printDocString(docString)
+ }
+ }
+
+ if pickleStepResult.Err != nil {
+ fmt.Fprintln(f.out, s(f.indent*2)+redb(fmt.Sprintf("%+v", pickleStepResult.Err)))
+ }
+
+ if pickleStepResult.Status == pending {
+ fmt.Fprintln(f.out, s(f.indent*3)+yellow("TODO: write pending definition"))
+ }
+}
+
+func (f *Pretty) printDocString(docString *messages.DocString) {
+ var ct string
+
+ if len(docString.MediaType) > 0 {
+ ct = " " + cyan(docString.MediaType)
+ }
+
+ fmt.Fprintln(f.out, s(f.indent*3)+cyan(docString.Delimiter)+ct)
+
+ for _, ln := range strings.Split(docString.Content, "\n") {
+ fmt.Fprintln(f.out, s(f.indent*3)+cyan(ln))
+ }
+
+ fmt.Fprintln(f.out, s(f.indent*3)+cyan(docString.Delimiter))
+}
+
+// print table with aligned table cells
+// @TODO: need to make example header cells bold
+func (f *Pretty) printTable(t *messages.PickleTable, c colors.ColorFunc) {
+ maxColLengths := maxColLengths(t, c)
+ var cols = make([]string, len(t.Rows[0].Cells))
+
+ for _, row := range t.Rows {
+ for i, cell := range row.Cells {
+ val := c(cell.Value)
+ colLength := utf8.RuneCountInString(val)
+ cols[i] = val + s(maxColLengths[i]-colLength)
+ }
+
+ fmt.Fprintln(f.out, s(f.indent*3)+"| "+strings.Join(cols, " | ")+" |")
+ }
+}
+
+// longest gives a list of longest columns of all rows in Table
+func maxColLengths(t *messages.PickleTable, clrs ...colors.ColorFunc) []int {
+ if t == nil {
+ return []int{}
+ }
+
+ longest := make([]int, len(t.Rows[0].Cells))
+ for _, row := range t.Rows {
+ for i, cell := range row.Cells {
+ for _, c := range clrs {
+ ln := utf8.RuneCountInString(c(cell.Value))
+ if longest[i] < ln {
+ longest[i] = ln
+ }
+ }
+
+ ln := utf8.RuneCountInString(cell.Value)
+ if longest[i] < ln {
+ longest[i] = ln
+ }
+ }
+ }
+
+ return longest
+}
+
+func longestExampleRow(t *messages.Examples, clrs ...colors.ColorFunc) []int {
+ if t == nil {
+ return []int{}
+ }
+
+ longest := make([]int, len(t.TableHeader.Cells))
+ for i, cell := range t.TableHeader.Cells {
+ for _, c := range clrs {
+ ln := utf8.RuneCountInString(c(cell.Value))
+ if longest[i] < ln {
+ longest[i] = ln
+ }
+ }
+
+ ln := utf8.RuneCountInString(cell.Value)
+ if longest[i] < ln {
+ longest[i] = ln
+ }
+ }
+
+ for _, row := range t.TableBody {
+ for i, cell := range row.Cells {
+ for _, c := range clrs {
+ ln := utf8.RuneCountInString(c(cell.Value))
+ if longest[i] < ln {
+ longest[i] = ln
+ }
+ }
+
+ ln := utf8.RuneCountInString(cell.Value)
+ if longest[i] < ln {
+ longest[i] = ln
+ }
+ }
+ }
+
+ return longest
+}
+
+func (f *Pretty) longestStep(steps []*messages.Step, pickleLength int) int {
+ max := pickleLength
+
+ for _, step := range steps {
+ length := f.lengthPickleStep(step.Keyword, step.Text)
+ if length > max {
+ max = length
+ }
+ }
+
+ return max
+}
+
+// a line number representation in a feature file
+func line(path string, loc *messages.Location) string {
+ // Path can contain a line number already.
+ // This line number has to be trimmed to avoid duplication.
+ path = strings.TrimSuffix(path, fmt.Sprintf(":%d", loc.Line))
+ return " " + blackb(fmt.Sprintf("# %s:%d", path, loc.Line))
+}
+
+func (f *Pretty) lengthPickleStep(keyword, text string) int {
+ return f.indent*2 + utf8.RuneCountInString(strings.TrimSpace(keyword)+" "+text)
+}
+
+func (f *Pretty) lengthPickle(keyword, name string) int {
+ return f.indent + utf8.RuneCountInString(strings.TrimSpace(keyword)+": "+name)
+}
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_progress.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_progress.go
new file mode 100644
index 000000000..9722ef7a5
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_progress.go
@@ -0,0 +1,172 @@
+package formatters
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "github.com/cucumber/godog/formatters"
+ messages "github.com/cucumber/messages/go/v21"
+)
+
+func init() {
+ formatters.Format("progress", "Prints a character per step.", ProgressFormatterFunc)
+}
+
+// ProgressFormatterFunc implements the FormatterFunc for the progress formatter.
+func ProgressFormatterFunc(suite string, out io.Writer) formatters.Formatter {
+ return NewProgress(suite, out)
+}
+
+// NewProgress creates a new progress formatter.
+func NewProgress(suite string, out io.Writer) *Progress {
+ steps := 0
+ return &Progress{
+ Base: NewBase(suite, out),
+ StepsPerRow: 70,
+ Steps: &steps,
+ }
+}
+
+// Progress is a minimalistic formatter.
+type Progress struct {
+ *Base
+ StepsPerRow int
+ Steps *int
+}
+
+// Summary renders summary information.
+func (f *Progress) Summary() {
+ left := math.Mod(float64(*f.Steps), float64(f.StepsPerRow))
+ if left != 0 {
+ if *f.Steps > f.StepsPerRow {
+ fmt.Fprint(f.out, s(f.StepsPerRow-int(left))+fmt.Sprintf(" %d\n", *f.Steps))
+ } else {
+ fmt.Fprintf(f.out, " %d\n", *f.Steps)
+ }
+ }
+
+ var failedStepsOutput []string
+
+ failedSteps := f.Storage.MustGetPickleStepResultsByStatus(failed)
+ sort.Sort(sortPickleStepResultsByPickleStepID(failedSteps))
+
+ for _, sr := range failedSteps {
+ if sr.Status == failed {
+ pickle := f.Storage.MustGetPickle(sr.PickleID)
+ pickleStep := f.Storage.MustGetPickleStep(sr.PickleStepID)
+ feature := f.Storage.MustGetFeature(pickle.Uri)
+
+ sc := feature.FindScenario(pickle.AstNodeIds[0])
+ scenarioDesc := fmt.Sprintf("%s: %s", sc.Keyword, pickle.Name)
+ scenarioLine := fmt.Sprintf("%s:%d", pickle.Uri, sc.Location.Line)
+
+ step := feature.FindStep(pickleStep.AstNodeIds[0])
+ stepDesc := strings.TrimSpace(step.Keyword) + " " + pickleStep.Text
+ stepLine := fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line)
+
+ failedStepsOutput = append(
+ failedStepsOutput,
+ s(2)+red(scenarioDesc)+blackb(" # "+scenarioLine),
+ s(4)+red(stepDesc)+blackb(" # "+stepLine),
+ s(6)+red("Error: ")+redb(fmt.Sprintf("%+v", sr.Err)),
+ "",
+ )
+ }
+ }
+
+ if len(failedStepsOutput) > 0 {
+ fmt.Fprintln(f.out, "\n\n--- "+red("Failed steps:")+"\n")
+ fmt.Fprint(f.out, strings.Join(failedStepsOutput, "\n"))
+ }
+ fmt.Fprintln(f.out, "")
+
+ f.Base.Summary()
+}
+
+func (f *Progress) step(pickleStepID string) {
+ pickleStepResult := f.Storage.MustGetPickleStepResult(pickleStepID)
+
+ switch pickleStepResult.Status {
+ case passed:
+ fmt.Fprint(f.out, green("."))
+ case skipped:
+ fmt.Fprint(f.out, cyan("-"))
+ case failed:
+ fmt.Fprint(f.out, red("F"))
+ case undefined:
+ fmt.Fprint(f.out, yellow("U"))
+ case ambiguous:
+ fmt.Fprint(f.out, yellow("A"))
+ case pending:
+ fmt.Fprint(f.out, yellow("P"))
+ }
+
+ *f.Steps++
+
+ if math.Mod(float64(*f.Steps), float64(f.StepsPerRow)) == 0 {
+ fmt.Fprintf(f.out, " %d\n", *f.Steps)
+ }
+}
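+
+// Illustrative output (inferred from the logic above, not upstream
+// documentation): each step prints one character - "." passed, "-" skipped,
+// "F" failed, "U" undefined, "A" ambiguous, "P" pending - and after every
+// 70th step the running count is appended, e.g. "....F-U... 70".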
+
+// Passed captures passed step.
+func (f *Progress) Passed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Passed(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(step.Id)
+}
+
+// Skipped captures skipped step.
+func (f *Progress) Skipped(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Skipped(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(step.Id)
+}
+
+// Undefined captures undefined step.
+func (f *Progress) Undefined(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Undefined(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(step.Id)
+}
+
+// Failed captures failed step.
+func (f *Progress) Failed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) {
+ f.Base.Failed(pickle, step, match, err)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(step.Id)
+}
+
+// Ambiguous captures ambiguous step.
+func (f *Progress) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) {
+ f.Base.Ambiguous(pickle, step, match, err)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(step.Id)
+}
+
+// Pending captures pending step.
+func (f *Progress) Pending(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+ f.Base.Pending(pickle, step, match)
+
+ f.Lock.Lock()
+ defer f.Lock.Unlock()
+
+ f.step(step.Id)
+}
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/undefined_snippets_gen.go b/vendor/github.com/cucumber/godog/internal/formatters/undefined_snippets_gen.go
new file mode 100644
index 000000000..ff6cd79ef
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/undefined_snippets_gen.go
@@ -0,0 +1,108 @@
+package formatters
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+ "text/template"
+
+ messages "github.com/cucumber/messages/go/v21"
+)
+
+// some snippet formatting regexps
+var snippetExprCleanup = regexp.MustCompile(`([\/\[\]\(\)\\^\$\.\|\?\*\+\'])`)
+var snippetExprQuoted = regexp.MustCompile(`(\W|^)"(?:[^"]*)"(\W|$)`)
+var snippetMethodName = regexp.MustCompile(`[^a-zA-Z\_\ ]`)
+var snippetNumbers = regexp.MustCompile(`(\d+)`)
+
+var snippetHelperFuncs = template.FuncMap{
+ "backticked": func(s string) string {
+ return "`" + s + "`"
+ },
+}
+
+var undefinedSnippetsTpl = template.Must(template.New("snippets").Funcs(snippetHelperFuncs).Parse(`
+{{ range . }}func {{ .Method }}({{ .Args }}) error {
+ return godog.ErrPending
+}
+
+{{end}}func InitializeScenario(ctx *godog.ScenarioContext) { {{ range . }}
+ ctx.Step({{ backticked .Expr }}, {{ .Method }}){{end}}
+}
+`))
+
+type undefinedSnippet struct {
+ Method string
+ Expr string
+ argument *messages.PickleStepArgument
+}
+
+func (s undefinedSnippet) Args() (ret string) {
+ var (
+ args []string
+ pos int
+ breakLoop bool
+ )
+
+ for !breakLoop {
+ part := s.Expr[pos:]
+ ipos := strings.Index(part, "(\\d+)")
+ spos := strings.Index(part, "\"([^\"]*)\"")
+
+ switch {
+ case spos == -1 && ipos == -1:
+ breakLoop = true
+ case spos == -1:
+ pos += ipos + len("(\\d+)")
+ args = append(args, reflect.Int.String())
+ case ipos == -1:
+ pos += spos + len("\"([^\"]*)\"")
+ args = append(args, reflect.String.String())
+ case ipos < spos:
+ pos += ipos + len("(\\d+)")
+ args = append(args, reflect.Int.String())
+ case spos < ipos:
+ pos += spos + len("\"([^\"]*)\"")
+ args = append(args, reflect.String.String())
+ }
+ }
+
+ if s.argument != nil {
+ if s.argument.DocString != nil {
+ args = append(args, "*godog.DocString")
+ }
+
+ if s.argument.DataTable != nil {
+ args = append(args, "*godog.Table")
+ }
+ }
+
+ var last string
+
+ for i, arg := range args {
+ if last == "" || last == arg {
+ ret += fmt.Sprintf("arg%d, ", i+1)
+ } else {
+ ret = strings.TrimRight(ret, ", ") + fmt.Sprintf(" %s, arg%d, ", last, i+1)
+ }
+
+ last = arg
+ }
+
+ return strings.TrimSpace(strings.TrimRight(ret, ", ") + " " + last)
+}
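+
+// Illustrative note (not part of the upstream source): for a snippet
+// expression such as `^I eat (\d+) "([^"]*)" fruits$`, the generated
+// argument list is "arg1 int, arg2 string".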
+
+type snippetSortByMethod []undefinedSnippet
+
+func (s snippetSortByMethod) Len() int {
+ return len(s)
+}
+
+func (s snippetSortByMethod) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s snippetSortByMethod) Less(i, j int) bool {
+ return s[i].Method < s[j].Method
+}
diff --git a/vendor/github.com/cucumber/godog/internal/models/feature.go b/vendor/github.com/cucumber/godog/internal/models/feature.go
new file mode 100644
index 000000000..9d9d84da7
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/models/feature.go
@@ -0,0 +1,151 @@
+package models
+
+import (
+ messages "github.com/cucumber/messages/go/v21"
+)
+
+// Feature is an internal object to group together
+// the parsed gherkin document, the pickles and the
+// raw content.
+type Feature struct {
+ *messages.GherkinDocument
+ Pickles []*messages.Pickle
+ Content []byte
+}
+
+// FindRule returns the rule to which the given scenario belongs
+func (f Feature) FindRule(astScenarioID string) *messages.Rule {
+ for _, child := range f.GherkinDocument.Feature.Children {
+ if ru := child.Rule; ru != nil {
+ for _, rcc := range ru.Children {
+ if sc := rcc.Scenario; sc != nil && sc.Id == astScenarioID {
+ return ru
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// FindScenario returns the scenario in the feature or in a rule in the feature
+func (f Feature) FindScenario(astScenarioID string) *messages.Scenario {
+ for _, child := range f.GherkinDocument.Feature.Children {
+ if sc := child.Scenario; sc != nil && sc.Id == astScenarioID {
+ return sc
+ }
+ if rc := child.Rule; rc != nil {
+ for _, rcc := range rc.Children {
+ if sc := rcc.Scenario; sc != nil && sc.Id == astScenarioID {
+ return sc
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// FindBackground returns the background for the scenario with the given AST id, if any.
+func (f Feature) FindBackground(astScenarioID string) *messages.Background {
+ var bg *messages.Background
+
+ for _, child := range f.GherkinDocument.Feature.Children {
+ if tmp := child.Background; tmp != nil {
+ bg = tmp
+ }
+
+ if sc := child.Scenario; sc != nil && sc.Id == astScenarioID {
+ return bg
+ }
+
+ if ru := child.Rule; ru != nil {
+ for _, rc := range ru.Children {
+ if tmp := rc.Background; tmp != nil {
+ bg = tmp
+ }
+
+ if sc := rc.Scenario; sc != nil && sc.Id == astScenarioID {
+ return bg
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// FindExample returns the examples table and table row matching the given AST id.
+func (f Feature) FindExample(exampleAstID string) (*messages.Examples, *messages.TableRow) {
+ for _, child := range f.GherkinDocument.Feature.Children {
+ if sc := child.Scenario; sc != nil {
+ for _, example := range sc.Examples {
+ for _, row := range example.TableBody {
+ if row.Id == exampleAstID {
+ return example, row
+ }
+ }
+ }
+ }
+ if ru := child.Rule; ru != nil {
+ for _, rc := range ru.Children {
+ if sc := rc.Scenario; sc != nil {
+ for _, example := range sc.Examples {
+ for _, row := range example.TableBody {
+ if row.Id == exampleAstID {
+ return example, row
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+// FindStep returns the scenario or background step with the given AST id, including steps nested inside rules.
+func (f Feature) FindStep(astStepID string) *messages.Step {
+ for _, child := range f.GherkinDocument.Feature.Children {
+
+ if ru := child.Rule; ru != nil {
+ for _, ch := range ru.Children {
+ if sc := ch.Scenario; sc != nil {
+ for _, step := range sc.Steps {
+ if step.Id == astStepID {
+ return step
+ }
+ }
+ }
+
+ if bg := ch.Background; bg != nil {
+ for _, step := range bg.Steps {
+ if step.Id == astStepID {
+ return step
+ }
+ }
+ }
+ }
+ }
+
+ if sc := child.Scenario; sc != nil {
+ for _, step := range sc.Steps {
+ if step.Id == astStepID {
+ return step
+ }
+ }
+ }
+
+ if bg := child.Background; bg != nil {
+ for _, step := range bg.Steps {
+ if step.Id == astStepID {
+ return step
+ }
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cucumber/godog/internal/models/results.go b/vendor/github.com/cucumber/godog/internal/models/results.go
new file mode 100644
index 000000000..9c7f98d7f
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/models/results.go
@@ -0,0 +1,111 @@
+package models
+
+import (
+ "time"
+
+ "github.com/cucumber/godog/colors"
+ "github.com/cucumber/godog/internal/utils"
+)
+
+// TestRunStarted ...
+type TestRunStarted struct {
+ StartedAt time.Time
+}
+
+// PickleResult ...
+type PickleResult struct {
+ PickleID string
+ StartedAt time.Time
+}
+
+// PickleAttachment ...
+type PickleAttachment struct {
+ Name string
+ MimeType string
+ Data []byte
+}
+
+// PickleStepResult ...
+type PickleStepResult struct {
+ Status StepResultStatus
+ FinishedAt time.Time
+ Err error
+
+ PickleID string
+ PickleStepID string
+
+ Def *StepDefinition
+
+ Attachments []PickleAttachment
+}
+
+// NewStepResult ...
+func NewStepResult(
+ status StepResultStatus,
+ pickleID, pickleStepID string,
+ match *StepDefinition,
+ attachments []PickleAttachment,
+ err error,
+) PickleStepResult {
+ return PickleStepResult{
+ Status: status,
+ FinishedAt: utils.TimeNowFunc(),
+ Err: err,
+ PickleID: pickleID,
+ PickleStepID: pickleStepID,
+ Def: match,
+ Attachments: attachments,
+ }
+}
+
+// StepResultStatus ...
+type StepResultStatus int
+
+const (
+ // Passed ...
+ Passed StepResultStatus = iota
+ // Failed ...
+ Failed
+ // Skipped ...
+ Skipped
+ // Undefined ...
+ Undefined
+ // Pending ...
+ Pending
+ // Ambiguous ...
+ Ambiguous
+)
+
+// Color ...
+func (st StepResultStatus) Color() colors.ColorFunc {
+ switch st {
+ case Passed:
+ return colors.Green
+ case Failed:
+ return colors.Red
+ case Skipped:
+ return colors.Cyan
+ default:
+ return colors.Yellow
+ }
+}
+
+// String ...
+func (st StepResultStatus) String() string {
+ switch st {
+ case Passed:
+ return "passed"
+ case Failed:
+ return "failed"
+ case Skipped:
+ return "skipped"
+ case Undefined:
+ return "undefined"
+ case Pending:
+ return "pending"
+ case Ambiguous:
+ return "ambiguous"
+ default:
+ return "unknown"
+ }
+}
diff --git a/vendor/github.com/cucumber/godog/internal/models/stepdef.go b/vendor/github.com/cucumber/godog/internal/models/stepdef.go
new file mode 100644
index 000000000..7c2e973ac
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/models/stepdef.go
@@ -0,0 +1,309 @@
+package models
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ messages "github.com/cucumber/messages/go/v21"
+
+ "github.com/cucumber/godog/formatters"
+)
+
+var typeOfBytes = reflect.TypeOf([]byte(nil))
+
+// matchable errors
+var (
+ ErrUnmatchedStepArgumentNumber = errors.New("func expected more arguments than given")
+ ErrCannotConvert = errors.New("cannot convert argument")
+ ErrUnsupportedParameterType = errors.New("func has unsupported parameter type")
+)
+
+// StepDefinition ...
+type StepDefinition struct {
+ formatters.StepDefinition
+
+ Args []interface{}
+ HandlerValue reflect.Value
+ File string
+ Line int
+
+ // multistep related
+ Nested bool
+ Undefined []string
+}
+
+var typeOfContext = reflect.TypeOf((*context.Context)(nil)).Elem()
+
+// Run a step with the matched arguments using reflect
+// Returns one of ...
+// (context, error)
+// (context, godog.Steps)
+func (sd *StepDefinition) Run(ctx context.Context) (context.Context, interface{}) {
+ var values []reflect.Value
+
+ typ := sd.HandlerValue.Type()
+ numIn := typ.NumIn()
+ hasCtxIn := numIn > 0 && typ.In(0).Implements(typeOfContext)
+ ctxOffset := 0
+
+ if hasCtxIn {
+ values = append(values, reflect.ValueOf(ctx))
+ ctxOffset = 1
+ numIn--
+ }
+
+ if len(sd.Args) < numIn {
+ return ctx, fmt.Errorf("%w: expected %d arguments, matched %d from step", ErrUnmatchedStepArgumentNumber, numIn, len(sd.Args))
+ }
+
+ for i := 0; i < numIn; i++ {
+ param := typ.In(i + ctxOffset)
+ switch param.Kind() {
+ case reflect.Int:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseInt(s, 10, 0)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to int: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(int(v)))
+ case reflect.Int64:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to int64: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(v))
+ case reflect.Int32:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to int32: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(int32(v)))
+ case reflect.Int16:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseInt(s, 10, 16)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to int16: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(int16(v)))
+ case reflect.Int8:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseInt(s, 10, 8)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to int8: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(int8(v)))
+ case reflect.Uint:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseUint(s, 10, 0)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to uint: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(uint(v)))
+ case reflect.Uint64:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to uint64: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(v))
+ case reflect.Uint32:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to uint32: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(uint32(v)))
+ case reflect.Uint16:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseUint(s, 10, 16)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to uint16: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(uint16(v)))
+ case reflect.Uint8:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseUint(s, 10, 8)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to uint8: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(uint8(v)))
+ case reflect.String:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ values = append(values, reflect.ValueOf(s))
+ case reflect.Float64:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to float64: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(v))
+ case reflect.Float32:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ v, err := strconv.ParseFloat(s, 32)
+ if err != nil {
+ return ctx, fmt.Errorf(`%w %d: "%s" to float32: %s`, ErrCannotConvert, i, s, err)
+ }
+ values = append(values, reflect.ValueOf(float32(v)))
+ case reflect.Ptr:
+ arg := sd.Args[i]
+ switch param.Elem().String() {
+ case "messages.PickleDocString":
+ if v, ok := arg.(*messages.PickleStepArgument); ok {
+ values = append(values, reflect.ValueOf(v.DocString))
+ break
+ }
+
+ if v, ok := arg.(*messages.PickleDocString); ok {
+ values = append(values, reflect.ValueOf(v))
+ break
+ }
+
+ return ctx, fmt.Errorf(`%w %d: "%v" of type "%T" to *messages.PickleDocString`, ErrCannotConvert, i, arg, arg)
+ case "messages.PickleTable":
+ if v, ok := arg.(*messages.PickleStepArgument); ok {
+ values = append(values, reflect.ValueOf(v.DataTable))
+ break
+ }
+
+ if v, ok := arg.(*messages.PickleTable); ok {
+ values = append(values, reflect.ValueOf(v))
+ break
+ }
+
+ return ctx, fmt.Errorf(`%w %d: "%v" of type "%T" to *messages.PickleTable`, ErrCannotConvert, i, arg, arg)
+ default:
+ // the error here is that the declared function has an unsupported param type - really this ought to be trapped at registration time
+ return ctx, fmt.Errorf("%w: the data type of parameter %d type *%s is not supported", ErrUnsupportedParameterType, i, param.Elem().String())
+ }
+ case reflect.Slice:
+ switch param {
+ case typeOfBytes:
+ s, err := sd.shouldBeString(i)
+ if err != nil {
+ return ctx, err
+ }
+ values = append(values, reflect.ValueOf([]byte(s)))
+ default:
+ // the problem is the function decl is not using a supported slice type as the param
+ return ctx, fmt.Errorf("%w: the slice parameter %d type []%s is not supported", ErrUnsupportedParameterType, i, param.Elem().Kind())
+ }
+ case reflect.Struct:
+ return ctx, fmt.Errorf("%w: the struct parameter %d type %s is not supported", ErrUnsupportedParameterType, i, param.String())
+ default:
+ return ctx, fmt.Errorf("%w: the parameter %d type %s is not supported", ErrUnsupportedParameterType, i, param.Kind())
+ }
+ }
+
+ res := sd.HandlerValue.Call(values)
+ if len(res) == 0 {
+ return ctx, nil
+ }
+
+ // Note that the step fn return types were validated at Initialise in test_context.go stepWithKeyword()
+
+ // single return value may be one of ...
+ // error
+ // context.Context
+ // godog.Steps
+ result0 := res[0].Interface()
+ if len(res) == 1 {
+
+ // if the single return value is a context then just return it
+ if ctx, ok := result0.(context.Context); ok {
+ return ctx, nil
+ }
+
+ // return type is presumably one of nil, "error" or "Steps" so place it into second return position
+ return ctx, result0
+ }
+
+ // a multi-value return must be
+ // (context, error) and the context value must not be nil
+ if ctx, ok := result0.(context.Context); ok {
+ return ctx, res[1].Interface()
+ }
+
+ result1 := res[1].Interface()
+ errMsg := ""
+ if result1 != nil {
+ errMsg = fmt.Sprintf(", step def also returned an error: %v", result1)
+ }
+
+ text := sd.StepDefinition.Expr.String()
+
+ if result0 == nil {
+ panic(fmt.Sprintf("step definition '%v' with return type (context.Context, error) must not return for the context.Context value%s", text, errMsg))
+ }
+
+ panic(fmt.Errorf("step definition '%v' has return type (context.Context, error), but found %v rather than a context.Context value%s", text, result0, errMsg))
+}
+
+func (sd *StepDefinition) shouldBeString(idx int) (string, error) {
+ arg := sd.Args[idx]
+ switch arg := arg.(type) {
+ case string:
+ return arg, nil
+ case *messages.PickleStepArgument:
+ if arg.DocString == nil {
+ return "", fmt.Errorf(`%w %d: "%v" of type "%T": DocString is not set`, ErrCannotConvert, idx, arg, arg)
+ }
+ return arg.DocString.Content, nil
+ case *messages.PickleDocString:
+ return arg.Content, nil
+ default:
+ return "", fmt.Errorf(`%w %d: "%v" of type "%T" to string`, ErrCannotConvert, idx, arg, arg)
+ }
+}
+
+// GetInternalStepDefinition ...
+func (sd *StepDefinition) GetInternalStepDefinition() *formatters.StepDefinition {
+ if sd == nil {
+ return nil
+ }
+
+ return &sd.StepDefinition
+}
diff --git a/vendor/github.com/cucumber/godog/internal/parser/parser.go b/vendor/github.com/cucumber/godog/internal/parser/parser.go
new file mode 100644
index 000000000..f607000aa
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/parser/parser.go
@@ -0,0 +1,243 @@
+package parser
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ gherkin "github.com/cucumber/gherkin/go/v26"
+ messages "github.com/cucumber/messages/go/v21"
+
+ "github.com/cucumber/godog/internal/flags"
+ "github.com/cucumber/godog/internal/models"
+ "github.com/cucumber/godog/internal/tags"
+)
+
+var pathLineRe = regexp.MustCompile(`:([\d]+)$`)
+
+// ExtractFeaturePathLine splits an optional trailing ":<line>" suffix from a feature path.
+func ExtractFeaturePathLine(p string) (string, int) {
+ line := -1
+ retPath := p
+ if m := pathLineRe.FindStringSubmatch(p); len(m) > 0 {
+ if i, err := strconv.Atoi(m[1]); err == nil {
+ line = i
+ retPath = p[:strings.LastIndexByte(p, ':')]
+ }
+ }
+ return retPath, line
+}
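+
+// For example (illustrative, not upstream documentation):
+//
+//	ExtractFeaturePathLine("features/login.feature:12") // ("features/login.feature", 12)
+//	ExtractFeaturePathLine("features/login.feature")    // ("features/login.feature", -1)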
+
+func parseFeatureFile(fsys fs.FS, path, dialect string, newIDFunc func() string) (*models.Feature, error) {
+ reader, err := fsys.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ defer reader.Close()
+
+ var buf bytes.Buffer
+ gherkinDocument, err := gherkin.ParseGherkinDocumentForLanguage(io.TeeReader(reader, &buf), dialect, newIDFunc)
+ if err != nil {
+ return nil, fmt.Errorf("%s - %v", path, err)
+ }
+
+ gherkinDocument.Uri = path
+ pickles := gherkin.Pickles(*gherkinDocument, path, newIDFunc)
+
+ f := models.Feature{GherkinDocument: gherkinDocument, Pickles: pickles, Content: buf.Bytes()}
+ return &f, nil
+}
+
+func parseBytes(path string, feature []byte, dialect string, newIDFunc func() string) (*models.Feature, error) {
+ reader := bytes.NewReader(feature)
+
+ var buf bytes.Buffer
+ gherkinDocument, err := gherkin.ParseGherkinDocumentForLanguage(io.TeeReader(reader, &buf), dialect, newIDFunc)
+ if err != nil {
+ return nil, fmt.Errorf("%s - %v", path, err)
+ }
+
+ gherkinDocument.Uri = path
+ pickles := gherkin.Pickles(*gherkinDocument, path, newIDFunc)
+
+ f := models.Feature{GherkinDocument: gherkinDocument, Pickles: pickles, Content: buf.Bytes()}
+ return &f, nil
+}
+
+func parseFeatureDir(fsys fs.FS, dir, dialect string, newIDFunc func() string) ([]*models.Feature, error) {
+ var features []*models.Feature
+ return features, fs.WalkDir(fsys, dir, func(p string, f fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if f.IsDir() {
+ return nil
+ }
+
+ if !strings.HasSuffix(p, ".feature") {
+ return nil
+ }
+
+ feat, err := parseFeatureFile(fsys, p, dialect, newIDFunc)
+ if err != nil {
+ return err
+ }
+
+ features = append(features, feat)
+ return nil
+ })
+}
+
+func parsePath(fsys fs.FS, path, dialect string, newIDFunc func() string) ([]*models.Feature, error) {
+ var features []*models.Feature
+
+ path, line := ExtractFeaturePathLine(path)
+
+ fi, err := func() (fs.FileInfo, error) {
+ file, err := fsys.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return file.Stat()
+ }()
+ if err != nil {
+ return features, err
+ }
+
+ if fi.IsDir() {
+ return parseFeatureDir(fsys, path, dialect, newIDFunc)
+ }
+
+ ft, err := parseFeatureFile(fsys, path, dialect, newIDFunc)
+ if err != nil {
+ return features, err
+ }
+
+ // filter scenario by line number
+ var pickles []*messages.Pickle
+
+ if line != -1 {
+ ft.Uri += ":" + strconv.Itoa(line)
+ }
+
+ for _, pickle := range ft.Pickles {
+ sc := ft.FindScenario(pickle.AstNodeIds[0])
+
+ if line == -1 || int64(line) == sc.Location.Line {
+ if line != -1 {
+ pickle.Uri += ":" + strconv.Itoa(line)
+ }
+
+ pickles = append(pickles, pickle)
+ }
+ }
+ ft.Pickles = pickles
+
+ return append(features, ft), nil
+}
+
+// ParseFeatures parses, deduplicates and tag-filters features from the given paths.
+func ParseFeatures(fsys fs.FS, filter, dialect string, paths []string) ([]*models.Feature, error) {
+ var order int
+
+ if dialect == "" {
+ dialect = gherkin.DefaultDialect
+ }
+
+ featureIdxs := make(map[string]int)
+ uniqueFeatureURI := make(map[string]*models.Feature)
+ newIDFunc := (&messages.Incrementing{}).NewId
+ for _, path := range paths {
+ feats, err := parsePath(fsys, path, dialect, newIDFunc)
+
+ switch {
+ case os.IsNotExist(err):
+ return nil, fmt.Errorf(`feature path "%s" is not available`, path)
+ case os.IsPermission(err):
+ return nil, fmt.Errorf(`feature path "%s" is not accessible`, path)
+ case err != nil:
+ return nil, err
+ }
+
+ for _, ft := range feats {
+ if _, duplicate := uniqueFeatureURI[ft.Uri]; duplicate {
+ continue
+ }
+
+ uniqueFeatureURI[ft.Uri] = ft
+ featureIdxs[ft.Uri] = order
+
+ order++
+ }
+ }
+
+ var features = make([]*models.Feature, len(uniqueFeatureURI))
+ for uri, feature := range uniqueFeatureURI {
+ idx := featureIdxs[uri]
+ features[idx] = feature
+ }
+
+ features = filterFeatures(filter, features)
+
+ return features, nil
+}
+
+// FeatureContent is a named in-memory feature source.
+type FeatureContent = flags.Feature
+
+// ParseFromBytes parses, deduplicates and tag-filters features from in-memory contents.
+func ParseFromBytes(filter, dialect string, featuresInputs []FeatureContent) ([]*models.Feature, error) {
+ var order int
+
+ if dialect == "" {
+ dialect = gherkin.DefaultDialect
+ }
+
+ featureIdxs := make(map[string]int)
+ uniqueFeatureURI := make(map[string]*models.Feature)
+ newIDFunc := (&messages.Incrementing{}).NewId
+ for _, f := range featuresInputs {
+ ft, err := parseBytes(f.Name, f.Contents, dialect, newIDFunc)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, duplicate := uniqueFeatureURI[ft.Uri]; duplicate {
+ continue
+ }
+
+ uniqueFeatureURI[ft.Uri] = ft
+ featureIdxs[ft.Uri] = order
+
+ order++
+ }
+
+ var features = make([]*models.Feature, len(uniqueFeatureURI))
+ for uri, feature := range uniqueFeatureURI {
+ idx := featureIdxs[uri]
+ features[idx] = feature
+ }
+
+ features = filterFeatures(filter, features)
+
+ return features, nil
+}
+
+func filterFeatures(filter string, features []*models.Feature) (result []*models.Feature) {
+ for _, ft := range features {
+ ft.Pickles = tags.ApplyTagFilter(filter, ft.Pickles)
+
+ if ft.Feature != nil && len(ft.Pickles) > 0 {
+ result = append(result, ft)
+ }
+ }
+
+ return
+}
diff --git a/vendor/github.com/cucumber/godog/internal/storage/fs.go b/vendor/github.com/cucumber/godog/internal/storage/fs.go
new file mode 100644
index 000000000..333c61def
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/storage/fs.go
@@ -0,0 +1,21 @@
+package storage
+
+import (
+ "io/fs"
+ "os"
+)
+
+// FS is a wrapper that falls back to `os`.
+type FS struct {
+ FS fs.FS
+}
+
+// Open a file in the provided `fs.FS`. If none provided,
+// open via `os.Open`
+func (f FS) Open(name string) (fs.File, error) {
+ if f.FS == nil {
+ return os.Open(name)
+ }
+
+ return f.FS.Open(name)
+}
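+
+// A minimal usage sketch (assumed caller code, not part of this diff): an
+// embedded feature directory satisfies fs.FS and can be passed through this
+// wrapper, while a zero-value FS falls back to the OS file system:
+//
+//	//go:embed features
+//	var featuresFS embed.FS
+//
+//	opts.FS = storage.FS{FS: featuresFS} // read from the embedded files
+//	opts.FS = storage.FS{}               // read from disk via os.Open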
diff --git a/vendor/github.com/cucumber/godog/internal/storage/storage.go b/vendor/github.com/cucumber/godog/internal/storage/storage.go
new file mode 100644
index 000000000..72b7e86f7
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/storage/storage.go
@@ -0,0 +1,338 @@
+package storage
+
+import (
+ "fmt"
+ "sync"
+
+ messages "github.com/cucumber/messages/go/v21"
+ "github.com/hashicorp/go-memdb"
+
+ "github.com/cucumber/godog/internal/models"
+)
+
+const (
+ writeMode bool = true
+ readMode bool = false
+
+ tableFeature string = "feature"
+ tableFeatureIndexURI string = "id"
+
+ tablePickle string = "pickle"
+ tablePickleIndexID string = "id"
+ tablePickleIndexURI string = "uri"
+
+ tablePickleStep string = "pickle_step"
+ tablePickleStepIndexID string = "id"
+
+ tablePickleResult string = "pickle_result"
+ tablePickleResultIndexPickleID string = "id"
+
+ tablePickleStepResult string = "pickle_step_result"
+ tablePickleStepResultIndexPickleStepID string = "id"
+ tablePickleStepResultIndexPickleID string = "pickle_id"
+ tablePickleStepResultIndexStatus string = "status"
+
+ tableStepDefintionMatch string = "step_defintion_match"
+ tableStepDefintionMatchIndexStepID string = "id"
+)
+
+// Storage is a thread-safe in-memory storage
+type Storage struct {
+ db *memdb.MemDB
+
+ testRunStarted models.TestRunStarted
+ testRunStartedLock *sync.Mutex
+}
+
+// NewStorage will create an in-memory storage that
+// is used across concurrent runners and formatters
+func NewStorage() *Storage {
+ schema := memdb.DBSchema{
+ Tables: map[string]*memdb.TableSchema{
+ tableFeature: {
+ Name: tableFeature,
+ Indexes: map[string]*memdb.IndexSchema{
+ tableFeatureIndexURI: {
+ Name: tableFeatureIndexURI,
+ Unique: true,
+ Indexer: &memdb.StringFieldIndex{Field: "Uri"},
+ },
+ },
+ },
+ tablePickle: {
+ Name: tablePickle,
+ Indexes: map[string]*memdb.IndexSchema{
+ tablePickleIndexID: {
+ Name: tablePickleIndexID,
+ Unique: true,
+ Indexer: &memdb.StringFieldIndex{Field: "Id"},
+ },
+ tablePickleIndexURI: {
+ Name: tablePickleIndexURI,
+ Unique: false,
+ Indexer: &memdb.StringFieldIndex{Field: "Uri"},
+ },
+ },
+ },
+ tablePickleStep: {
+ Name: tablePickleStep,
+ Indexes: map[string]*memdb.IndexSchema{
+ tablePickleStepIndexID: {
+ Name: tablePickleStepIndexID,
+ Unique: true,
+ Indexer: &memdb.StringFieldIndex{Field: "Id"},
+ },
+ },
+ },
+ tablePickleResult: {
+ Name: tablePickleResult,
+ Indexes: map[string]*memdb.IndexSchema{
+ tablePickleResultIndexPickleID: {
+ Name: tablePickleResultIndexPickleID,
+ Unique: true,
+ Indexer: &memdb.StringFieldIndex{Field: "PickleID"},
+ },
+ },
+ },
+ tablePickleStepResult: {
+ Name: tablePickleStepResult,
+ Indexes: map[string]*memdb.IndexSchema{
+ tablePickleStepResultIndexPickleStepID: {
+ Name: tablePickleStepResultIndexPickleStepID,
+ Unique: true,
+ Indexer: &memdb.StringFieldIndex{Field: "PickleStepID"},
+ },
+ tablePickleStepResultIndexPickleID: {
+ Name: tablePickleStepResultIndexPickleID,
+ Unique: false,
+ Indexer: &memdb.StringFieldIndex{Field: "PickleID"},
+ },
+ tablePickleStepResultIndexStatus: {
+ Name: tablePickleStepResultIndexStatus,
+ Unique: false,
+ Indexer: &memdb.IntFieldIndex{Field: "Status"},
+ },
+ },
+ },
+ tableStepDefintionMatch: {
+ Name: tableStepDefintionMatch,
+ Indexes: map[string]*memdb.IndexSchema{
+ tableStepDefintionMatchIndexStepID: {
+ Name: tableStepDefintionMatchIndexStepID,
+ Unique: true,
+ Indexer: &memdb.StringFieldIndex{Field: "StepID"},
+ },
+ },
+ },
+ },
+ }
+
+ db, err := memdb.NewMemDB(&schema)
+ if err != nil {
+ panic(err)
+ }
+
+ return &Storage{db: db, testRunStartedLock: new(sync.Mutex)}
+}
+
+// MustInsertPickle will insert a pickle and its steps,
+// will panic on error.
+func (s *Storage) MustInsertPickle(p *messages.Pickle) {
+ txn := s.db.Txn(writeMode)
+
+ if err := txn.Insert(tablePickle, p); err != nil {
+ panic(err)
+ }
+
+ for _, step := range p.Steps {
+ if err := txn.Insert(tablePickleStep, step); err != nil {
+ panic(err)
+ }
+ }
+
+ txn.Commit()
+}
+
+// MustGetPickle will retrieve a pickle by id and panic on error.
+func (s *Storage) MustGetPickle(id string) *messages.Pickle {
+ v := s.mustFirst(tablePickle, tablePickleIndexID, id)
+ return v.(*messages.Pickle)
+}
+
+// MustGetPickles will retrieve pickles by URI and panic on error.
+func (s *Storage) MustGetPickles(uri string) (ps []*messages.Pickle) {
+ it := s.mustGet(tablePickle, tablePickleIndexURI, uri)
+ for v := it.Next(); v != nil; v = it.Next() {
+ ps = append(ps, v.(*messages.Pickle))
+ }
+
+ return
+}
+
+// MustGetPickleStep will retrieve a pickle step and panic on error.
+func (s *Storage) MustGetPickleStep(id string) *messages.PickleStep {
+ v := s.mustFirst(tablePickleStep, tablePickleStepIndexID, id)
+ return v.(*messages.PickleStep)
+}
+
+// MustInsertTestRunStarted will set the test run started event and panic on error.
+func (s *Storage) MustInsertTestRunStarted(trs models.TestRunStarted) {
+ s.testRunStartedLock.Lock()
+ defer s.testRunStartedLock.Unlock()
+
+ s.testRunStarted = trs
+}
+
+// MustGetTestRunStarted will retrieve the test run started event and panic on error.
+func (s *Storage) MustGetTestRunStarted() models.TestRunStarted {
+ s.testRunStartedLock.Lock()
+ defer s.testRunStartedLock.Unlock()
+
+ return s.testRunStarted
+}
+
+// MustInsertPickleResult will insert a pickle result and panic on error.
+func (s *Storage) MustInsertPickleResult(pr models.PickleResult) {
+ s.mustInsert(tablePickleResult, pr)
+}
+
+// MustInsertPickleStepResult will insert a pickle step result and panic on error.
+func (s *Storage) MustInsertPickleStepResult(psr models.PickleStepResult) {
+ s.mustInsert(tablePickleStepResult, psr)
+}
+
+// MustGetPickleResult will retrieve a pickle result by id and panic on error.
+func (s *Storage) MustGetPickleResult(id string) models.PickleResult {
+ v := s.mustFirst(tablePickleResult, tablePickleResultIndexPickleID, id)
+ return v.(models.PickleResult)
+}
+
+// MustGetPickleResults will retrieve all pickle results and panic on error.
+func (s *Storage) MustGetPickleResults() (prs []models.PickleResult) {
+ it := s.mustGet(tablePickleResult, tablePickleResultIndexPickleID)
+ for v := it.Next(); v != nil; v = it.Next() {
+ prs = append(prs, v.(models.PickleResult))
+ }
+
+ return prs
+}
+
+// MustGetPickleStepResult will retrieve a pickle step result by id and panic on error.
+func (s *Storage) MustGetPickleStepResult(id string) models.PickleStepResult {
+ v := s.mustFirst(tablePickleStepResult, tablePickleStepResultIndexPickleStepID, id)
+ return v.(models.PickleStepResult)
+}
+
+// MustGetPickleStepResultsByPickleID will retrieve pickle step results by pickle id and panic on error.
+func (s *Storage) MustGetPickleStepResultsByPickleID(pickleID string) (psrs []models.PickleStepResult) {
+ it := s.mustGet(tablePickleStepResult, tablePickleStepResultIndexPickleID, pickleID)
+ for v := it.Next(); v != nil; v = it.Next() {
+ psrs = append(psrs, v.(models.PickleStepResult))
+ }
+
+ return psrs
+}
+
+// MustGetPickleStepResultsByPickleIDUntilStep will retrieve pickle step results by pickle id
+// from the first step up to and including untilStepID.
+func (s *Storage) MustGetPickleStepResultsByPickleIDUntilStep(pickleID string, untilStepID string) (psrs []models.PickleStepResult) {
+ it := s.mustGet(tablePickleStepResult, tablePickleStepResultIndexPickleID, pickleID)
+ for v := it.Next(); v != nil; v = it.Next() {
+ psr := v.(models.PickleStepResult)
+ psrs = append(psrs, psr)
+ if psr.PickleStepID == untilStepID {
+ break
+ }
+ }
+
+ return psrs
+}
+
+// MustGetPickleStepResultsByStatus will retrieve pickle step results by status and panic on error.
+func (s *Storage) MustGetPickleStepResultsByStatus(status models.StepResultStatus) (psrs []models.PickleStepResult) {
+ it := s.mustGet(tablePickleStepResult, tablePickleStepResultIndexStatus, status)
+ for v := it.Next(); v != nil; v = it.Next() {
+ psrs = append(psrs, v.(models.PickleStepResult))
+ }
+
+ return psrs
+}
+
+// MustInsertFeature will insert a feature and panic on error.
+func (s *Storage) MustInsertFeature(f *models.Feature) {
+ s.mustInsert(tableFeature, f)
+}
+
+// MustGetFeature will retrieve a feature by URI and panic on error.
+func (s *Storage) MustGetFeature(uri string) *models.Feature {
+ v := s.mustFirst(tableFeature, tableFeatureIndexURI, uri)
+ return v.(*models.Feature)
+}
+
+// MustGetFeatures will retrieve all features and panic on error.
+func (s *Storage) MustGetFeatures() (fs []*models.Feature) {
+ it := s.mustGet(tableFeature, tableFeatureIndexURI)
+ for v := it.Next(); v != nil; v = it.Next() {
+ fs = append(fs, v.(*models.Feature))
+ }
+
+ return
+}
+
+type stepDefinitionMatch struct {
+ StepID string
+ StepDefinition *models.StepDefinition
+}
+
+// MustInsertStepDefintionMatch will insert the matched StepDefinition for the step ID and panic on error.
+func (s *Storage) MustInsertStepDefintionMatch(stepID string, match *models.StepDefinition) {
+ d := stepDefinitionMatch{
+ StepID: stepID,
+ StepDefinition: match,
+ }
+
+ s.mustInsert(tableStepDefintionMatch, d)
+}
+
+// MustGetStepDefintionMatch will retrieve the matched StepDefinition for the step ID and panic on error.
+func (s *Storage) MustGetStepDefintionMatch(stepID string) *models.StepDefinition {
+ v := s.mustFirst(tableStepDefintionMatch, tableStepDefintionMatchIndexStepID, stepID)
+ return v.(stepDefinitionMatch).StepDefinition
+}
+
+func (s *Storage) mustInsert(table string, obj interface{}) {
+ txn := s.db.Txn(writeMode)
+
+ if err := txn.Insert(table, obj); err != nil {
+ panic(err)
+ }
+
+ txn.Commit()
+}
+
+func (s *Storage) mustFirst(table, index string, args ...interface{}) interface{} {
+ txn := s.db.Txn(readMode)
+ defer txn.Abort()
+
+ v, err := txn.First(table, index, args...)
+ if err != nil {
+ panic(err)
+ } else if v == nil {
+ err = fmt.Errorf("couldn't find index: %q in table: %q with args: %+v", index, table, args)
+ panic(err)
+ }
+
+ return v
+}
+
+func (s *Storage) mustGet(table, index string, args ...interface{}) memdb.ResultIterator {
+ txn := s.db.Txn(readMode)
+ defer txn.Abort()
+
+ it, err := txn.Get(table, index, args...)
+ if err != nil {
+ panic(err)
+ }
+
+ return it
+}
diff --git a/vendor/github.com/cucumber/godog/internal/tags/tag_filter.go b/vendor/github.com/cucumber/godog/internal/tags/tag_filter.go
new file mode 100644
index 000000000..72b4512b4
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/tags/tag_filter.go
@@ -0,0 +1,62 @@
+package tags
+
+import (
+ "strings"
+
+ messages "github.com/cucumber/messages/go/v21"
+)
+
+// ApplyTagFilter will apply a filter string on the
+// array of pickles and return the filtered list.
+func ApplyTagFilter(filter string, pickles []*messages.Pickle) []*messages.Pickle {
+ if filter == "" {
+ return pickles
+ }
+
+ var result = []*messages.Pickle{}
+
+ for _, pickle := range pickles {
+ if match(filter, pickle.Tags) {
+ result = append(result, pickle)
+ }
+ }
+
+ return result
+}
+
+// Based on http://behat.readthedocs.org/en/v2.5/guides/6.cli.html#gherkin-filters
+func match(filter string, tags []*messages.PickleTag) (ok bool) {
+ ok = true
+
+ for _, andTags := range strings.Split(filter, "&&") {
+ var okComma bool
+
+ for _, tag := range strings.Split(andTags, ",") {
+ tag = strings.TrimSpace(tag)
+ tag = strings.Replace(tag, "@", "", -1)
+
+ okComma = contains(tags, tag) || okComma
+
+ if tag[0] == '~' {
+ tag = tag[1:]
+ okComma = !contains(tags, tag) || okComma
+ }
+ }
+
+ ok = ok && okComma
+ }
+
+ return
+}
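+
+// Illustrative filter strings (derived from the grammar above, not upstream
+// documentation):
+//
+//	"@wip"           - pickles tagged @wip
+//	"@wip,@new"      - tagged @wip OR @new
+//	"@wip && ~@skip" - tagged @wip AND NOT @skip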
+
+func contains(tags []*messages.PickleTag, tag string) bool {
+ for _, t := range tags {
+ tagName := strings.Replace(t.Name, "@", "", -1)
+
+ if tagName == tag {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/cucumber/godog/internal/utils/utils.go b/vendor/github.com/cucumber/godog/internal/utils/utils.go
new file mode 100644
index 000000000..f1ec21f95
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/utils/utils.go
@@ -0,0 +1,21 @@
+package utils
+
+import (
+ "strings"
+ "time"
+)
+
+// S repeats a space n times
+func S(n int) string {
+ if n < 0 {
+ n = 1
+ }
+ return strings.Repeat(" ", n)
+}
+
+// TimeNowFunc is a utility function to simplify testing
+// by allowing TimeNowFunc to be redefined, e.g. to a
+// zero time, to remove the time domain from tests
+var TimeNowFunc = func() time.Time {
+ return time.Now()
+}
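+
+// A minimal sketch (assumed test code, not part of this diff) of pinning
+// the clock through TimeNowFunc:
+//
+//	was := utils.TimeNowFunc
+//	utils.TimeNowFunc = func() time.Time { return time.Time{} }
+//	defer func() { utils.TimeNowFunc = was }()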
diff --git a/vendor/github.com/cucumber/godog/logo.png b/vendor/github.com/cucumber/godog/logo.png
new file mode 100644
index 000000000..70e6c7aa8
Binary files /dev/null and b/vendor/github.com/cucumber/godog/logo.png differ
diff --git a/vendor/github.com/cucumber/godog/logo.svg b/vendor/github.com/cucumber/godog/logo.svg
new file mode 100644
index 000000000..bfda7fdb1
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/logo.svg
@@ -0,0 +1,79 @@
+
diff --git a/vendor/github.com/cucumber/godog/mod_version.go b/vendor/github.com/cucumber/godog/mod_version.go
new file mode 100644
index 000000000..c915e1627
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/mod_version.go
@@ -0,0 +1,16 @@
+//go:build go1.12
+// +build go1.12
+
+package godog
+
+import (
+ "runtime/debug"
+)
+
+func init() {
+ if info, available := debug.ReadBuildInfo(); available {
+ if Version == "v0.0.0-dev" && info.Main.Version != "(devel)" {
+ Version = info.Main.Version
+ }
+ }
+}
diff --git a/vendor/github.com/cucumber/godog/options.go b/vendor/github.com/cucumber/godog/options.go
new file mode 100644
index 000000000..2b32cfd8f
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/options.go
@@ -0,0 +1,12 @@
+package godog
+
+import "github.com/cucumber/godog/internal/flags"
+
+// Options are suite run options
+// flags are mapped to these options.
+//
+// It can also be used together with godog.RunWithOptions
+// to run a test suite from Go source directly
+//
+// See the flags for more details
+type Options = flags.Options
diff --git a/vendor/github.com/cucumber/godog/run.go b/vendor/github.com/cucumber/godog/run.go
new file mode 100644
index 000000000..1231d0286
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/run.go
@@ -0,0 +1,409 @@
+package godog
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "go/build"
+ "io"
+ "io/fs"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+
+ messages "github.com/cucumber/messages/go/v21"
+
+ "github.com/cucumber/godog/colors"
+ "github.com/cucumber/godog/formatters"
+ ifmt "github.com/cucumber/godog/internal/formatters"
+ "github.com/cucumber/godog/internal/models"
+ "github.com/cucumber/godog/internal/parser"
+ "github.com/cucumber/godog/internal/storage"
+ "github.com/cucumber/godog/internal/utils"
+)
+
+const (
+ exitSuccess int = iota
+ exitFailure
+ exitOptionError
+)
+
+type (
+ testSuiteInitializer func(*TestSuiteContext)
+ scenarioInitializer func(*ScenarioContext)
+)
+
+type runner struct {
+ randomSeed int64
+ stopOnFailure, strict bool
+
+ defaultContext context.Context
+ testingT *testing.T
+
+ features []*models.Feature
+
+ testSuiteInitializer testSuiteInitializer
+ scenarioInitializer scenarioInitializer
+
+ storage *storage.Storage
+ fmt Formatter
+}
+
+func (r *runner) concurrent(rate int) (failed bool) {
+ var copyLock sync.Mutex
+
+ if fmt, ok := r.fmt.(storageFormatter); ok {
+ fmt.SetStorage(r.storage)
+ }
+
+ testSuiteContext := TestSuiteContext{
+ suite: &suite{
+ fmt: r.fmt,
+ randomSeed: r.randomSeed,
+ strict: r.strict,
+ storage: r.storage,
+ defaultContext: r.defaultContext,
+ testingT: r.testingT,
+ },
+ }
+ if r.testSuiteInitializer != nil {
+ r.testSuiteInitializer(&testSuiteContext)
+ }
+
+ testRunStarted := models.TestRunStarted{StartedAt: utils.TimeNowFunc()}
+ r.storage.MustInsertTestRunStarted(testRunStarted)
+ r.fmt.TestRunStarted()
+
+ // run before suite handlers
+ for _, f := range testSuiteContext.beforeSuiteHandlers {
+ f()
+ }
+
+ queue := make(chan int, rate)
+ for _, ft := range r.features {
+ pickles := make([]*messages.Pickle, len(ft.Pickles))
+ if r.randomSeed != 0 {
+ r := rand.New(rand.NewSource(r.randomSeed))
+ perm := r.Perm(len(ft.Pickles))
+ for i, v := range perm {
+ pickles[v] = ft.Pickles[i]
+ }
+ } else {
+ copy(pickles, ft.Pickles)
+ }
+
+ for i, p := range pickles {
+ pickle := *p
+
+ queue <- i // reserve space in queue
+
+ if i == 0 {
+ r.fmt.Feature(ft.GherkinDocument, ft.Uri, ft.Content)
+ }
+
+ runPickle := func(fail *bool, pickle *messages.Pickle) {
+ defer func() {
+ <-queue // free a space in queue
+ }()
+
+ if r.stopOnFailure && *fail {
+ return
+ }
+
+ // Copy base suite.
+ suite := *testSuiteContext.suite
+ if rate > 1 {
+ // if running concurrently, only print at end of scenario to keep
+ // scenario logs segregated
+ ffmt := ifmt.WrapOnFlush(testSuiteContext.suite.fmt)
+ suite.fmt = ffmt
+ defer ffmt.Flush()
+ }
+
+ if r.scenarioInitializer != nil {
+ sc := ScenarioContext{suite: &suite}
+ r.scenarioInitializer(&sc)
+ }
+
+ err := suite.runPickle(pickle)
+ if suite.shouldFail(err) {
+ copyLock.Lock()
+ *fail = true
+ copyLock.Unlock()
+ }
+ }
+
+ if rate == 1 {
+ // Running within the same goroutine for concurrency 1
+ // to preserve original stacks and simplify debugging.
+ runPickle(&failed, &pickle)
+ } else {
+ go runPickle(&failed, &pickle)
+ }
+ }
+ }
+
+ // wait until the last pickles are processed
+ for i := 0; i < rate; i++ {
+ queue <- i
+ }
+
+ close(queue)
+
+ // run after suite handlers
+ for _, f := range testSuiteContext.afterSuiteHandlers {
+ f()
+ }
+
+ // print summary
+ r.fmt.Summary()
+ return
+}
+
+func runWithOptions(suiteName string, runner runner, opt Options) int {
+ var output io.Writer = os.Stdout
+ if nil != opt.Output {
+ output = opt.Output
+ }
+
+ multiFmt := ifmt.MultiFormatter{}
+
+ for _, formatter := range strings.Split(opt.Format, ",") {
+ out := output
+ formatterParts := strings.SplitN(formatter, ":", 2)
+
+ if len(formatterParts) > 1 {
+ f, err := os.Create(formatterParts[1])
+ if err != nil {
+ err = fmt.Errorf(
+ `couldn't create file with name: "%s", error: %s`,
+ formatterParts[1], err.Error(),
+ )
+ fmt.Fprintln(os.Stderr, err)
+
+ return exitOptionError
+ }
+
+ defer f.Close()
+
+ out = f
+ }
+
+ if opt.NoColors {
+ out = colors.Uncolored(out)
+ } else {
+ out = colors.Colored(out)
+ }
+
+ if nil == formatters.FindFmt(formatterParts[0]) {
+ var names []string
+ for name := range formatters.AvailableFormatters() {
+ names = append(names, name)
+ }
+ fmt.Fprintln(os.Stderr, fmt.Errorf(
+ `unregistered formatter name: "%s", use one of: %s`,
+ opt.Format,
+ strings.Join(names, ", "),
+ ))
+ return exitOptionError
+ }
+
+ multiFmt.Add(formatterParts[0], out)
+ }
+
+ if opt.ShowStepDefinitions {
+ s := suite{}
+ sc := ScenarioContext{suite: &s}
+ runner.scenarioInitializer(&sc)
+ printStepDefinitions(s.steps, output)
+ return exitOptionError
+ }
+
+ if len(opt.Paths) == 0 && len(opt.FeatureContents) == 0 {
+ inf, err := func() (fs.FileInfo, error) {
+ file, err := opt.FS.Open("features")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return file.Stat()
+ }()
+ if err == nil && inf.IsDir() {
+ opt.Paths = []string{"features"}
+ }
+ }
+
+ if opt.Concurrency < 1 {
+ opt.Concurrency = 1
+ }
+
+ runner.fmt = multiFmt.FormatterFunc(suiteName, output)
+ opt.FS = storage.FS{FS: opt.FS}
+
+ if len(opt.FeatureContents) > 0 {
+ features, err := parser.ParseFromBytes(opt.Tags, opt.Dialect, opt.FeatureContents)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return exitOptionError
+ }
+ runner.features = append(runner.features, features...)
+ }
+
+ if len(opt.Paths) > 0 {
+ features, err := parser.ParseFeatures(opt.FS, opt.Tags, opt.Dialect, opt.Paths)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return exitOptionError
+ }
+ runner.features = append(runner.features, features...)
+ }
+
+ runner.storage = storage.NewStorage()
+ for _, feat := range runner.features {
+ runner.storage.MustInsertFeature(feat)
+
+ for _, pickle := range feat.Pickles {
+ runner.storage.MustInsertPickle(pickle)
+ }
+ }
+
+ // the user may have specified the -1 option to create a random seed
+ runner.randomSeed = opt.Randomize
+ if runner.randomSeed == -1 {
+ runner.randomSeed = makeRandomSeed()
+ }
+
+ runner.stopOnFailure = opt.StopOnFailure
+ runner.strict = opt.Strict
+ runner.defaultContext = opt.DefaultContext
+ runner.testingT = opt.TestingT
+
+	// store the chosen seed in the environment so it can be seen in the formatter summary report
+ os.Setenv("GODOG_SEED", strconv.FormatInt(runner.randomSeed, 10))
+ // determine tested package
+ _, filename, _, _ := runtime.Caller(1)
+ os.Setenv("GODOG_TESTED_PACKAGE", runsFromPackage(filename))
+
+ failed := runner.concurrent(opt.Concurrency)
+
+	// @TODO: should avoid leaking these environment variables
+ os.Setenv("GODOG_SEED", "")
+ os.Setenv("GODOG_TESTED_PACKAGE", "")
+ if failed && opt.Format != "events" {
+ return exitFailure
+ }
+ return exitSuccess
+}
+
+func runsFromPackage(fp string) string {
+ dir := filepath.Dir(fp)
+
+ gopaths := filepath.SplitList(build.Default.GOPATH)
+ for _, gp := range gopaths {
+ gp = filepath.Join(gp, "src")
+ if strings.Index(dir, gp) == 0 {
+ return strings.TrimLeft(strings.Replace(dir, gp, "", 1), string(filepath.Separator))
+ }
+ }
+ return dir
+}
+
+// TestSuite allows for configuration
+// of the test suite execution.
+type TestSuite struct {
+ Name string
+ TestSuiteInitializer func(*TestSuiteContext)
+ ScenarioInitializer func(*ScenarioContext)
+ Options *Options
+}
+
+// Run will execute the test suite.
+//
+// If options are not set, it will read
+// all configuration options from flags.
+//
+// The exit codes may vary from:
+//
+// 0 - success
+// 1 - failed
+// 2 - command line usage error
+// 128 - or higher, os signal related error exit codes
+//
+// If there are flag related errors they will be directed to os.Stderr
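+//
+// A minimal usage sketch (InitializeScenario is an assumed user-defined
+// function, not part of this package):
+//
+//	suite := TestSuite{
+//		Name:                "godogs",
+//		ScenarioInitializer: InitializeScenario,
+//		Options:             &Options{Format: "pretty", Paths: []string{"features"}},
+//	}
+//	os.Exit(suite.Run())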
+func (ts TestSuite) Run() int {
+ if ts.Options == nil {
+ var err error
+ ts.Options, err = getDefaultOptions()
+ if err != nil {
+ return exitOptionError
+ }
+ }
+ if ts.Options.FS == nil {
+ ts.Options.FS = storage.FS{}
+ }
+ if ts.Options.ShowHelp {
+ flag.CommandLine.Usage()
+
+ return 0
+ }
+
+ r := runner{testSuiteInitializer: ts.TestSuiteInitializer, scenarioInitializer: ts.ScenarioInitializer}
+ return runWithOptions(ts.Name, r, *ts.Options)
+}
+
+// RetrieveFeatures will parse and return the features based on test suite option
+// Any modification on the parsed features will not have any impact on the next Run of the Test Suite
+func (ts TestSuite) RetrieveFeatures() ([]*models.Feature, error) {
+ opt := ts.Options
+
+ if opt == nil {
+ var err error
+ opt, err = getDefaultOptions()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if ts.Options.FS == nil {
+ ts.Options.FS = storage.FS{}
+ }
+
+ if len(opt.Paths) == 0 {
+ inf, err := func() (fs.FileInfo, error) {
+ file, err := opt.FS.Open("features")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return file.Stat()
+ }()
+ if err == nil && inf.IsDir() {
+ opt.Paths = []string{"features"}
+ }
+ }
+
+ return parser.ParseFeatures(opt.FS, opt.Tags, opt.Dialect, opt.Paths)
+}
+
+func getDefaultOptions() (*Options, error) {
+ opt := &Options{}
+ opt.Output = colors.Colored(os.Stdout)
+
+ flagSet := flagSet(opt)
+ if err := flagSet.Parse(os.Args[1:]); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return nil, err
+ }
+
+ opt.Paths = flagSet.Args()
+ opt.FS = storage.FS{}
+
+ return opt, nil
+}
diff --git a/vendor/github.com/cucumber/godog/stacktrace.go b/vendor/github.com/cucumber/godog/stacktrace.go
new file mode 100644
index 000000000..686c6b09b
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/stacktrace.go
@@ -0,0 +1,141 @@
+package godog
+
+import (
+ "fmt"
+ "go/build"
+ "io"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+// stackFrame represents a program counter inside a stack frame.
+type stackFrame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f stackFrame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f stackFrame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+func trimGoPath(file string) string {
+ for _, p := range filepath.SplitList(build.Default.GOPATH) {
+ file = strings.Replace(file, filepath.Join(p, "src")+string(filepath.Separator), "", 1)
+ }
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f stackFrame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s path of source file relative to the compile time GOPATH
+// %+v equivalent to %+s:%d
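+//
+// For example (a sketch; exact values depend on the build):
+//
+//	fmt.Sprintf("%v", f)  // e.g. "suite.go:42"
+//	fmt.Sprintf("%+v", f) // e.g. "pkg.fn\n\tpkg/suite.go:42"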
+func (f stackFrame) Format(s fmt.State, verb rune) {
+ funcname := func(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+ }
+
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ pc := f.pc()
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ io.WriteString(s, "unknown")
+ } else {
+ file, _ := fn.FileLine(pc)
+ fmt.Fprintf(s, "%s\n\t%s", fn.Name(), trimGoPath(file))
+ }
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ fmt.Fprintf(s, "%d", f.line())
+ case 'n':
+ name := runtime.FuncForPC(f.pc()).Name()
+ io.WriteString(s, funcname(name))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := stackFrame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func callStack() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// traceError is an error that has a message and a stack, but no caller.
+type traceError struct {
+ msg string
+ *stack
+}
+
+func (f *traceError) Error() string { return f.msg }
+
+func (f *traceError) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
diff --git a/vendor/github.com/cucumber/godog/suite.go b/vendor/github.com/cucumber/godog/suite.go
new file mode 100644
index 000000000..6ca1bf53d
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/suite.go
@@ -0,0 +1,651 @@
+package godog
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ messages "github.com/cucumber/messages/go/v21"
+
+ "github.com/cucumber/godog/formatters"
+ "github.com/cucumber/godog/internal/models"
+ "github.com/cucumber/godog/internal/storage"
+ "github.com/cucumber/godog/internal/utils"
+)
+
+var (
+ errorInterface = reflect.TypeOf((*error)(nil)).Elem()
+ contextInterface = reflect.TypeOf((*context.Context)(nil)).Elem()
+)
+
+// ErrAmbiguous is returned when more than one step definition matches the step text.
+var ErrAmbiguous = fmt.Errorf("ambiguous step definition")
+
+// ErrUndefined is returned when a step definition was not found.
+var ErrUndefined = fmt.Errorf("step is undefined")
+
+// ErrPending should be returned by a step definition if
+// the step implementation is pending.
+var ErrPending = fmt.Errorf("step implementation is pending")
+
+// ErrSkip should be returned by step definition or a hook if scenario and further steps are to be skipped.
+var ErrSkip = fmt.Errorf("skipped")
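+
+// A minimal sketch of flagging a not-yet-implemented step from a step
+// definition (sc is an assumed *ScenarioContext, and the step text is
+// illustrative):
+//
+//	sc.Step(`^feature X is configured$`, func() error {
+//		return ErrPending
+//	})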
+
+// StepResultStatus describes step result.
+type StepResultStatus = models.StepResultStatus
+
+const (
+ // StepPassed indicates step that passed.
+ StepPassed StepResultStatus = models.Passed
+ // StepFailed indicates step that failed.
+ StepFailed = models.Failed
+ // StepSkipped indicates step that was skipped.
+ StepSkipped = models.Skipped
+ // StepUndefined indicates undefined step.
+ StepUndefined = models.Undefined
+ // StepPending indicates step with pending implementation.
+ StepPending = models.Pending
+ // StepAmbiguous indicates step text matches more than one step def
+ StepAmbiguous = models.Ambiguous
+)
+
+type suite struct {
+ steps []*models.StepDefinition
+
+ fmt Formatter
+ storage *storage.Storage
+
+ failed bool
+ randomSeed int64
+ stopOnFailure bool
+ strict bool
+
+ defaultContext context.Context
+ testingT *testing.T
+
+ // suite event handlers
+ beforeScenarioHandlers []BeforeScenarioHook
+ beforeStepHandlers []BeforeStepHook
+ afterStepHandlers []AfterStepHook
+ afterScenarioHandlers []AfterScenarioHook
+}
+
+type Attachment struct {
+ Body []byte
+ FileName string
+ MediaType string
+}
+
+type attachmentKey struct{}
+
+func Attach(ctx context.Context, attachments ...Attachment) context.Context {
+ existing := Attachments(ctx)
+ updated := append(existing, attachments...)
+ return context.WithValue(ctx, attachmentKey{}, updated)
+}
+
+func Attachments(ctx context.Context) []Attachment {
+ v := ctx.Value(attachmentKey{})
+
+ if v == nil {
+ return []Attachment{}
+ }
+ return v.([]Attachment)
+}
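+
+// A minimal sketch of adding an attachment from within a step (values are
+// illustrative):
+//
+//	ctx = Attach(ctx, Attachment{
+//		Body:      []byte("hello"),
+//		FileName:  "greeting.txt",
+//		MediaType: "text/plain",
+//	})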
+
+func clearAttach(ctx context.Context) context.Context {
+ return context.WithValue(ctx, attachmentKey{}, nil)
+}
+
+func pickleAttachments(ctx context.Context) []models.PickleAttachment {
+ pickledAttachments := []models.PickleAttachment{}
+ attachments := Attachments(ctx)
+
+ for _, a := range attachments {
+ pickledAttachments = append(pickledAttachments, models.PickleAttachment{
+ Name: a.FileName,
+ Data: a.Body,
+ MimeType: a.MediaType,
+ })
+ }
+
+ return pickledAttachments
+}
+
+func (s *suite) matchStep(step *messages.PickleStep) (*models.StepDefinition, error) {
+ def, err := s.matchStepTextAndType(step.Text, step.Type)
+ if err != nil {
+ return nil, err
+ }
+
+ if def != nil && step.Argument != nil {
+ def.Args = append(def.Args, step.Argument)
+ }
+ return def, nil
+}
+
+func (s *suite) runStep(ctx context.Context, pickle *Scenario, step *Step, scenarioErr error, isFirst, isLast bool) (rctx context.Context, err error) {
+ var match *models.StepDefinition
+
+ rctx = ctx
+
+ // user multistep definitions may panic
+ defer func() {
+ if e := recover(); e != nil {
+ pe, isErr := e.(error)
+ switch {
+ case isErr && errors.Is(pe, errStopNow):
+ // FailNow or SkipNow called on dogTestingT, so clear the error to let the normal
+ // below getTestingT(ctx).isFailed() call handle the reasons.
+ err = nil
+ case err != nil:
+ err = &traceError{
+ msg: fmt.Sprintf("%s: %v", err.Error(), e),
+ stack: callStack(),
+ }
+ default:
+ err = &traceError{
+ msg: fmt.Sprintf("%v", e),
+ stack: callStack(),
+ }
+ }
+ }
+
+ earlyReturn := scenarioErr != nil || errors.Is(err, ErrUndefined)
+
+ // Check for any calls to Fail on dogT
+ if err == nil {
+ if t := getTestingT(ctx); t != nil {
+ err = t.isFailed()
+ }
+ }
+
+ status := StepUndefined
+
+ switch {
+ case errors.Is(err, ErrAmbiguous):
+ status = StepAmbiguous
+ case errors.Is(err, ErrPending):
+ status = StepPending
+ case errors.Is(err, ErrSkip), err == nil && scenarioErr != nil:
+ status = StepSkipped
+ case errors.Is(err, ErrUndefined):
+ status = StepUndefined
+ case err != nil:
+ status = StepFailed
+ case err == nil && scenarioErr == nil:
+ status = StepPassed
+ }
+
+ // Run after step handlers.
+ rctx, err = s.runAfterStepHooks(ctx, step, status, err)
+
+ // Trigger after scenario on failing or last step to attach possible hook error to step.
+ if !s.shouldFail(scenarioErr) && (isLast || s.shouldFail(err)) {
+ rctx, err = s.runAfterScenarioHooks(rctx, pickle, err)
+ }
+
+ // extract any accumulated attachments and clear them
+ pickledAttachments := pickleAttachments(rctx)
+ rctx = clearAttach(rctx)
+
+ if earlyReturn {
+ return
+ }
+
+ switch {
+ case err == nil:
+ sr := models.NewStepResult(models.Passed, pickle.Id, step.Id, match, pickledAttachments, nil)
+ s.storage.MustInsertPickleStepResult(sr)
+ s.fmt.Passed(pickle, step, match.GetInternalStepDefinition())
+ case errors.Is(err, ErrPending):
+ sr := models.NewStepResult(models.Pending, pickle.Id, step.Id, match, pickledAttachments, nil)
+ s.storage.MustInsertPickleStepResult(sr)
+ s.fmt.Pending(pickle, step, match.GetInternalStepDefinition())
+ case errors.Is(err, ErrSkip):
+ sr := models.NewStepResult(models.Skipped, pickle.Id, step.Id, match, pickledAttachments, nil)
+ s.storage.MustInsertPickleStepResult(sr)
+ s.fmt.Skipped(pickle, step, match.GetInternalStepDefinition())
+ case errors.Is(err, ErrAmbiguous):
+ sr := models.NewStepResult(models.Ambiguous, pickle.Id, step.Id, match, pickledAttachments, err)
+ s.storage.MustInsertPickleStepResult(sr)
+ s.fmt.Ambiguous(pickle, step, match.GetInternalStepDefinition(), err)
+ default:
+ sr := models.NewStepResult(models.Failed, pickle.Id, step.Id, match, pickledAttachments, err)
+ s.storage.MustInsertPickleStepResult(sr)
+ s.fmt.Failed(pickle, step, match.GetInternalStepDefinition(), err)
+ }
+ }()
+
+ // run before scenario handlers
+ if isFirst {
+ ctx, err = s.runBeforeScenarioHooks(ctx, pickle)
+ }
+
+ // run before step handlers
+ ctx, err = s.runBeforeStepHooks(ctx, step, err)
+
+ var matchError error
+ match, matchError = s.matchStep(step)
+
+ s.storage.MustInsertStepDefintionMatch(step.AstNodeIds[0], match)
+ s.fmt.Defined(pickle, step, match.GetInternalStepDefinition())
+
+ if err != nil {
+ pickledAttachments := pickleAttachments(ctx)
+ ctx = clearAttach(ctx)
+
+ sr := models.NewStepResult(models.Failed, pickle.Id, step.Id, match, pickledAttachments, nil)
+ s.storage.MustInsertPickleStepResult(sr)
+ return ctx, err
+ }
+
+ if matchError != nil {
+ return ctx, matchError
+ }
+
+ if ctx, undef, err := s.maybeUndefined(ctx, step.Text, step.Argument, step.Type); err != nil {
+ return ctx, err
+ } else if len(undef) > 0 {
+ if match != nil {
+ match = &models.StepDefinition{
+ StepDefinition: formatters.StepDefinition{
+ Expr: match.Expr,
+ Handler: match.Handler,
+ Keyword: match.Keyword,
+ },
+ Args: match.Args,
+ HandlerValue: match.HandlerValue,
+ File: match.File,
+ Line: match.Line,
+ Nested: match.Nested,
+ Undefined: undef,
+ }
+ }
+
+ pickledAttachments := pickleAttachments(ctx)
+ ctx = clearAttach(ctx)
+
+ sr := models.NewStepResult(models.Undefined, pickle.Id, step.Id, match, pickledAttachments, nil)
+ s.storage.MustInsertPickleStepResult(sr)
+
+ s.fmt.Undefined(pickle, step, match.GetInternalStepDefinition())
+ return ctx, fmt.Errorf("%w: %s", ErrUndefined, step.Text)
+ }
+
+ if scenarioErr != nil {
+ pickledAttachments := pickleAttachments(ctx)
+ ctx = clearAttach(ctx)
+
+ sr := models.NewStepResult(models.Skipped, pickle.Id, step.Id, match, pickledAttachments, nil)
+ s.storage.MustInsertPickleStepResult(sr)
+
+ s.fmt.Skipped(pickle, step, match.GetInternalStepDefinition())
+ return ctx, nil
+ }
+
+ ctx, err = s.maybeSubSteps(match.Run(ctx))
+
+ return ctx, err
+}
+
+func (s *suite) runBeforeStepHooks(ctx context.Context, step *Step, err error) (context.Context, error) {
+ hooksFailed := false
+
+ for _, f := range s.beforeStepHandlers {
+ hctx, herr := f(ctx, step)
+ if herr != nil {
+ hooksFailed = true
+
+ if err == nil {
+ err = herr
+ } else {
+ err = fmt.Errorf("%v, %w", herr, err)
+ }
+ }
+
+ if hctx != nil {
+ ctx = hctx
+ }
+ }
+
+ if hooksFailed {
+ err = fmt.Errorf("before step hook failed: %w", err)
+ }
+
+ return ctx, err
+}
+
+func (s *suite) runAfterStepHooks(ctx context.Context, step *Step, status StepResultStatus, err error) (context.Context, error) {
+ for _, f := range s.afterStepHandlers {
+ hctx, herr := f(ctx, step, status, err)
+
+ // Adding hook error to resulting error without breaking hooks loop.
+ if herr != nil {
+ if err == nil {
+ err = herr
+ } else {
+ err = fmt.Errorf("%v, %w", herr, err)
+ }
+ }
+
+ if hctx != nil {
+ ctx = hctx
+ }
+ }
+
+ return ctx, err
+}
+
+func (s *suite) runBeforeScenarioHooks(ctx context.Context, pickle *messages.Pickle) (context.Context, error) {
+ var err error
+
+ // run before scenario handlers
+ for _, f := range s.beforeScenarioHandlers {
+ hctx, herr := f(ctx, pickle)
+ if herr != nil {
+ if err == nil {
+ err = herr
+ } else {
+ err = fmt.Errorf("%v, %w", herr, err)
+ }
+ }
+
+ if hctx != nil {
+ ctx = hctx
+ }
+ }
+
+ if err != nil {
+ err = fmt.Errorf("before scenario hook failed: %w", err)
+ }
+
+ return ctx, err
+}
+
+func (s *suite) runAfterScenarioHooks(ctx context.Context, pickle *messages.Pickle, lastStepErr error) (context.Context, error) {
+ err := lastStepErr
+
+ hooksFailed := false
+ isStepErr := true
+
+ // run after scenario handlers
+ for _, f := range s.afterScenarioHandlers {
+ hctx, herr := f(ctx, pickle, err)
+
+ // Adding hook error to resulting error without breaking hooks loop.
+ if herr != nil {
+ hooksFailed = true
+
+ if err == nil {
+ isStepErr = false
+ err = herr
+ } else {
+ if isStepErr {
+ err = fmt.Errorf("step error: %w", err)
+ isStepErr = false
+ }
+ err = fmt.Errorf("%v, %w", herr, err)
+ }
+ }
+
+ if hctx != nil {
+ ctx = hctx
+ }
+ }
+
+ if hooksFailed {
+ err = fmt.Errorf("after scenario hook failed: %w", err)
+ }
+
+ return ctx, err
+}
+
+func (s *suite) maybeUndefined(ctx context.Context, text string, arg interface{}, stepType messages.PickleStepType) (context.Context, []string, error) {
+ var undefined []string
+ step, err := s.matchStepTextAndType(text, stepType)
+ if err != nil {
+ return ctx, undefined, err
+ }
+
+ if nil == step {
+ return ctx, []string{text}, nil
+ }
+
+ if !step.Nested {
+ return ctx, undefined, nil
+ }
+
+ if arg != nil {
+ step.Args = append(step.Args, arg)
+ }
+
+ ctx, steps := step.Run(ctx)
+
+ for _, next := range steps.(Steps) {
+ lines := strings.Split(next, "\n")
+ // @TODO: we cannot currently parse table or content body from nested steps
+ if len(lines) > 1 {
+ return ctx, undefined, fmt.Errorf("nested steps cannot be multiline and have table or content body argument")
+ }
+ if len(lines[0]) > 0 && lines[0][len(lines[0])-1] == ':' {
+ return ctx, undefined, fmt.Errorf("nested steps cannot be multiline and have table or content body argument")
+ }
+ ctx, undef, err := s.maybeUndefined(ctx, next, nil, messages.PickleStepType_UNKNOWN)
+ if err != nil {
+ return ctx, undefined, err
+ }
+ undefined = append(undefined, undef...)
+ }
+ return ctx, undefined, nil
+}
+
+func (s *suite) maybeSubSteps(ctx context.Context, result interface{}) (context.Context, error) {
+ if nil == result {
+ return ctx, nil
+ }
+
+ if err, ok := result.(error); ok {
+ return ctx, err
+ }
+
+ steps, ok := result.(Steps)
+ if !ok {
+ return ctx, fmt.Errorf("unexpected error, should have been godog.Steps: %T - %+v", result, result)
+ }
+
+ for _, text := range steps {
+ def, err := s.matchStepTextAndType(text, messages.PickleStepType_UNKNOWN)
+ if err != nil {
+ return ctx, err
+ }
+
+ if def == nil {
+ return ctx, fmt.Errorf("%w: %s", ErrUndefined, text)
+ } else {
+ ctx, err = s.runSubStep(ctx, text, def)
+ if err != nil {
+ return ctx, err
+ }
+ }
+ }
+ return ctx, nil
+}
+
+func (s *suite) runSubStep(ctx context.Context, text string, def *models.StepDefinition) (_ context.Context, err error) {
+ st := &Step{}
+ st.Text = text
+ st.Type = messages.PickleStepType_ACTION
+
+ defer func() {
+ status := StepPassed
+
+ switch {
+ case errors.Is(err, ErrUndefined):
+ status = StepUndefined
+ case errors.Is(err, ErrPending):
+ status = StepPending
+ case err != nil:
+ status = StepFailed
+ }
+
+ ctx, err = s.runAfterStepHooks(ctx, st, status, err)
+ }()
+
+ ctx, err = s.runBeforeStepHooks(ctx, st, nil)
+ if err != nil {
+ return ctx, fmt.Errorf("%s: %+v", text, err)
+ }
+
+ if ctx, err = s.maybeSubSteps(def.Run(ctx)); err != nil {
+ return ctx, fmt.Errorf("%s: %+v", text, err)
+ }
+
+ return ctx, nil
+}
+
+func (s *suite) matchStepTextAndType(text string, stepType messages.PickleStepType) (*models.StepDefinition, error) {
+ var first *models.StepDefinition
+ matchingExpressions := make([]string, 0)
+
+ for _, h := range s.steps {
+ if m := h.Expr.FindStringSubmatch(text); len(m) > 0 {
+ if !keywordMatches(h.Keyword, stepType) {
+ continue
+ }
+ var args []interface{}
+ for _, m := range m[1:] {
+ args = append(args, m)
+ }
+
+ matchingExpressions = append(matchingExpressions, h.Expr.String())
+
+			// since we need to assign arguments,
+			// it is better to copy the step definition
+ match := &models.StepDefinition{
+ StepDefinition: formatters.StepDefinition{
+ Expr: h.Expr,
+ Handler: h.Handler,
+ Keyword: h.Keyword,
+ },
+ Args: args,
+ HandlerValue: h.HandlerValue,
+ File: h.File,
+ Line: h.Line,
+ Nested: h.Nested,
+ }
+
+ if first == nil {
+ first = match
+ }
+ }
+ }
+
+ if s.strict {
+ if len(matchingExpressions) > 1 {
+ errs := "\n " + strings.Join(matchingExpressions, "\n ")
+ return nil, fmt.Errorf("%w, step text: %s\n matches:%s", ErrAmbiguous, text, errs)
+ }
+ }
+
+ return first, nil
+}
+
+func keywordMatches(k formatters.Keyword, stepType messages.PickleStepType) bool {
+ if k == formatters.None {
+ return true
+ }
+ switch stepType {
+ case messages.PickleStepType_CONTEXT:
+ return k == formatters.Given
+ case messages.PickleStepType_ACTION:
+ return k == formatters.When
+ case messages.PickleStepType_OUTCOME:
+ return k == formatters.Then
+ default:
+ return true
+ }
+}
+
+func (s *suite) runSteps(ctx context.Context, pickle *Scenario, steps []*Step) (context.Context, error) {
+ var (
+ stepErr, scenarioErr error
+ )
+
+ for i, step := range steps {
+ isLast := i == len(steps)-1
+ isFirst := i == 0
+ ctx, stepErr = s.runStep(ctx, pickle, step, scenarioErr, isFirst, isLast)
+ if scenarioErr == nil || s.shouldFail(stepErr) {
+ scenarioErr = stepErr
+ }
+ }
+
+ return ctx, scenarioErr
+}
+
+func (s *suite) shouldFail(err error) bool {
+ if err == nil || errors.Is(err, ErrSkip) {
+ return false
+ }
+
+ if errors.Is(err, ErrUndefined) || errors.Is(err, ErrPending) {
+ return s.strict
+ }
+
+ return true
+}
+
+func (s *suite) runPickle(pickle *messages.Pickle) (err error) {
+ ctx := s.defaultContext
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+
+ defer cancel()
+
+ if len(pickle.Steps) == 0 {
+ pr := models.PickleResult{PickleID: pickle.Id, StartedAt: utils.TimeNowFunc()}
+ s.storage.MustInsertPickleResult(pr)
+
+ s.fmt.Pickle(pickle)
+ return fmt.Errorf("%w: no steps in scenario", ErrUndefined)
+ }
+
+	// Before scenario hooks are called in the context of the first evaluated step
+	// so that an error from a handler can be attached to that step.
+
+ pr := models.PickleResult{PickleID: pickle.Id, StartedAt: utils.TimeNowFunc()}
+ s.storage.MustInsertPickleResult(pr)
+
+ s.fmt.Pickle(pickle)
+
+ dt := &testingT{
+ name: pickle.Name,
+ }
+ ctx = setContextTestingT(ctx, dt)
+ // scenario
+ if s.testingT != nil {
+ // Running scenario as a subtest.
+ s.testingT.Run(pickle.Name, func(t *testing.T) {
+ dt.t = t
+ ctx, err = s.runSteps(ctx, pickle, pickle.Steps)
+ if s.shouldFail(err) {
+ t.Errorf("%+v", err)
+ }
+ })
+ } else {
+ ctx, err = s.runSteps(ctx, pickle, pickle.Steps)
+ }
+
+	// After scenario handlers are called in the context of the last evaluated step
+	// so that an error from a handler can be attached to that step.
+
+ return err
+}
diff --git a/vendor/github.com/cucumber/godog/test_context.go b/vendor/github.com/cucumber/godog/test_context.go
new file mode 100644
index 000000000..add9f47b0
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/test_context.go
@@ -0,0 +1,371 @@
+package godog
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "regexp"
+ "runtime"
+
+ messages "github.com/cucumber/messages/go/v21"
+
+ "github.com/cucumber/godog/formatters"
+ "github.com/cucumber/godog/internal/builder"
+ "github.com/cucumber/godog/internal/flags"
+ "github.com/cucumber/godog/internal/models"
+)
+
+// GherkinDocument represents gherkin document.
+type GherkinDocument = messages.GherkinDocument
+
+// Scenario represents the executed scenario
+type Scenario = messages.Pickle
+
+// Step represents the executed step
+type Step = messages.PickleStep
+
+// Steps allows nesting steps:
+// instead of returning an error from a step func,
+// it is possible to return combined steps:
+//
+// func multistep(name string) godog.Steps {
+// return godog.Steps{
+//	fmt.Sprintf(`a user named "%s"`, name),
+// fmt.Sprintf(`user "%s" is authenticated`, name),
+// }
+// }
+//
+// These steps will be matched and executed in
+// sequential order. The first one that fails
+// will result in the main step's failure.
+type Steps []string
+
+// StepDefinition is a registered step definition.
+// It contains a StepHandler and a regexp which
+// is used to match a step, as well as the Args
+// matched by the last executed step.
+//
+// This structure is passed to the formatter
+// when a step is matched and has either failed
+// or succeeded.
+type StepDefinition = formatters.StepDefinition
+
+// DocString represents the DocString argument made to a step definition
+type DocString = messages.PickleDocString
+
+// Table represents the Table argument made to a step definition
+type Table = messages.PickleTable
+
+// TestSuiteContext allows various contexts
+// to register event handlers.
+//
+// When running a test suite, the instance of TestSuiteContext
+// is passed to all functions (contexts), which
+// have it as a first and only argument.
+//
+// Note that event hooks do not catch panics,
+// in order to preserve stack trace information.
+type TestSuiteContext struct {
+ beforeSuiteHandlers []func()
+ afterSuiteHandlers []func()
+
+ suite *suite
+}
+
+// BeforeSuite registers a function or method
+// to be run once before the suite runs.
+//
+// Use it to prepare the test suite for a spin,
+// for instance to connect to and prepare a database.
+func (ctx *TestSuiteContext) BeforeSuite(fn func()) {
+ ctx.beforeSuiteHandlers = append(ctx.beforeSuiteHandlers, fn)
+}
+
+// AfterSuite registers a function or method
+// to be run once after the suite has run.
+func (ctx *TestSuiteContext) AfterSuite(fn func()) {
+ ctx.afterSuiteHandlers = append(ctx.afterSuiteHandlers, fn)
+}
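+
+// A minimal sketch of a suite initializer wiring both hooks (the function
+// name is an assumed user-side convention):
+//
+//	func InitializeTestSuite(ctx *TestSuiteContext) {
+//		ctx.BeforeSuite(func() { /* e.g. open a database connection */ })
+//		ctx.AfterSuite(func() { /* e.g. close it again */ })
+//	}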
+
+// ScenarioContext allows registering scenario hooks.
+func (ctx *TestSuiteContext) ScenarioContext() *ScenarioContext {
+ return &ScenarioContext{
+ suite: ctx.suite,
+ }
+}
+
+// ScenarioContext allows various contexts
+// to register steps and event handlers.
+//
+// When running a scenario, the instance of ScenarioContext
+// is passed to all functions (contexts), which
+// have it as a first and only argument.
+//
+// Note that event hooks do not catch panics,
+// in order to preserve stack trace information. Only step
+// executions recover from panics, since a panic may
+// be a context-specific error.
+type ScenarioContext struct {
+ suite *suite
+}
+
+// StepContext allows registering step hooks.
+type StepContext struct {
+ suite *suite
+}
+
+// Before registers a function or method
+// to be run before every scenario.
+//
+// It is a good practice to restore the default state
+// before every scenario, so it is isolated from
+// any leftover state.
+func (ctx ScenarioContext) Before(h BeforeScenarioHook) {
+ ctx.suite.beforeScenarioHandlers = append(ctx.suite.beforeScenarioHandlers, h)
+}
+
+// BeforeScenarioHook defines a hook before scenario.
+type BeforeScenarioHook func(ctx context.Context, sc *Scenario) (context.Context, error)
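+
+// A minimal sketch of registering a scenario hook (sc is an assumed
+// *ScenarioContext; the state reset is illustrative):
+//
+//	sc.Before(func(ctx context.Context, s *Scenario) (context.Context, error) {
+//		// reset any state shared between scenarios here
+//		return ctx, nil
+//	})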
+
+// After registers a function or method
+// to be run after every scenario.
+func (ctx ScenarioContext) After(h AfterScenarioHook) {
+ ctx.suite.afterScenarioHandlers = append(ctx.suite.afterScenarioHandlers, h)
+}
+
+// AfterScenarioHook defines a hook after scenario.
+type AfterScenarioHook func(ctx context.Context, sc *Scenario, err error) (context.Context, error)
+
+// StepContext exposes StepContext of a scenario.
+func (ctx ScenarioContext) StepContext() StepContext {
+ return StepContext(ctx)
+}
+
+// Before registers a function or method
+// to be run before every step.
+func (ctx StepContext) Before(h BeforeStepHook) {
+ ctx.suite.beforeStepHandlers = append(ctx.suite.beforeStepHandlers, h)
+}
+
+// BeforeStepHook defines a hook before step.
+type BeforeStepHook func(ctx context.Context, st *Step) (context.Context, error)
+
+// After registers a function or method
+// to be run after every step.
+//
+// It may be convenient to return a different kind of error
+// in order to print more state details, which may help
+// in case of step failure.
+//
+// It is also useful in some cases, for example when running
+// a headless browser, to take a screenshot after a failure.
+func (ctx StepContext) After(h AfterStepHook) {
+ ctx.suite.afterStepHandlers = append(ctx.suite.afterStepHandlers, h)
+}
+
+// AfterStepHook defines a hook after step.
+type AfterStepHook func(ctx context.Context, st *Step, status StepResultStatus, err error) (context.Context, error)
+
+// BeforeScenario registers a function or method
+// to be run before every scenario.
+//
+// It is a good practice to restore the default state
+// before every scenario, so it is isolated from
+// any leftover state.
+//
+// Deprecated: use Before.
+func (ctx ScenarioContext) BeforeScenario(fn func(sc *Scenario)) {
+ ctx.Before(func(ctx context.Context, sc *Scenario) (context.Context, error) {
+ fn(sc)
+
+ return ctx, nil
+ })
+}
+
+// AfterScenario registers a function or method
+// to be run after every scenario.
+//
+// Deprecated: use After.
+func (ctx ScenarioContext) AfterScenario(fn func(sc *Scenario, err error)) {
+ ctx.After(func(ctx context.Context, sc *Scenario, err error) (context.Context, error) {
+ fn(sc, err)
+
+ return ctx, nil
+ })
+}
+
+// BeforeStep registers a function or method
+// to be run before every step.
+//
+// Deprecated: use ScenarioContext.StepContext() and StepContext.Before.
+func (ctx ScenarioContext) BeforeStep(fn func(st *Step)) {
+ ctx.StepContext().Before(func(ctx context.Context, st *Step) (context.Context, error) {
+ fn(st)
+
+ return ctx, nil
+ })
+}
+
+// AfterStep registers a function or method
+// to be run after every step.
+//
+// It may be convenient to return a different kind of error
+// in order to print more state details, which may help
+// in case of step failure.
+//
+// It is also useful in some cases, for example when running
+// a headless browser, to take a screenshot after a failure.
+//
+// Deprecated: use ScenarioContext.StepContext() and StepContext.After.
+func (ctx ScenarioContext) AfterStep(fn func(st *Step, err error)) {
+ ctx.StepContext().After(func(ctx context.Context, st *Step, status StepResultStatus, err error) (context.Context, error) {
+ fn(st, err)
+
+ return ctx, nil
+ })
+}
+
+// Step allows registering a *StepDefinition in the
+// Godog feature suite; the definition will be applied
+// to all steps matching the given Regexp expr.
+//
+// It will panic if expr is not a valid regular
+// expression or stepFunc is not a valid step
+// handler.
+//
+// The expression can be of type: *regexp.Regexp, string or []byte
+//
+// The stepFunc may accept one or several arguments of type:
+// - int, int8, int16, int32, int64
+// - float32, float64
+// - string
+// - []byte
+// - *godog.DocString
+// - *godog.Table
+//
+// The stepFunc may return nothing, an error, a context.Context,
+// godog.Steps (for multistep), or (context.Context, error)
+//
+// Note that if there are two definitions which may match
+// the same step, then only the first matched handler
+// will be applied.
+//
+// If no *StepDefinition is matched, then an
+// ErrUndefined error will be returned when
+// running the steps.
+func (ctx ScenarioContext) Step(expr, stepFunc interface{}) {
+ ctx.stepWithKeyword(expr, stepFunc, formatters.None)
+}
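+
+// A minimal sketch of a step registration with a capture group converted to
+// an int argument (sc is an assumed *ScenarioContext; step text and logic
+// are illustrative):
+//
+//	sc.Step(`^I eat (\d+) godogs?$`, func(n int) error {
+//		if n > 5 {
+//			return fmt.Errorf("cannot eat %d godogs", n)
+//		}
+//		return nil
+//	})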
+
+// Given functions identically to Step, but the *StepDefinition
+// will only be matched if the step starts with "Given". "And"
+// and "But" keywords copy the keyword of the last step for the
+// purpose of matching.
+func (ctx ScenarioContext) Given(expr, stepFunc interface{}) {
+ ctx.stepWithKeyword(expr, stepFunc, formatters.Given)
+}
+
+// When functions identically to Step, but the *StepDefinition
+// will only be matched if the step starts with "When". "And"
+// and "But" keywords copy the keyword of the last step for the
+// purpose of matching.
+func (ctx ScenarioContext) When(expr, stepFunc interface{}) {
+ ctx.stepWithKeyword(expr, stepFunc, formatters.When)
+}
+
+// Then functions identically to Step, but the *StepDefinition
+// will only be matched if the step starts with "Then". "And"
+// and "But" keywords copy the keyword of the last step for the
+// purpose of matching.
+func (ctx ScenarioContext) Then(expr, stepFunc interface{}) {
+ ctx.stepWithKeyword(expr, stepFunc, formatters.Then)
+}
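+
+// A sketch of keyword-scoped registration (sc and the handler names are
+// assumed user-defined):
+//
+//	sc.Given(`^there are (\d+) godogs$`, thereAreGodogs)
+//	sc.When(`^I eat (\d+)$`, iEat)
+//	sc.Then(`^there should be (\d+) remaining$`, thereShouldBeRemaining)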
+
+func (ctx ScenarioContext) stepWithKeyword(expr interface{}, stepFunc interface{}, keyword formatters.Keyword) {
+ var regex *regexp.Regexp
+
+ // Validate the first input param is regex compatible
+ switch t := expr.(type) {
+ case *regexp.Regexp:
+ regex = t
+ case string:
+ regex = regexp.MustCompile(t)
+ case []byte:
+ regex = regexp.MustCompile(string(t))
+ default:
+ panic(fmt.Sprintf("expecting expr to be a *regexp.Regexp or a string or []byte, got type: %T", expr))
+ }
+
+ // Validate that the handler is a function.
+ handlerType := reflect.TypeOf(stepFunc)
+ if handlerType.Kind() != reflect.Func {
+ panic(fmt.Sprintf("expected handler to be func, but got: %T", stepFunc))
+ }
+
+ // FIXME = Validate the handler function param types here so
+ // that any errors are discovered early.
+ // StepDefinition.Run defines the supported types but fails at run time not registration time
+
+ // Validate the function's return types.
+ helpPrefix := "expected handler to return one of error or context.Context or godog.Steps or (context.Context, error)"
+ isNested := false
+
+ numOut := handlerType.NumOut()
+ switch numOut {
+ case 0:
+ // No return values.
+ case 1:
+ // One return value: should be error, Steps, or context.Context.
+ outType := handlerType.Out(0)
+ if outType == reflect.TypeOf(Steps{}) {
+ isNested = true
+ } else {
+ if outType != errorInterface && outType != contextInterface {
+ panic(fmt.Sprintf("%s, but got: %v", helpPrefix, outType))
+ }
+ }
+ case 2:
+ // Two return values: should be (context.Context, error).
+ if handlerType.Out(0) != contextInterface || handlerType.Out(1) != errorInterface {
+ panic(fmt.Sprintf("%s, but got: %v, %v", helpPrefix, handlerType.Out(0), handlerType.Out(1)))
+ }
+ default:
+ // More than two return values.
+ panic(fmt.Sprintf("expected handler to return either zero, one or two values, but it has: %d", numOut))
+ }
+
+ // Register the handler
+ def := &models.StepDefinition{
+ StepDefinition: formatters.StepDefinition{
+ Handler: stepFunc,
+ Expr: regex,
+ Keyword: keyword,
+ },
+ HandlerValue: reflect.ValueOf(stepFunc),
+ Nested: isNested,
+ }
+
+ // Get the file and line number of the call that created this step with a
+ // call to one of the Step, Given, When, or Then wrappers.
+ _, def.File, def.Line, _ = runtime.Caller(2)
+
+ // stash the step
+ ctx.suite.steps = append(ctx.suite.steps, def)
+}
+
+// Build creates a test package like go test command at given target path.
+// If there are no go files in tested directory, then
+// it simply builds a godog executable to scan features.
+//
+// If there are go test files, it first builds a test
+// package with standard go test command.
+//
+// Finally, it generates godog suite executable which
+// registers exported godog contexts from the test files
+// of tested package.
+//
+// Returns the path to generated executable
+func Build(bin string) error {
+ return builder.Build(bin)
+}
+
+type Feature = flags.Feature
diff --git a/vendor/github.com/cucumber/godog/testingt.go b/vendor/github.com/cucumber/godog/testingt.go
new file mode 100644
index 000000000..25981b89a
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/testingt.go
@@ -0,0 +1,206 @@
+package godog
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+// T returns a TestingT compatible interface from the current test context. It will return nil if
+// called outside the context of a test. This can be used with (for example) testify's assert and
+// require packages.
+func T(ctx context.Context) TestingT {
+ return getTestingT(ctx)
+}
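+
+// A minimal sketch of using T with an assertion library (testify's require
+// package is shown as an assumption; any TestingT-compatible library works,
+// and sc is an assumed *ScenarioContext):
+//
+//	sc.Step(`^the answer is (\d+)$`, func(ctx context.Context, n int) {
+//		require.Equal(T(ctx), 42, n)
+//	})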
+
+// TestingT is a subset of the public methods implemented by go's testing.T. It allows assertion
+// libraries to be used with godog, provided they depend only on this subset of methods.
+type TestingT interface {
+ // Name returns the name of the current pickle under test
+ Name() string
+ // Log will log to the current testing.T log if set, otherwise it will log to stdout
+ Log(args ...interface{})
+ // Logf will log a formatted string to the current testing.T log if set, otherwise it will log
+ // to stdout
+ Logf(format string, args ...interface{})
+ // Error fails the current test and logs the provided arguments. Equivalent to calling Log then
+ // Fail.
+ Error(args ...interface{})
+ // Errorf fails the current test and logs the formatted message. Equivalent to calling Logf then
+ // Fail.
+ Errorf(format string, args ...interface{})
+ // Fail marks the current test as failed, but does not halt execution of the step.
+ Fail()
+ // FailNow marks the current test as failed and halts execution of the step.
+ FailNow()
+ // Fatal logs the provided arguments, marks the test as failed and halts execution of the step.
+ Fatal(args ...interface{})
+	// Fatalf logs the formatted message, marks the test as failed and halts execution of the step.
+ Fatalf(format string, args ...interface{})
+ // Skip logs the provided arguments and marks the test as skipped but does not halt execution
+ // of the step.
+ Skip(args ...interface{})
+ // Skipf logs the formatted message and marks the test as skipped but does not halt execution
+ // of the step.
+ Skipf(format string, args ...interface{})
+ // SkipNow marks the current test as skipped and halts execution of the step.
+ SkipNow()
+ // Skipped returns true if the test has been marked as skipped.
+ Skipped() bool
+}
+
+// Logf will log test output. If called in the context of a test and testing.T has been registered,
+// this will log using the step's testing.T, else it will simply log to stdout.
+func Logf(ctx context.Context, format string, args ...interface{}) {
+ if t := getTestingT(ctx); t != nil {
+ t.Logf(format, args...)
+ return
+ }
+ fmt.Printf(format+"\n", args...)
+}
+
+// Log will log test output. If called in the context of a test and testing.T has been registered,
+// this will log using the step's testing.T, else it will simply log to stdout.
+func Log(ctx context.Context, args ...interface{}) {
+ if t := getTestingT(ctx); t != nil {
+ t.Log(args...)
+ return
+ }
+ fmt.Println(args...)
+}
+
+// LoggedMessages returns an array of any logged messages that have been recorded during the test
+// through calls to godog.Log / godog.Logf or via operations against godog.T(ctx)
+func LoggedMessages(ctx context.Context) []string {
+ if t := getTestingT(ctx); t != nil {
+ return t.logMessages
+ }
+ return nil
+}
+
+// errStopNow should be returned inside a panic within the test to immediately halt execution of that
+// test
+var errStopNow = fmt.Errorf("FailNow or SkipNow called")
+
+type testingT struct {
+ name string
+ t *testing.T
+ failed bool
+ skipped bool
+ failMessages []string
+ logMessages []string
+}
+
+// check interface compliance of our testingT and the upstream testing.T:
+var (
+ _ TestingT = &testingT{}
+ _ TestingT = (*testing.T)(nil)
+)
+
+func (dt *testingT) Name() string {
+ if dt.t != nil {
+ return dt.t.Name()
+ }
+ return dt.name
+}
+
+func (dt *testingT) Log(args ...interface{}) {
+ dt.logMessages = append(dt.logMessages, fmt.Sprint(args...))
+ if dt.t != nil {
+ dt.t.Log(args...)
+ return
+ }
+ fmt.Println(args...)
+}
+
+func (dt *testingT) Logf(format string, args ...interface{}) {
+ dt.logMessages = append(dt.logMessages, fmt.Sprintf(format, args...))
+ if dt.t != nil {
+ dt.t.Logf(format, args...)
+ return
+ }
+ fmt.Printf(format+"\n", args...)
+}
+
+func (dt *testingT) Error(args ...interface{}) {
+ dt.Log(args...)
+ dt.failMessages = append(dt.failMessages, fmt.Sprintln(args...))
+ dt.Fail()
+}
+
+func (dt *testingT) Errorf(format string, args ...interface{}) {
+ dt.Logf(format, args...)
+ dt.failMessages = append(dt.failMessages, fmt.Sprintf(format, args...))
+ dt.Fail()
+}
+
+func (dt *testingT) Fail() {
+ dt.failed = true
+}
+
+func (dt *testingT) FailNow() {
+ dt.Fail()
+ panic(errStopNow)
+}
+
+func (dt *testingT) Fatal(args ...interface{}) {
+ dt.Log(args...)
+ dt.FailNow()
+}
+
+func (dt *testingT) Fatalf(format string, args ...interface{}) {
+ dt.Logf(format, args...)
+ dt.FailNow()
+}
+
+func (dt *testingT) Skip(args ...interface{}) {
+ dt.Log(args...)
+ dt.skipped = true
+}
+
+func (dt *testingT) Skipf(format string, args ...interface{}) {
+ dt.Logf(format, args...)
+ dt.skipped = true
+}
+
+func (dt *testingT) SkipNow() {
+ dt.skipped = true
+ panic(errStopNow)
+}
+
+func (dt *testingT) Skipped() bool {
+ return dt.skipped
+}
+
+// isFailed will return an error representing the calls to Fail made during this test
+func (dt *testingT) isFailed() error {
+ if dt.skipped {
+ return ErrSkip
+ }
+ if !dt.failed {
+ return nil
+ }
+ switch len(dt.failMessages) {
+ case 0:
+ return fmt.Errorf("fail called on TestingT")
+ case 1:
+		return fmt.Errorf("%s", dt.failMessages[0])
+ default:
+ return fmt.Errorf("checks failed:\n* %s", strings.Join(dt.failMessages, "\n* "))
+ }
+}
+
+type testingTCtxVal struct{}
+
+func setContextTestingT(ctx context.Context, dt *testingT) context.Context {
+ return context.WithValue(ctx, testingTCtxVal{}, dt)
+}
+
+func getTestingT(ctx context.Context) *testingT {
+ dt, ok := ctx.Value(testingTCtxVal{}).(*testingT)
+ if !ok {
+ return nil
+ }
+ return dt
+}
diff --git a/vendor/github.com/cucumber/messages/go/v21/.gitignore b/vendor/github.com/cucumber/messages/go/v21/.gitignore
new file mode 100644
index 000000000..7b0ee7aeb
--- /dev/null
+++ b/vendor/github.com/cucumber/messages/go/v21/.gitignore
@@ -0,0 +1,17 @@
+.built
+.compared
+.deps
+.dist
+.dist-compressed
+.go-get
+.gofmt
+.linted
+.tested*
+acceptance/
+bin/
+dist/
+dist_compressed/
+*.bin
+*.iml
+# upx dist/cucumber-gherkin-openbsd-386 fails with a core dump
+core.*.!usr!bin!upx-ucl
diff --git a/vendor/github.com/cucumber/messages/go/v21/Dockerfile b/vendor/github.com/cucumber/messages/go/v21/Dockerfile
new file mode 100644
index 000000000..64e1c2795
--- /dev/null
+++ b/vendor/github.com/cucumber/messages/go/v21/Dockerfile
@@ -0,0 +1,29 @@
+#syntax=docker/dockerfile:1.4
+
+# Base image
+ARG GO_VERSION=1.19
+FROM golang:${GO_VERSION}-alpine AS golang
+WORKDIR /cucumber
+
+
+# Dummy stage for generated code, overridden in main build
+FROM scratch AS schema-codegen
+
+
+FROM golang AS with-dependencies
+
+COPY --link go.mod go.sum .
+RUN --mount=type=cache,target=/go/pkg/mod/cache \
+ go mod download && go mod verify
+
+
+FROM golang AS tested
+
+RUN apk add gcc libc-dev
+
+COPY --link . .
+COPY --link --from=with-dependencies /go/pkg /go/pkg
+COPY --link --from=schema-codegen /*.go .
+
+RUN gofmt -w .
+RUN go test --v
diff --git a/vendor/github.com/cucumber/messages/go/v21/LICENSE b/vendor/github.com/cucumber/messages/go/v21/LICENSE
new file mode 100644
index 000000000..725ba9f4a
--- /dev/null
+++ b/vendor/github.com/cucumber/messages/go/v21/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) Cucumber Ltd
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cucumber/messages/go/v21/Makefile b/vendor/github.com/cucumber/messages/go/v21/Makefile
new file mode 100644
index 000000000..898214efa
--- /dev/null
+++ b/vendor/github.com/cucumber/messages/go/v21/Makefile
@@ -0,0 +1,20 @@
+schemas = $(shell find ../jsonschema -name "*.json")
+
+.DEFAULT_GOAL = help
+
+help: ## Show this help
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make <target>\n\nWhere <target> is one of:\n"} /^[$$()% a-zA-Z_-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+generate: require messages.go ## Generate go code based on the schemas found in ../jsonschema and using the scripts in ../jsonschema/scripts for the generation
+
+require: ## Check requirements for the code generation (ruby and go are required)
+ @ruby --version >/dev/null 2>&1 || (echo "ERROR: ruby is required."; exit 1)
+ @go version >/dev/null 2>&1 || (echo "ERROR: go is required."; exit 1)
+
+clean: ## Remove automatically generated files and related artifacts
+ rm -f messages.go
+
+messages.go: $(schemas) ../jsonschema/scripts/codegen.rb ../jsonschema/scripts/templates/go.go.erb ../jsonschema/scripts/templates/go.enum.go.erb
+ ruby ../jsonschema/scripts/codegen.rb Go ../jsonschema go.go.erb > $@
+ ruby ../jsonschema/scripts/codegen.rb Go ../jsonschema go.enum.go.erb >> $@
+ go fmt messages.go
diff --git a/vendor/github.com/cucumber/messages/go/v21/id_generator.go b/vendor/github.com/cucumber/messages/go/v21/id_generator.go
new file mode 100644
index 000000000..a721f9789
--- /dev/null
+++ b/vendor/github.com/cucumber/messages/go/v21/id_generator.go
@@ -0,0 +1,28 @@
+package messages
+
+import (
+ "github.com/gofrs/uuid"
+ "strconv"
+)
+
+type IdGenerator interface {
+ newId() func() string
+}
+
+type Incrementing struct {
+ next int
+}
+
+func (self *Incrementing) NewId() string {
+ result := strconv.Itoa(self.next)
+ self.next++
+ return result
+}
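+
+// Usage sketch: Incrementing yields deterministic ids, which is handy in
+// tests:
+//
+//	gen := &Incrementing{}
+//	gen.NewId() // "0"
+//	gen.NewId() // "1"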
+
+type UUID struct {
+ next int
+}
+
+func (i UUID) NewId() string {
+ return uuid.Must(uuid.NewV4()).String()
+}
diff --git a/vendor/github.com/cucumber/messages/go/v21/messages.go b/vendor/github.com/cucumber/messages/go/v21/messages.go
new file mode 100644
index 000000000..fbb173d4e
--- /dev/null
+++ b/vendor/github.com/cucumber/messages/go/v21/messages.go
@@ -0,0 +1,510 @@
+package messages
+
+type Attachment struct {
+ Body string `json:"body"`
+ ContentEncoding AttachmentContentEncoding `json:"contentEncoding"`
+ FileName string `json:"fileName,omitempty"`
+ MediaType string `json:"mediaType"`
+ Source *Source `json:"source,omitempty"`
+ TestCaseStartedId string `json:"testCaseStartedId,omitempty"`
+ TestStepId string `json:"testStepId,omitempty"`
+ Url string `json:"url,omitempty"`
+}
+
+type Duration struct {
+ Seconds int64 `json:"seconds"`
+ Nanos int64 `json:"nanos"`
+}
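+
+// A sketch of converting a time.Duration into this message type (the helper
+// is illustrative, not part of the generated code):
+//
+//	func durationFrom(d time.Duration) Duration {
+//		return Duration{Seconds: int64(d / time.Second), Nanos: int64(d % time.Second)}
+//	}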
+
+type Envelope struct {
+ Attachment *Attachment `json:"attachment,omitempty"`
+ GherkinDocument *GherkinDocument `json:"gherkinDocument,omitempty"`
+ Hook *Hook `json:"hook,omitempty"`
+ Meta *Meta `json:"meta,omitempty"`
+ ParameterType *ParameterType `json:"parameterType,omitempty"`
+ ParseError *ParseError `json:"parseError,omitempty"`
+ Pickle *Pickle `json:"pickle,omitempty"`
+ Source *Source `json:"source,omitempty"`
+ StepDefinition *StepDefinition `json:"stepDefinition,omitempty"`
+ TestCase *TestCase `json:"testCase,omitempty"`
+ TestCaseFinished *TestCaseFinished `json:"testCaseFinished,omitempty"`
+ TestCaseStarted *TestCaseStarted `json:"testCaseStarted,omitempty"`
+ TestRunFinished *TestRunFinished `json:"testRunFinished,omitempty"`
+ TestRunStarted *TestRunStarted `json:"testRunStarted,omitempty"`
+ TestStepFinished *TestStepFinished `json:"testStepFinished,omitempty"`
+ TestStepStarted *TestStepStarted `json:"testStepStarted,omitempty"`
+ UndefinedParameterType *UndefinedParameterType `json:"undefinedParameterType,omitempty"`
+}
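+
+// Envelope acts as a one-of: each message on the wire is expected to set
+// exactly one field. A sketch of emitting a test-run-started message
+// (timestamp value is illustrative):
+//
+//	env := Envelope{TestRunStarted: &TestRunStarted{
+//		Timestamp: &Timestamp{Seconds: 1700000000},
+//	}}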
+
+type Exception struct {
+ Type string `json:"type"`
+ Message string `json:"message,omitempty"`
+}
+
+type GherkinDocument struct {
+ Uri string `json:"uri,omitempty"`
+ Feature *Feature `json:"feature,omitempty"`
+ Comments []*Comment `json:"comments"`
+}
+
+type Background struct {
+ Location *Location `json:"location"`
+ Keyword string `json:"keyword"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Steps []*Step `json:"steps"`
+ Id string `json:"id"`
+}
+
+type Comment struct {
+ Location *Location `json:"location"`
+ Text string `json:"text"`
+}
+
+type DataTable struct {
+ Location *Location `json:"location"`
+ Rows []*TableRow `json:"rows"`
+}
+
+type DocString struct {
+ Location *Location `json:"location"`
+ MediaType string `json:"mediaType,omitempty"`
+ Content string `json:"content"`
+ Delimiter string `json:"delimiter"`
+}
+
+type Examples struct {
+ Location *Location `json:"location"`
+ Tags []*Tag `json:"tags"`
+ Keyword string `json:"keyword"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ TableHeader *TableRow `json:"tableHeader,omitempty"`
+ TableBody []*TableRow `json:"tableBody"`
+ Id string `json:"id"`
+}
+
+type Feature struct {
+ Location *Location `json:"location"`
+ Tags []*Tag `json:"tags"`
+ Language string `json:"language"`
+ Keyword string `json:"keyword"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Children []*FeatureChild `json:"children"`
+}
+
+type FeatureChild struct {
+ Rule *Rule `json:"rule,omitempty"`
+ Background *Background `json:"background,omitempty"`
+ Scenario *Scenario `json:"scenario,omitempty"`
+}
+
+type Rule struct {
+ Location *Location `json:"location"`
+ Tags []*Tag `json:"tags"`
+ Keyword string `json:"keyword"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Children []*RuleChild `json:"children"`
+ Id string `json:"id"`
+}
+
+type RuleChild struct {
+ Background *Background `json:"background,omitempty"`
+ Scenario *Scenario `json:"scenario,omitempty"`
+}
+
+type Scenario struct {
+ Location *Location `json:"location"`
+ Tags []*Tag `json:"tags"`
+ Keyword string `json:"keyword"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Steps []*Step `json:"steps"`
+ Examples []*Examples `json:"examples"`
+ Id string `json:"id"`
+}
+
+type Step struct {
+ Location *Location `json:"location"`
+ Keyword string `json:"keyword"`
+ KeywordType StepKeywordType `json:"keywordType,omitempty"`
+ Text string `json:"text"`
+ DocString *DocString `json:"docString,omitempty"`
+ DataTable *DataTable `json:"dataTable,omitempty"`
+ Id string `json:"id"`
+}
+
+type TableCell struct {
+ Location *Location `json:"location"`
+ Value string `json:"value"`
+}
+
+type TableRow struct {
+ Location *Location `json:"location"`
+ Cells []*TableCell `json:"cells"`
+ Id string `json:"id"`
+}
+
+type Tag struct {
+ Location *Location `json:"location"`
+ Name string `json:"name"`
+ Id string `json:"id"`
+}
+
+type Hook struct {
+ Id string `json:"id"`
+ Name string `json:"name,omitempty"`
+ SourceReference *SourceReference `json:"sourceReference"`
+ TagExpression string `json:"tagExpression,omitempty"`
+}
+
+type Location struct {
+ Line int64 `json:"line"`
+ Column int64 `json:"column,omitempty"`
+}
+
+type Meta struct {
+ ProtocolVersion string `json:"protocolVersion"`
+ Implementation *Product `json:"implementation"`
+ Runtime *Product `json:"runtime"`
+ Os *Product `json:"os"`
+ Cpu *Product `json:"cpu"`
+ Ci *Ci `json:"ci,omitempty"`
+}
+
+type Ci struct {
+ Name string `json:"name"`
+ Url string `json:"url,omitempty"`
+ BuildNumber string `json:"buildNumber,omitempty"`
+ Git *Git `json:"git,omitempty"`
+}
+
+type Git struct {
+ Remote string `json:"remote"`
+ Revision string `json:"revision"`
+ Branch string `json:"branch,omitempty"`
+ Tag string `json:"tag,omitempty"`
+}
+
+type Product struct {
+ Name string `json:"name"`
+ Version string `json:"version,omitempty"`
+}
+
+type ParameterType struct {
+ Name string `json:"name"`
+ RegularExpressions []string `json:"regularExpressions"`
+ PreferForRegularExpressionMatch bool `json:"preferForRegularExpressionMatch"`
+ UseForSnippets bool `json:"useForSnippets"`
+ Id string `json:"id"`
+}
+
+type ParseError struct {
+ Source *SourceReference `json:"source"`
+ Message string `json:"message"`
+}
+
+type Pickle struct {
+ Id string `json:"id"`
+ Uri string `json:"uri"`
+ Name string `json:"name"`
+ Language string `json:"language"`
+ Steps []*PickleStep `json:"steps"`
+ Tags []*PickleTag `json:"tags"`
+ AstNodeIds []string `json:"astNodeIds"`
+}
+
+type PickleDocString struct {
+ MediaType string `json:"mediaType,omitempty"`
+ Content string `json:"content"`
+}
+
+type PickleStep struct {
+ Argument *PickleStepArgument `json:"argument,omitempty"`
+ AstNodeIds []string `json:"astNodeIds"`
+ Id string `json:"id"`
+ Type PickleStepType `json:"type,omitempty"`
+ Text string `json:"text"`
+}
+
+type PickleStepArgument struct {
+ DocString *PickleDocString `json:"docString,omitempty"`
+ DataTable *PickleTable `json:"dataTable,omitempty"`
+}
+
+type PickleTable struct {
+ Rows []*PickleTableRow `json:"rows"`
+}
+
+type PickleTableCell struct {
+ Value string `json:"value"`
+}
+
+type PickleTableRow struct {
+ Cells []*PickleTableCell `json:"cells"`
+}
+
+type PickleTag struct {
+ Name string `json:"name"`
+ AstNodeId string `json:"astNodeId"`
+}
+
+type Source struct {
+ Uri string `json:"uri"`
+ Data string `json:"data"`
+ MediaType SourceMediaType `json:"mediaType"`
+}
+
+type SourceReference struct {
+ Uri string `json:"uri,omitempty"`
+ JavaMethod *JavaMethod `json:"javaMethod,omitempty"`
+ JavaStackTraceElement *JavaStackTraceElement `json:"javaStackTraceElement,omitempty"`
+ Location *Location `json:"location,omitempty"`
+}
+
+type JavaMethod struct {
+ ClassName string `json:"className"`
+ MethodName string `json:"methodName"`
+ MethodParameterTypes []string `json:"methodParameterTypes"`
+}
+
+type JavaStackTraceElement struct {
+ ClassName string `json:"className"`
+ FileName string `json:"fileName"`
+ MethodName string `json:"methodName"`
+}
+
+type StepDefinition struct {
+ Id string `json:"id"`
+ Pattern *StepDefinitionPattern `json:"pattern"`
+ SourceReference *SourceReference `json:"sourceReference"`
+}
+
+type StepDefinitionPattern struct {
+ Source string `json:"source"`
+ Type StepDefinitionPatternType `json:"type"`
+}
+
+type TestCase struct {
+ Id string `json:"id"`
+ PickleId string `json:"pickleId"`
+ TestSteps []*TestStep `json:"testSteps"`
+}
+
+type Group struct {
+ Children []*Group `json:"children"`
+ Start int64 `json:"start,omitempty"`
+ Value string `json:"value,omitempty"`
+}
+
+type StepMatchArgument struct {
+ Group *Group `json:"group"`
+ ParameterTypeName string `json:"parameterTypeName,omitempty"`
+}
+
+type StepMatchArgumentsList struct {
+ StepMatchArguments []*StepMatchArgument `json:"stepMatchArguments"`
+}
+
+type TestStep struct {
+ HookId string `json:"hookId,omitempty"`
+ Id string `json:"id"`
+ PickleStepId string `json:"pickleStepId,omitempty"`
+ StepDefinitionIds []string `json:"stepDefinitionIds,omitempty"`
+ StepMatchArgumentsLists []*StepMatchArgumentsList `json:"stepMatchArgumentsLists,omitempty"`
+}
+
+type TestCaseFinished struct {
+ TestCaseStartedId string `json:"testCaseStartedId"`
+ Timestamp *Timestamp `json:"timestamp"`
+ WillBeRetried bool `json:"willBeRetried"`
+}
+
+type TestCaseStarted struct {
+ Attempt int64 `json:"attempt"`
+ Id string `json:"id"`
+ TestCaseId string `json:"testCaseId"`
+ WorkerId string `json:"workerId,omitempty"`
+ Timestamp *Timestamp `json:"timestamp"`
+}
+
+type TestRunFinished struct {
+ Message string `json:"message,omitempty"`
+ Success bool `json:"success"`
+ Timestamp *Timestamp `json:"timestamp"`
+ Exception *Exception `json:"exception,omitempty"`
+}
+
+type TestRunStarted struct {
+ Timestamp *Timestamp `json:"timestamp"`
+}
+
+type TestStepFinished struct {
+ TestCaseStartedId string `json:"testCaseStartedId"`
+ TestStepId string `json:"testStepId"`
+ TestStepResult *TestStepResult `json:"testStepResult"`
+ Timestamp *Timestamp `json:"timestamp"`
+}
+
+type TestStepResult struct {
+ Duration *Duration `json:"duration"`
+ Message string `json:"message,omitempty"`
+ Status TestStepResultStatus `json:"status"`
+ Exception *Exception `json:"exception,omitempty"`
+}
+
+type TestStepStarted struct {
+ TestCaseStartedId string `json:"testCaseStartedId"`
+ TestStepId string `json:"testStepId"`
+ Timestamp *Timestamp `json:"timestamp"`
+}
+
+type Timestamp struct {
+ Seconds int64 `json:"seconds"`
+ Nanos int64 `json:"nanos"`
+}
+
+type UndefinedParameterType struct {
+ Expression string `json:"expression"`
+ Name string `json:"name"`
+}
+
+type AttachmentContentEncoding string
+
+const (
+ AttachmentContentEncoding_IDENTITY AttachmentContentEncoding = "IDENTITY"
+ AttachmentContentEncoding_BASE64 AttachmentContentEncoding = "BASE64"
+)
+
+func (e AttachmentContentEncoding) String() string {
+ switch e {
+ case AttachmentContentEncoding_IDENTITY:
+ return "IDENTITY"
+ case AttachmentContentEncoding_BASE64:
+ return "BASE64"
+ default:
+ panic("Bad enum value for AttachmentContentEncoding")
+ }
+}
+
+type PickleStepType string
+
+const (
+ PickleStepType_UNKNOWN PickleStepType = "Unknown"
+ PickleStepType_CONTEXT PickleStepType = "Context"
+ PickleStepType_ACTION PickleStepType = "Action"
+ PickleStepType_OUTCOME PickleStepType = "Outcome"
+)
+
+func (e PickleStepType) String() string {
+ switch e {
+ case PickleStepType_UNKNOWN:
+ return "Unknown"
+ case PickleStepType_CONTEXT:
+ return "Context"
+ case PickleStepType_ACTION:
+ return "Action"
+ case PickleStepType_OUTCOME:
+ return "Outcome"
+ default:
+ panic("Bad enum value for PickleStepType")
+ }
+}
+
+type SourceMediaType string
+
+const (
+ SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_PLAIN SourceMediaType = "text/x.cucumber.gherkin+plain"
+ SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_MARKDOWN SourceMediaType = "text/x.cucumber.gherkin+markdown"
+)
+
+func (e SourceMediaType) String() string {
+ switch e {
+ case SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_PLAIN:
+ return "text/x.cucumber.gherkin+plain"
+ case SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_MARKDOWN:
+ return "text/x.cucumber.gherkin+markdown"
+ default:
+ panic("Bad enum value for SourceMediaType")
+ }
+}
+
+type StepDefinitionPatternType string
+
+const (
+ StepDefinitionPatternType_CUCUMBER_EXPRESSION StepDefinitionPatternType = "CUCUMBER_EXPRESSION"
+ StepDefinitionPatternType_REGULAR_EXPRESSION StepDefinitionPatternType = "REGULAR_EXPRESSION"
+)
+
+func (e StepDefinitionPatternType) String() string {
+ switch e {
+ case StepDefinitionPatternType_CUCUMBER_EXPRESSION:
+ return "CUCUMBER_EXPRESSION"
+ case StepDefinitionPatternType_REGULAR_EXPRESSION:
+ return "REGULAR_EXPRESSION"
+ default:
+ panic("Bad enum value for StepDefinitionPatternType")
+ }
+}
+
+type StepKeywordType string
+
+const (
+ StepKeywordType_UNKNOWN StepKeywordType = "Unknown"
+ StepKeywordType_CONTEXT StepKeywordType = "Context"
+ StepKeywordType_ACTION StepKeywordType = "Action"
+ StepKeywordType_OUTCOME StepKeywordType = "Outcome"
+ StepKeywordType_CONJUNCTION StepKeywordType = "Conjunction"
+)
+
+func (e StepKeywordType) String() string {
+ switch e {
+ case StepKeywordType_UNKNOWN:
+ return "Unknown"
+ case StepKeywordType_CONTEXT:
+ return "Context"
+ case StepKeywordType_ACTION:
+ return "Action"
+ case StepKeywordType_OUTCOME:
+ return "Outcome"
+ case StepKeywordType_CONJUNCTION:
+ return "Conjunction"
+ default:
+ panic("Bad enum value for StepKeywordType")
+ }
+}
+
+type TestStepResultStatus string
+
+const (
+ TestStepResultStatus_UNKNOWN TestStepResultStatus = "UNKNOWN"
+ TestStepResultStatus_PASSED TestStepResultStatus = "PASSED"
+ TestStepResultStatus_SKIPPED TestStepResultStatus = "SKIPPED"
+ TestStepResultStatus_PENDING TestStepResultStatus = "PENDING"
+ TestStepResultStatus_UNDEFINED TestStepResultStatus = "UNDEFINED"
+ TestStepResultStatus_AMBIGUOUS TestStepResultStatus = "AMBIGUOUS"
+ TestStepResultStatus_FAILED TestStepResultStatus = "FAILED"
+)
+
+func (e TestStepResultStatus) String() string {
+ switch e {
+ case TestStepResultStatus_UNKNOWN:
+ return "UNKNOWN"
+ case TestStepResultStatus_PASSED:
+ return "PASSED"
+ case TestStepResultStatus_SKIPPED:
+ return "SKIPPED"
+ case TestStepResultStatus_PENDING:
+ return "PENDING"
+ case TestStepResultStatus_UNDEFINED:
+ return "UNDEFINED"
+ case TestStepResultStatus_AMBIGUOUS:
+ return "AMBIGUOUS"
+ case TestStepResultStatus_FAILED:
+ return "FAILED"
+ default:
+ panic("Bad enum value for TestStepResultStatus")
+ }
+}
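+
+// Example (illustrative, not part of the upstream API): given the JSON tags
+// above, a PASSED TestStepResult with a 1.5s duration marshals roughly as
+//
+//	{"duration":{"seconds":1,"nanos":500000000},"status":"PASSED"}
+//
+// assuming the Duration type carries the same lowercase "seconds"/"nanos"
+// tags as Timestamp.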
diff --git a/vendor/github.com/cucumber/messages/go/v21/time_conversion.go b/vendor/github.com/cucumber/messages/go/v21/time_conversion.go
new file mode 100644
index 000000000..3a387931e
--- /dev/null
+++ b/vendor/github.com/cucumber/messages/go/v21/time_conversion.go
@@ -0,0 +1,34 @@
+package messages
+
+import "time"
+
+const nanosPerSecond = 1000000000
+
+func DurationToGoDuration(duration Duration) time.Duration {
+ secondNanos := duration.Seconds * nanosPerSecond
+ return time.Duration(secondNanos + int64(duration.Nanos))
+}
+
+func GoDurationToDuration(goDuration time.Duration) Duration {
+ seconds := int64(goDuration / nanosPerSecond)
+ nanos := int64(goDuration % nanosPerSecond)
+ return Duration{
+ Seconds: seconds,
+ Nanos: nanos,
+ }
+}
+
+func TimestampToGoTime(timestamp Timestamp) time.Time {
+ return time.Unix(timestamp.Seconds, timestamp.Nanos)
+}
+
+func GoTimeToTimestamp(t time.Time) Timestamp {
+ unixNanos := t.UnixNano()
+ seconds := unixNanos / nanosPerSecond
+ nanos := unixNanos % nanosPerSecond
+
+ return Timestamp{
+ Seconds: seconds,
+ Nanos: nanos,
+ }
+}
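+
+// Example (illustrative sketch) of round-tripping through the helpers above:
+//
+//	d := GoDurationToDuration(1500 * time.Millisecond)
+//	// d.Seconds == 1, d.Nanos == 500000000
+//	goDur := DurationToGoDuration(d) // 1.5s again
+//
+//	ts := GoTimeToTimestamp(time.Unix(12, 34))
+//	t := TimestampToGoTime(ts) // time.Unix(12, 34) again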
diff --git a/vendor/github.com/gofrs/uuid/.gitignore b/vendor/github.com/gofrs/uuid/.gitignore
new file mode 100644
index 000000000..666dbbb5b
--- /dev/null
+++ b/vendor/github.com/gofrs/uuid/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# binary bundle generated by go-fuzz
+uuid-fuzz.zip
diff --git a/vendor/github.com/gofrs/uuid/LICENSE b/vendor/github.com/gofrs/uuid/LICENSE
new file mode 100644
index 000000000..926d54987
--- /dev/null
+++ b/vendor/github.com/gofrs/uuid/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013-2018 by Maxim Bublis
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md
new file mode 100644
index 000000000..f5db14f07
--- /dev/null
+++ b/vendor/github.com/gofrs/uuid/README.md
@@ -0,0 +1,117 @@
+# UUID
+
+[License](https://github.com/gofrs/uuid/blob/master/LICENSE)
+[Build Status](https://travis-ci.org/gofrs/uuid)
+[GoDoc](http://godoc.org/github.com/gofrs/uuid)
+[Coverage Status](https://codecov.io/gh/gofrs/uuid/)
+[Go Report Card](https://goreportcard.com/report/github.com/gofrs/uuid)
+
+Package uuid provides a pure Go implementation of Universally Unique
+Identifiers (UUIDs), as defined in RFC-4122. This package supports both the
+creation and parsing of UUIDs in different formats.
+
+This package supports the following UUID versions:
+* Version 1, based on timestamp and MAC address (RFC-4122)
+* Version 3, based on MD5 hashing of a named value (RFC-4122)
+* Version 4, based on random numbers (RFC-4122)
+* Version 5, based on SHA-1 hashing of a named value (RFC-4122)
+
+This package also supports experimental Universally Unique Identifier implementations based on a
+[draft RFC](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03) that updates RFC-4122:
+* Version 6, a k-sortable id based on timestamp, and field-compatible with v1 (draft-peabody-dispatch-new-uuid-format, RFC-4122)
+* Version 7, a k-sortable id based on timestamp (draft-peabody-dispatch-new-uuid-format, RFC-4122)
+
+The v6 and v7 IDs are **not** considered a part of the stable API, and may be subject to behavior or API changes as part of minor releases
+to this package. They will be updated as the draft RFC changes, and will become stable if and when the draft RFC is accepted.
+
+## Project History
+
+This project was originally forked from the
+[github.com/satori/go.uuid](https://github.com/satori/go.uuid) repository after
+it appeared to be no longer maintained, while exhibiting [critical
+flaws](https://github.com/satori/go.uuid/issues/73). We have decided to take
+over this project to ensure it receives regular maintenance for the benefit of
+the larger Go community.
+
+We'd like to thank Maxim Bublis for his hard work on the original iteration of
+the package.
+
+## License
+
+The source code of this package is released under the MIT License. Please see
+the [LICENSE](https://github.com/gofrs/uuid/blob/master/LICENSE) for the full
+content of the license.
+
+## Recommended Package Version
+
+We recommend using v2.0.0+ of this package, as versions prior to 2.0.0 were
+created before our fork of the original package and have some known
+deficiencies.
+
+## Installation
+
+It is recommended to use a package manager like `dep` that understands tagged
+releases of a package, as well as semantic versioning.
+
+If you are unable to make use of a dependency manager with your project, you can
+use the `go get` command to download it directly:
+
+```Shell
+$ go get github.com/gofrs/uuid
+```
+
+## Requirements
+
+Due to subtests not being supported in older versions of Go, this package is
+only regularly tested against Go 1.7+. This package may work perfectly fine with
+Go 1.2+, but support for these older versions is not actively maintained.
+
+## Go 1.11 Modules
+
+As of v3.2.0, this repository no longer uses Go modules, and v3.2.0 no longer has a `go.mod` file. As a result, v3.2.0 also drops support for the `github.com/gofrs/uuid/v3` import path. Only module-based consumers are impacted. With the v3.2.0 release, _all_ gofrs/uuid consumers should use the `github.com/gofrs/uuid` import path.
+
+An existing module-based consumer will continue to be able to build using the `github.com/gofrs/uuid/v3` import path with any valid consumer `go.mod` that worked prior to the publishing of v3.2.0. However, any module-based consumer should start using the `github.com/gofrs/uuid` import path when possible, and _must_ switch to the `github.com/gofrs/uuid` import path before upgrading to v3.2.0.
+
+Please refer to [Issue #61](https://github.com/gofrs/uuid/issues/61) and [Issue #66](https://github.com/gofrs/uuid/issues/66) for more details.
+
+## Usage
+
+Here is a quick overview of how to use this package. For more detailed
+documentation, please see the [GoDoc Page](http://godoc.org/github.com/gofrs/uuid).
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/gofrs/uuid"
+)
+
+// Create a Version 4 UUID, panicking on error.
+// Use this form to initialize package-level variables.
+var u1 = uuid.Must(uuid.NewV4())
+
+func main() {
+ // Create a Version 4 UUID.
+ u2, err := uuid.NewV4()
+ if err != nil {
+ log.Fatalf("failed to generate UUID: %v", err)
+ }
+ log.Printf("generated Version 4 UUID %v", u2)
+
+ // Parse a UUID from a string.
+ s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ u3, err := uuid.FromString(s)
+ if err != nil {
+ log.Fatalf("failed to parse UUID %q: %v", s, err)
+ }
+ log.Printf("successfully parsed UUID %v", u3)
+}
+```
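+
+The package also supports deterministic, namespace-based UUIDs. As a short
+sketch (using the package's predefined `NamespaceDNS`): Version 3 and Version 5
+UUIDs hash a namespace UUID together with a name, so the same inputs always
+produce the same UUID.
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/gofrs/uuid"
+)
+
+func main() {
+ // Create a Version 5 UUID (SHA-1 hash of a namespace UUID and a name).
+ // The result is deterministic: the same inputs always yield the same UUID.
+ u5 := uuid.NewV5(uuid.NamespaceDNS, "example.com")
+ log.Printf("generated Version 5 UUID %v", u5)
+
+ // Version 3 is the MD5-based equivalent.
+ u3 := uuid.NewV3(uuid.NamespaceDNS, "example.com")
+ log.Printf("generated Version 3 UUID %v", u3)
+}
+```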
+
+## References
+
+* [RFC-4122](https://tools.ietf.org/html/rfc4122)
+* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
+* [New UUID Formats RFC Draft (Peabody) Rev 03](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03)
diff --git a/vendor/github.com/gofrs/uuid/codec.go b/vendor/github.com/gofrs/uuid/codec.go
new file mode 100644
index 000000000..e3014c68c
--- /dev/null
+++ b/vendor/github.com/gofrs/uuid/codec.go
@@ -0,0 +1,212 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+)
+
+// FromBytes returns a UUID generated from the raw byte slice input.
+// It will return an error if the slice isn't 16 bytes long.
+func FromBytes(input []byte) (UUID, error) {
+ u := UUID{}
+ err := u.UnmarshalBinary(input)
+ return u, err
+}
+
+// FromBytesOrNil returns a UUID generated from the raw byte slice input.
+// Same behavior as FromBytes(), but returns uuid.Nil instead of an error.
+func FromBytesOrNil(input []byte) UUID {
+ uuid, err := FromBytes(input)
+ if err != nil {
+ return Nil
+ }
+ return uuid
+}
+
+// FromString returns a UUID parsed from the input string.
+// Input is expected in a form accepted by UnmarshalText.
+func FromString(input string) (UUID, error) {
+ u := UUID{}
+ err := u.UnmarshalText([]byte(input))
+ return u, err
+}
+
+// FromStringOrNil returns a UUID parsed from the input string.
+// Same behavior as FromString(), but returns uuid.Nil instead of an error.
+func FromStringOrNil(input string) UUID {
+ uuid, err := FromString(input)
+ if err != nil {
+ return Nil
+ }
+ return uuid
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by the String() method.
+func (u UUID) MarshalText() ([]byte, error) {
+ return []byte(u.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The following formats are supported:
+//
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+// "6ba7b8109dad11d180b400c04fd430c8"
+// "{6ba7b8109dad11d180b400c04fd430c8}",
+// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8"
+//
+// ABNF for supported UUID text representation follows:
+//
+// URN := 'urn'
+// UUID-NID := 'uuid'
+//
+// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
+// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
+// 'A' | 'B' | 'C' | 'D' | 'E' | 'F'
+//
+// hexoct := hexdig hexdig
+// 2hexoct := hexoct hexoct
+// 4hexoct := 2hexoct 2hexoct
+// 6hexoct := 4hexoct 2hexoct
+// 12hexoct := 6hexoct 6hexoct
+//
+// hashlike := 12hexoct
+// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
+//
+// plain := canonical | hashlike
+// uuid := canonical | hashlike | braced | urn
+//
+// braced := '{' plain '}' | '{' hashlike '}'
+// urn := URN ':' UUID-NID ':' plain
+//
+func (u *UUID) UnmarshalText(text []byte) error {
+ switch len(text) {
+ case 32:
+ return u.decodeHashLike(text)
+ case 34, 38:
+ return u.decodeBraced(text)
+ case 36:
+ return u.decodeCanonical(text)
+ case 41, 45:
+ return u.decodeURN(text)
+ default:
+ return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(text), text)
+ }
+}
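+
+// Example (illustrative): all supported textual forms decode to the same value.
+//
+//	var u UUID
+//	_ = u.UnmarshalText([]byte("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+//	// u.String() == "6ba7b810-9dad-11d1-80b4-00c04fd430c8"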
+
+// decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3):
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
+func (u *UUID) decodeCanonical(t []byte) error {
+ if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
+ return fmt.Errorf("uuid: incorrect UUID format in string %q", t)
+ }
+
+ src := t
+ dst := u[:]
+
+ for i, byteGroup := range byteGroups {
+ if i > 0 {
+ src = src[1:] // skip dash
+ }
+ _, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup])
+ if err != nil {
+ return err
+ }
+ src = src[byteGroup:]
+ dst = dst[byteGroup/2:]
+ }
+
+ return nil
+}
+
+// decodeHashLike decodes UUID strings that are using the following format:
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeHashLike(t []byte) error {
+ src := t[:]
+ dst := u[:]
+
+ _, err := hex.Decode(dst, src)
+ return err
+}
+
+// decodeBraced decodes UUID strings that are using the following formats:
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
+// "{6ba7b8109dad11d180b400c04fd430c8}".
+func (u *UUID) decodeBraced(t []byte) error {
+ l := len(t)
+
+ if t[0] != '{' || t[l-1] != '}' {
+ return fmt.Errorf("uuid: incorrect UUID format in string %q", t)
+ }
+
+ return u.decodePlain(t[1 : l-1])
+}
+
+// decodeURN decodes UUID strings that are using the following formats:
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeURN(t []byte) error {
+ total := len(t)
+
+ urnUUIDPrefix := t[:9]
+
+ if !bytes.Equal(urnUUIDPrefix, urnPrefix) {
+ return fmt.Errorf("uuid: incorrect UUID format in string %q", t)
+ }
+
+ return u.decodePlain(t[9:total])
+}
+
+// decodePlain decodes UUID strings that are using the following formats:
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodePlain(t []byte) error {
+ switch len(t) {
+ case 32:
+ return u.decodeHashLike(t)
+ case 36:
+ return u.decodeCanonical(t)
+ default:
+ return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(t), t)
+ }
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u UUID) MarshalBinary() ([]byte, error) {
+ return u.Bytes(), nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return an error if the slice isn't 16 bytes long.
+func (u *UUID) UnmarshalBinary(data []byte) error {
+ if len(data) != Size {
+ return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
+ }
+ copy(u[:], data)
+
+ return nil
+}
diff --git a/vendor/github.com/gofrs/uuid/fuzz.go b/vendor/github.com/gofrs/uuid/fuzz.go
new file mode 100644
index 000000000..afaefbc8e
--- /dev/null
+++ b/vendor/github.com/gofrs/uuid/fuzz.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2018 Andrei Tudor Călin
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// +build gofuzz
+
+package uuid
+
+// Fuzz implements a simple fuzz test for FromString / UnmarshalText.
+//
+// To run:
+//
+// $ go get github.com/dvyukov/go-fuzz/...
+// $ cd $GOPATH/src/github.com/gofrs/uuid
+// $ go-fuzz-build github.com/gofrs/uuid
+// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata
+//
+// If you make significant changes to FromString / UnmarshalText and add
+// new cases to fromStringTests (in codec_test.go), please run
+//
+// $ go test -seed_fuzz_corpus
+//
+// to seed the corpus with the new interesting inputs, then run the fuzzer.
+func Fuzz(data []byte) int {
+ _, err := FromString(string(data))
+ if err != nil {
+ return 0
+ }
+ return 1
+}
diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go
new file mode 100644
index 000000000..4550bc6b3
--- /dev/null
+++ b/vendor/github.com/gofrs/uuid/generator.go
@@ -0,0 +1,356 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "io"
+ "net"
+ "sync"
+ "time"
+)
+
+// Difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
+const epochStart = 122192928000000000
+
+type epochFunc func() time.Time
+
+// HWAddrFunc is the function type used to provide hardware (MAC) addresses.
+type HWAddrFunc func() (net.HardwareAddr, error)
+
+// DefaultGenerator is the default UUID Generator used by this package.
+var DefaultGenerator Generator = NewGen()
+
+// NewV1 returns a UUID based on the current timestamp and MAC address.
+func NewV1() (UUID, error) {
+ return DefaultGenerator.NewV1()
+}
+
+// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
+func NewV3(ns UUID, name string) UUID {
+ return DefaultGenerator.NewV3(ns, name)
+}
+
+// NewV4 returns a randomly generated UUID.
+func NewV4() (UUID, error) {
+ return DefaultGenerator.NewV4()
+}
+
+// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name.
+func NewV5(ns UUID, name string) UUID {
+ return DefaultGenerator.NewV5(ns, name)
+}
+
+// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of
+// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit
+// order being adjusted to allow the UUID to be k-sortable.
+//
+// This is implemented based on revision 03 of the Peabody UUID draft, and may
+// be subject to change pending further revisions. Until the final specification
+// revision is finished, changes required to implement updates to the spec will
+// not be considered a breaking change. They will happen in minor version
+// releases until the spec is final.
+func NewV6() (UUID, error) {
+ return DefaultGenerator.NewV6()
+}
+
+// NewV7 returns a k-sortable UUID based on the current millisecond precision
+// UNIX epoch and 74 bits of pseudorandom data.
+//
+// This is implemented based on revision 03 of the Peabody UUID draft, and may
+// be subject to change pending further revisions. Until the final specification
+// revision is finished, changes required to implement updates to the spec will
+// not be considered a breaking change. They will happen in minor version
+// releases until the spec is final.
+func NewV7() (UUID, error) {
+ return DefaultGenerator.NewV7()
+}
+
+// Generator provides an interface for generating UUIDs.
+type Generator interface {
+ NewV1() (UUID, error)
+ NewV3(ns UUID, name string) UUID
+ NewV4() (UUID, error)
+ NewV5(ns UUID, name string) UUID
+ NewV6() (UUID, error)
+ NewV7() (UUID, error)
+}
+
+// Gen is a reference UUID generator based on the specifications laid out in
+// RFC-4122 and DCE 1.1: Authentication and Security Services. This type
+// satisfies the Generator interface as defined in this package.
+//
+// For consumers who are generating V1 UUIDs, but don't want to expose the MAC
+// address of the node generating the UUIDs, the NewGenWithHWAF() function has been
+// provided as a convenience. See the function's documentation for more info.
+//
+// The authors of this package do not feel that the majority of users will need
+// to obfuscate their MAC address, and so we recommend using NewGen() to create
+// a new generator.
+type Gen struct {
+ clockSequenceOnce sync.Once
+ hardwareAddrOnce sync.Once
+ storageMutex sync.Mutex
+
+ rand io.Reader
+
+ epochFunc epochFunc
+ hwAddrFunc HWAddrFunc
+ lastTime uint64
+ clockSequence uint16
+ hardwareAddr [6]byte
+}
+
+// interface check -- build will fail if *Gen doesn't satisfy Generator
+var _ Generator = (*Gen)(nil)
+
+// NewGen returns a new instance of Gen with some default values set. Most
+// people should use this.
+func NewGen() *Gen {
+ return NewGenWithHWAF(defaultHWAddrFunc)
+}
+
+// NewGenWithHWAF builds a new UUID generator with the HWAddrFunc provided. Most
+// consumers should use NewGen() instead.
+//
+// This is used so that consumers can generate their own MAC addresses, for use
+// in the generated UUIDs, if there is some concern about exposing the physical
+// address of the machine generating the UUID.
+//
+// The Gen generator will only invoke the HWAddrFunc once, and cache that MAC
+// address for all the future UUIDs generated by it. If you'd like to switch the
+// MAC address being used, you'll need to create a new generator using this
+// function.
+func NewGenWithHWAF(hwaf HWAddrFunc) *Gen {
+ return &Gen{
+ epochFunc: time.Now,
+ hwAddrFunc: hwaf,
+ rand: rand.Reader,
+ }
+}
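+
+// Example (illustrative sketch): supplying a fixed, locally administered MAC
+// address instead of exposing a real interface address.
+//
+//	gen := NewGenWithHWAF(func() (net.HardwareAddr, error) {
+//		return net.HardwareAddr{0x02, 0x00, 0x00, 0x00, 0x00, 0x01}, nil
+//	})
+//	u, err := gen.NewV1()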
+
+// NewV1 returns a UUID based on the current timestamp and MAC address.
+func (g *Gen) NewV1() (UUID, error) {
+ u := UUID{}
+
+ timeNow, clockSeq, err := g.getClockSequence()
+ if err != nil {
+ return Nil, err
+ }
+ binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+
+ hardwareAddr, err := g.getHardwareAddr()
+ if err != nil {
+ return Nil, err
+ }
+ copy(u[10:], hardwareAddr)
+
+ u.SetVersion(V1)
+ u.SetVariant(VariantRFC4122)
+
+ return u, nil
+}
+
+// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
+func (g *Gen) NewV3(ns UUID, name string) UUID {
+ u := newFromHash(md5.New(), ns, name)
+ u.SetVersion(V3)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV4 returns a randomly generated UUID.
+func (g *Gen) NewV4() (UUID, error) {
+ u := UUID{}
+ if _, err := io.ReadFull(g.rand, u[:]); err != nil {
+ return Nil, err
+ }
+ u.SetVersion(V4)
+ u.SetVariant(VariantRFC4122)
+
+ return u, nil
+}
+
+// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name.
+func (g *Gen) NewV5(ns UUID, name string) UUID {
+ u := newFromHash(sha1.New(), ns, name)
+ u.SetVersion(V5)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of
+// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit
+// order being adjusted to allow the UUID to be k-sortable.
+//
+// This is implemented based on revision 03 of the Peabody UUID draft, and may
+// be subject to change pending further revisions. Until the final specification
+// revision is finished, changes required to implement updates to the spec will
+// not be considered a breaking change. They will happen in minor version
+// releases until the spec is final.
+func (g *Gen) NewV6() (UUID, error) {
+ var u UUID
+
+ if _, err := io.ReadFull(g.rand, u[10:]); err != nil {
+ return Nil, err
+ }
+
+ timeNow, clockSeq, err := g.getClockSequence()
+ if err != nil {
+ return Nil, err
+ }
+
+ binary.BigEndian.PutUint32(u[0:], uint32(timeNow>>28)) // set time_high
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>12)) // set time_mid
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow&0xfff)) // set time_low (minus four version bits)
+ binary.BigEndian.PutUint16(u[8:], clockSeq&0x3fff) // set clk_seq_hi_res (minus two variant bits)
+
+ u.SetVersion(V6)
+ u.SetVariant(VariantRFC4122)
+
+ return u, nil
+}
+
+// getClockSequence returns the epoch and clock sequence for V1 and V6 UUIDs.
+func (g *Gen) getClockSequence() (uint64, uint16, error) {
+ var err error
+ g.clockSequenceOnce.Do(func() {
+ buf := make([]byte, 2)
+ if _, err = io.ReadFull(g.rand, buf); err != nil {
+ return
+ }
+ g.clockSequence = binary.BigEndian.Uint16(buf)
+ })
+ if err != nil {
+ return 0, 0, err
+ }
+
+ g.storageMutex.Lock()
+ defer g.storageMutex.Unlock()
+
+ timeNow := g.getEpoch()
+ // The clock did not change since the last UUID generation,
+ // so increase the clock sequence to ensure uniqueness.
+ if timeNow <= g.lastTime {
+ g.clockSequence++
+ }
+ g.lastTime = timeNow
+
+ return timeNow, g.clockSequence, nil
+}
+
+// NewV7 returns a k-sortable UUID based on the current millisecond precision
+// UNIX epoch and 74 bits of pseudorandom data.
+//
+// This is implemented based on revision 03 of the Peabody UUID draft, and may
+// be subject to change pending further revisions. Until the final specification
+// revision is finished, changes required to implement updates to the spec will
+// not be considered a breaking change. They will happen in minor version
+// releases until the spec is final.
+func (g *Gen) NewV7() (UUID, error) {
+ var u UUID
+
+ if _, err := io.ReadFull(g.rand, u[6:]); err != nil {
+ return Nil, err
+ }
+
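+ // Pack the millisecond-precision Unix timestamp big-endian into the first 48 bits (u[0:6]).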
+ tn := g.epochFunc()
+ ms := uint64(tn.Unix())*1e3 + uint64(tn.Nanosecond())/1e6
+ u[0] = byte(ms >> 40)
+ u[1] = byte(ms >> 32)
+ u[2] = byte(ms >> 24)
+ u[3] = byte(ms >> 16)
+ u[4] = byte(ms >> 8)
+ u[5] = byte(ms)
+
+ u.SetVersion(V7)
+ u.SetVariant(VariantRFC4122)
+
+ return u, nil
+}
+
+// Returns the hardware address.
+func (g *Gen) getHardwareAddr() ([]byte, error) {
+ var err error
+ g.hardwareAddrOnce.Do(func() {
+ var hwAddr net.HardwareAddr
+ if hwAddr, err = g.hwAddrFunc(); err == nil {
+ copy(g.hardwareAddr[:], hwAddr)
+ return
+ }
+
+ // Initialize hardwareAddr randomly in case
+ // no real network interface is available.
+ if _, err = io.ReadFull(g.rand, g.hardwareAddr[:]); err != nil {
+ return
+ }
+ // Set multicast bit as recommended by RFC-4122
+ g.hardwareAddr[0] |= 0x01
+ })
+ if err != nil {
+ return []byte{}, err
+ }
+ return g.hardwareAddr[:], nil
+}
+
+// Returns the difference between UUID epoch (October 15, 1582)
+// and current time in 100-nanosecond intervals.
+func (g *Gen) getEpoch() uint64 {
+ return epochStart + uint64(g.epochFunc().UnixNano()/100)
+}
+
+// Returns the UUID based on the hashing of the namespace UUID and name.
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+ u := UUID{}
+ h.Write(ns[:])
+ h.Write([]byte(name))
+ copy(u[:], h.Sum(nil))
+
+ return u
+}
+
+var netInterfaces = net.Interfaces
+
+// Returns the hardware address.
+func defaultHWAddrFunc() (net.HardwareAddr, error) {
+ ifaces, err := netInterfaces()
+ if err != nil {
+ return []byte{}, err
+ }
+ for _, iface := range ifaces {
+ if len(iface.HardwareAddr) >= 6 {
+ return iface.HardwareAddr, nil
+ }
+ }
+ return []byte{}, fmt.Errorf("uuid: no HW address found")
+}
diff --git a/vendor/github.com/gofrs/uuid/sql.go b/vendor/github.com/gofrs/uuid/sql.go
new file mode 100644
index 000000000..6f254a4fd
--- /dev/null
+++ b/vendor/github.com/gofrs/uuid/sql.go
@@ -0,0 +1,109 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+)
+
+// Value implements the driver.Valuer interface.
+func (u UUID) Value() (driver.Value, error) {
+ return u.String(), nil
+}
+
+// Scan implements the sql.Scanner interface.
+// A 16-byte slice will be handled by UnmarshalBinary, while
+// a longer byte slice or a string will be handled by UnmarshalText.
+func (u *UUID) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case UUID: // support gorm converting from UUID to NullUUID
+ *u = src
+ return nil
+
+ case []byte:
+ if len(src) == Size {
+ return u.UnmarshalBinary(src)
+ }
+ return u.UnmarshalText(src)
+
+ case string:
+ return u.UnmarshalText([]byte(src))
+ }
+
+ return fmt.Errorf("uuid: cannot convert %T to UUID", src)
+}
+
+// NullUUID can be used with the standard sql package to represent a
+// UUID value that can be NULL in the database.
+type NullUUID struct {
+ UUID UUID
+ Valid bool
+}
+
+// Value implements the driver.Valuer interface.
+func (u NullUUID) Value() (driver.Value, error) {
+ if !u.Valid {
+ return nil, nil
+ }
+ // Delegate to UUID Value function
+ return u.UUID.Value()
+}
+
+// Scan implements the sql.Scanner interface.
+func (u *NullUUID) Scan(src interface{}) error {
+ if src == nil {
+ u.UUID, u.Valid = Nil, false
+ return nil
+ }
+
+ // Delegate to UUID Scan function
+ u.Valid = true
+ return u.UUID.Scan(src)
+}
+
+// MarshalJSON marshals the NullUUID as null or the nested UUID
+func (u NullUUID) MarshalJSON() ([]byte, error) {
+ if !u.Valid {
+ return json.Marshal(nil)
+ }
+
+ return json.Marshal(u.UUID)
+}
+
+// UnmarshalJSON unmarshals a NullUUID
+func (u *NullUUID) UnmarshalJSON(b []byte) error {
+ if bytes.Equal(b, []byte("null")) {
+ u.UUID, u.Valid = Nil, false
+ return nil
+ }
+
+ if err := json.Unmarshal(b, &u.UUID); err != nil {
+ return err
+ }
+
+ u.Valid = true
+
+ return nil
+}
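+
+// Example (illustrative sketch, hypothetical column and variable names):
+// scanning a nullable UUID column with the standard database/sql package.
+//
+//	var id NullUUID
+//	if err := row.Scan(&id); err == nil && id.Valid {
+//		// use id.UUID
+//	}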
diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go
new file mode 100644
index 000000000..e747e5412
--- /dev/null
+++ b/vendor/github.com/gofrs/uuid/uuid.go
@@ -0,0 +1,292 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// Package uuid provides implementations of the Universally Unique Identifier
+// (UUID), as specified in RFC-4122 and the Peabody RFC Draft (revision 03).
+//
+// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. The
+// Peabody UUID RFC Draft[2] provides the specification for the new k-sortable
+// UUIDs, versions 6 and 7.
+//
+// DCE 1.1[3] provides the specification for version 2, but version 2 support
+// was removed from this package in v4 due to some concerns with the
+// specification itself. Reading the spec, it seems that it would result in
+// generating UUIDs that aren't very unique. In having read the spec it seemed
+// that our implementation did not meet the spec. It also seems to be at-odds
+// with RFC 4122, meaning we would need quite a bit of special code to support
+// it. Lastly, there were no Version 2 implementations that we could find to
+// ensure we were understanding the specification correctly.
+//
+// [1] https://tools.ietf.org/html/rfc4122
+// [2] https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03
+// [3] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01
+package uuid
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+)
+
+// Size of a UUID in bytes.
+const Size = 16
+
+// UUID is an array type to represent the value of a UUID, as defined in RFC-4122.
+type UUID [Size]byte
+
+// UUID versions.
+const (
+ _ byte = iota
+ V1 // Version 1 (date-time and MAC address)
+ _ // Version 2 (date-time and MAC address, DCE security version) [removed]
+ V3 // Version 3 (namespace name-based)
+ V4 // Version 4 (random)
+ V5 // Version 5 (namespace name-based)
+ V6 // Version 6 (k-sortable timestamp and random data, field-compatible with v1) [peabody draft]
+ V7 // Version 7 (k-sortable timestamp and random data) [peabody draft]
+ _ // Version 8 (k-sortable timestamp, meant for custom implementations) [peabody draft] [not implemented]
+)
+
+// UUID layout variants.
+const (
+ VariantNCS byte = iota
+ VariantRFC4122
+ VariantMicrosoft
+ VariantFuture
+)
+
+// UUID DCE domains.
+const (
+ DomainPerson = iota
+ DomainGroup
+ DomainOrg
+)
+
+// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00,
+// 15 October 1582 within a V1 UUID. This type has no meaning for other
+// UUID versions since they don't have an embedded timestamp.
+type Timestamp uint64
+
+const _100nsPerSecond = 10000000
+
+// Time returns the time.Time representation of a Timestamp.
+func (t Timestamp) Time() (time.Time, error) {
+ secs := uint64(t) / _100nsPerSecond
+ nsecs := 100 * (uint64(t) % _100nsPerSecond)
+
+ return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil
+}
+
+// TimestampFromV1 returns the Timestamp embedded within a V1 UUID.
+// Returns an error if the UUID is any version other than 1.
+func TimestampFromV1(u UUID) (Timestamp, error) {
+ if u.Version() != 1 {
+ err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version())
+ return 0, err
+ }
+
+ low := binary.BigEndian.Uint32(u[0:4])
+ mid := binary.BigEndian.Uint16(u[4:6])
+ hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff
+
+ return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil
+}
+
+// TimestampFromV6 returns the Timestamp embedded within a V6 UUID. This
+// function returns an error if the UUID is any version other than 6.
+//
+// This is implemented based on revision 03 of the Peabody UUID draft, and may
+// be subject to change pending further revisions. Until the final specification
+// revision is finished, changes required to implement updates to the spec will
+// not be considered a breaking change. They will happen in minor version
+// releases until the spec is final.
+func TimestampFromV6(u UUID) (Timestamp, error) {
+ if u.Version() != 6 {
+ return 0, fmt.Errorf("uuid: %s is version %d, not version 6", u, u.Version())
+ }
+
+ hi := binary.BigEndian.Uint32(u[0:4])
+ mid := binary.BigEndian.Uint16(u[4:6])
+ low := binary.BigEndian.Uint16(u[6:8]) & 0xfff
+
+ return Timestamp(uint64(low) + (uint64(mid) << 12) + (uint64(hi) << 28)), nil
+}
+
+// String parse helpers.
+var (
+ urnPrefix = []byte("urn:uuid:")
+ byteGroups = []int{8, 4, 4, 4, 12}
+)
+
+// Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to
+// zero.
+var Nil = UUID{}
+
+// Predefined namespace UUIDs.
+var (
+ NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+)
+
+// IsNil reports whether the UUID is equal to the nil UUID.
+func (u UUID) IsNil() bool {
+ return u == Nil
+}
+
+// Version returns the algorithm version used to generate the UUID.
+func (u UUID) Version() byte {
+ return u[6] >> 4
+}
+
+// Variant returns the UUID layout variant.
+func (u UUID) Variant() byte {
+ switch {
+ case (u[8] >> 7) == 0x00:
+ return VariantNCS
+ case (u[8] >> 6) == 0x02:
+ return VariantRFC4122
+ case (u[8] >> 5) == 0x06:
+ return VariantMicrosoft
+ case (u[8] >> 5) == 0x07:
+ fallthrough
+ default:
+ return VariantFuture
+ }
+}
+
+// Bytes returns a byte slice representation of the UUID.
+func (u UUID) Bytes() []byte {
+ return u[:]
+}
+
+// String returns a canonical RFC-4122 string representation of the UUID:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (u UUID) String() string {
+ buf := make([]byte, 36)
+
+ hex.Encode(buf[0:8], u[0:4])
+ buf[8] = '-'
+ hex.Encode(buf[9:13], u[4:6])
+ buf[13] = '-'
+ hex.Encode(buf[14:18], u[6:8])
+ buf[18] = '-'
+ hex.Encode(buf[19:23], u[8:10])
+ buf[23] = '-'
+ hex.Encode(buf[24:], u[10:])
+
+ return string(buf)
+}
+
+// Format implements fmt.Formatter for UUID values.
+//
+// The behavior is as follows:
+// The 'x' and 'X' verbs output only the hex digits of the UUID, using a-f for 'x' and A-F for 'X'.
+// The 'v', '+v', 's' and 'q' verbs return the canonical RFC-4122 string representation.
+// The 'S' verb returns the RFC-4122 format, but with capital hex digits.
+// The '#v' verb returns the "Go syntax" representation, which is a 16 byte array initializer.
+// All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return
+// "%!verb(uuid.UUID=value)" as recommended by the fmt package.
+func (u UUID) Format(f fmt.State, c rune) {
+ switch c {
+ case 'x', 'X':
+ s := hex.EncodeToString(u.Bytes())
+ if c == 'X' {
+ s = strings.Map(toCapitalHexDigits, s)
+ }
+ _, _ = io.WriteString(f, s)
+ case 'v':
+ var s string
+ if f.Flag('#') {
+ s = fmt.Sprintf("%#v", [Size]byte(u))
+ } else {
+ s = u.String()
+ }
+ _, _ = io.WriteString(f, s)
+ case 's', 'S':
+ s := u.String()
+ if c == 'S' {
+ s = strings.Map(toCapitalHexDigits, s)
+ }
+ _, _ = io.WriteString(f, s)
+ case 'q':
+ _, _ = io.WriteString(f, `"`+u.String()+`"`)
+ default:
+ // invalid/unsupported format verb
+ fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String())
+ }
+}
+
+func toCapitalHexDigits(ch rune) rune {
+ // convert a-f hex digits to A-F
+ switch ch {
+ case 'a':
+ return 'A'
+ case 'b':
+ return 'B'
+ case 'c':
+ return 'C'
+ case 'd':
+ return 'D'
+ case 'e':
+ return 'E'
+ case 'f':
+ return 'F'
+ default:
+ return ch
+ }
+}
+
+// SetVersion sets the version bits.
+func (u *UUID) SetVersion(v byte) {
+ u[6] = (u[6] & 0x0f) | (v << 4)
+}
+
+// SetVariant sets the variant bits.
+func (u *UUID) SetVariant(v byte) {
+ switch v {
+ case VariantNCS:
+ u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
+ case VariantRFC4122:
+ u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
+ case VariantMicrosoft:
+ u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
+ case VariantFuture:
+ fallthrough
+ default:
+ u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
+ }
+}
+
+// Must is a helper that wraps a call to a function returning (UUID, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))
+func Must(u UUID, err error) UUID {
+ if err != nil {
+ panic(err)
+ }
+ return u
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
new file mode 100644
index 000000000..86c6d03fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
@@ -0,0 +1,23 @@
+# UNRELEASED
+
+# 1.3.0 (September 17th, 2020)
+
+FEATURES
+
+* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)]
+
+# 1.2.0 (March 18th, 2020)
+
+FEATURES
+
+* Add a `Clone` method to `Txn`, allowing a transaction to be split into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)]
+
+# 1.1.0 (May 22nd, 2019)
+
+FEATURES
+
+* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
+
+# 1.0.0 (August 30th, 2018)
+
+* go mod adopted
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE
new file mode 100644
index 000000000..e87a115e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md
new file mode 100644
index 000000000..aca15a642
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md
@@ -0,0 +1,66 @@
+go-immutable-radix [CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
+=========
+
+Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
+The package only provides a single `Tree` implementation, optimized for sparse nodes.
+
+As a radix tree, it provides the following:
+ * O(k) operations. In many cases, this can be faster than a hash table since
+ the hash function is an O(k) operation, and hash tables have very poor cache locality.
+ * Minimum / Maximum value lookups
+ * Ordered iteration
+
+A tree supports using a transaction to batch multiple updates (insert, delete)
+in a more efficient manner than performing each operation one at a time.
+
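+For example, a transaction can batch several writes and publish them
+atomically with a single `Commit` (a minimal sketch):
+
+```go
+// Batch multiple updates in one transaction; readers of the old
+// tree are unaffected until Commit returns the new root.
+r := iradix.New()
+txn := r.Txn()
+txn.Insert([]byte("foo"), 1)
+txn.Insert([]byte("bar"), 2)
+txn.Delete([]byte("foo"))
+r = txn.Commit()
+```
+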
+For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
+
+Example
+=======
+
+Below is a simple example of usage:
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("foo"), 1)
+r, _, _ = r.Insert([]byte("bar"), 2)
+r, _, _ = r.Insert([]byte("foobar"), 2)
+
+// Find the longest prefix match
+m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
+if string(m) != "foo" {
+ panic("should be foo")
+}
+```
+
+Here is an example of performing a range scan of the keys.
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+ if string(key) >= "050" {
+ break
+ }
+ fmt.Println(string(key))
+}
+// Output:
+// 005
+// 010
+```
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
new file mode 100644
index 000000000..a63674775
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
@@ -0,0 +1,21 @@
+package iradix
+
+import "sort"
+
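+// edges is a list of edges ordered by label; it implements sort.Interface.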
+type edges []edge
+
+func (e edges) Len() int {
+ return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+ return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+ e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+ sort.Sort(e)
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
new file mode 100644
index 000000000..168bda76d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -0,0 +1,676 @@
+package iradix
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+ // defaultModifiedCache is the default size of the modified node
+ // cache used per transaction. This is used to cache the updates
+ // to the nodes near the root, while the leaves do not need to be
+ // cached. This is important for very large transactions to prevent
+ // the modified cache from growing to be enormous. This is also used
+ // to set the max size of the mutation notify maps since those should
+ // also be bounded in a similar way.
+ defaultModifiedCache = 8192
+)
+
+// Tree implements an immutable radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over a standard
+// hash map is prefix-based lookups and ordered iteration. The immutability
+// means that it is safe to concurrently read from a Tree without any
+// coordination.
+type Tree struct {
+ root *Node
+ size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+ t := &Tree{
+ root: &Node{
+ mutateCh: make(chan struct{}),
+ },
+ }
+ return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+ return t.size
+}
+
+// Txn is a transaction on the tree. This transaction is applied
+// atomically and returns a new tree when committed. A transaction
+// is not thread safe, and should only be used by a single goroutine.
+type Txn struct {
+ // root is the modified root for the transaction.
+ root *Node
+
+ // snap is a snapshot of the root node for use if we have to run the
+ // slow notify algorithm.
+ snap *Node
+
+ // size tracks the size of the tree as it is modified during the
+ // transaction.
+ size int
+
+ // writable is a cache of writable nodes that have been created during
+ // the course of the transaction. This allows us to re-use the same
+ // nodes for further writes and avoid unnecessary copies of nodes that
+ // have never been exposed outside the transaction. This will only hold
+ // up to defaultModifiedCache number of entries.
+ writable *simplelru.LRU
+
+ // trackChannels is used to hold channels that need to be notified to
+ // signal mutation of the tree. This will only hold up to
+ // defaultModifiedCache number of entries, after which we will set the
+ // trackOverflow flag, which will cause us to use a more expensive
+ // algorithm to perform the notifications. Mutation tracking is only
+ // performed if trackMutate is true.
+ trackChannels map[chan struct{}]struct{}
+ trackOverflow bool
+ trackMutate bool
+}
+
+// Txn starts a new transaction that can be used to mutate the tree
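+//
+// A typical use (sketch): apply a batch of writes, then Commit to obtain
+// the new immutable tree:
+//
+//	txn := t.Txn()
+//	txn.Insert([]byte("key"), 1)
+//	newTree := txn.Commit()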
+func (t *Tree) Txn() *Txn {
+ txn := &Txn{
+ root: t.root,
+ snap: t.root,
+ size: t.size,
+ }
+ return txn
+}
+
+// Clone makes an independent copy of the transaction. The new transaction
+// does not track any nodes and has TrackMutate turned off. The cloned
+// transaction will contain any uncommitted writes in the original
+// transaction, but further mutations to either will be independent and
+// result in different radix trees on Commit. A cloned transaction may be
+// passed to another goroutine and mutated there independently; however,
+// each transaction may only be mutated in a single thread.
+func (t *Txn) Clone() *Txn {
+ // reset the writable node cache to avoid leaking future writes into the clone
+ t.writable = nil
+
+ txn := &Txn{
+ root: t.root,
+ snap: t.snap,
+ size: t.size,
+ }
+ return txn
+}
+
+// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
+// then notifications will be issued for affected internal nodes and leaves when
+// the transaction is committed.
+func (t *Txn) TrackMutate(track bool) {
+ t.trackMutate = track
+}
+
+// trackChannel safely attempts to track the given mutation channel, setting the
+// overflow flag if we can no longer track any more. This limits the amount of
+// state that will accumulate during a transaction and we have a slower algorithm
+// to switch to if we overflow.
+func (t *Txn) trackChannel(ch chan struct{}) {
+ // In overflow, make sure we don't store any more objects.
+ if t.trackOverflow {
+ return
+ }
+
+ // If this would overflow the state we reject it and set the flag (since
+ // we aren't tracking everything that's required any longer).
+ if len(t.trackChannels) >= defaultModifiedCache {
+ // Mark that we are in the overflow state
+ t.trackOverflow = true
+
+ // Clear the map so that the channels can be garbage collected. It is
+ // safe to do this since we have already overflowed and will be using
+ // the slow notify algorithm.
+ t.trackChannels = nil
+ return
+ }
+
+ // Create the map on the fly when we need it.
+ if t.trackChannels == nil {
+ t.trackChannels = make(map[chan struct{}]struct{})
+ }
+
+ // Otherwise we are good to track it.
+ t.trackChannels[ch] = struct{}{}
+}
+
+// writeNode returns a node to be modified, if the current node has already been
+// modified during the course of the transaction, it is used in-place. Set
+// forLeafUpdate to true if you are getting a write node to update the leaf,
+// which will set leaf mutation tracking appropriately as well.
+func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
+ // Ensure the writable set exists.
+ if t.writable == nil {
+ lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
+ if err != nil {
+ panic(err)
+ }
+ t.writable = lru
+ }
+
+ // If this node has already been modified, we can continue to use it
+ // during this transaction. We know that we don't need to track it for
+ // a node update since the node is writable, but if this is for a leaf
+ // update we track it, in case the initial write to this node didn't
+ // update the leaf.
+ if _, ok := t.writable.Get(n); ok {
+ if t.trackMutate && forLeafUpdate && n.leaf != nil {
+ t.trackChannel(n.leaf.mutateCh)
+ }
+ return n
+ }
+
+ // Mark this node as being mutated.
+ if t.trackMutate {
+ t.trackChannel(n.mutateCh)
+ }
+
+ // Mark its leaf as being mutated, if appropriate.
+ if t.trackMutate && forLeafUpdate && n.leaf != nil {
+ t.trackChannel(n.leaf.mutateCh)
+ }
+
+ // Copy the existing node. If you have set forLeafUpdate it will be
+ // safe to replace this leaf with another after you get your node for
+ // writing. You MUST replace it, because the channel associated with
+ // this leaf will be closed when this transaction is committed.
+ nc := &Node{
+ mutateCh: make(chan struct{}),
+ leaf: n.leaf,
+ }
+ if n.prefix != nil {
+ nc.prefix = make([]byte, len(n.prefix))
+ copy(nc.prefix, n.prefix)
+ }
+ if len(n.edges) != 0 {
+ nc.edges = make([]edge, len(n.edges))
+ copy(nc.edges, n.edges)
+ }
+
+ // Mark this node as writable.
+ t.writable.Add(nc, nil)
+ return nc
+}
+
+// trackChannelsAndCount visits all the nodes in the tree under n, and adds
+// their mutate channels to the transaction. It returns the number of leaf
+// nodes in the subtree visited.
+func (t *Txn) trackChannelsAndCount(n *Node) int {
+ // Count only leaf nodes
+ leaves := 0
+ if n.leaf != nil {
+ leaves = 1
+ }
+ // Mark this node as being mutated.
+ if t.trackMutate {
+ t.trackChannel(n.mutateCh)
+ }
+
+ // Mark its leaf as being mutated, if appropriate.
+ if t.trackMutate && n.leaf != nil {
+ t.trackChannel(n.leaf.mutateCh)
+ }
+
+ // Recurse on the children
+ for _, e := range n.edges {
+ leaves += t.trackChannelsAndCount(e.node)
+ }
+ return leaves
+}
+
+// mergeChild is called to collapse the given node with its child. This is only
+// called when the given node is not a leaf and has a single edge.
+func (t *Txn) mergeChild(n *Node) {
+ // Mark the child node as being mutated since we are about to abandon
+ // it. We don't need to mark the leaf since we are retaining it if it
+ // is there.
+ e := n.edges[0]
+ child := e.node
+ if t.trackMutate {
+ t.trackChannel(child.mutateCh)
+ }
+
+ // Merge the nodes.
+ n.prefix = concat(n.prefix, child.prefix)
+ n.leaf = child.leaf
+ if len(child.edges) != 0 {
+ n.edges = make([]edge, len(child.edges))
+ copy(n.edges, child.edges)
+ } else {
+ n.edges = nil
+ }
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+ // Handle key exhaustion
+ if len(search) == 0 {
+ var oldVal interface{}
+ didUpdate := false
+ if n.isLeaf() {
+ oldVal = n.leaf.val
+ didUpdate = true
+ }
+
+ nc := t.writeNode(n, true)
+ nc.leaf = &leafNode{
+ mutateCh: make(chan struct{}),
+ key: k,
+ val: v,
+ }
+ return nc, oldVal, didUpdate
+ }
+
+ // Look for the edge
+ idx, child := n.getEdge(search[0])
+
+ // No edge, create one
+ if child == nil {
+ e := edge{
+ label: search[0],
+ node: &Node{
+ mutateCh: make(chan struct{}),
+ leaf: &leafNode{
+ mutateCh: make(chan struct{}),
+ key: k,
+ val: v,
+ },
+ prefix: search,
+ },
+ }
+ nc := t.writeNode(n, false)
+ nc.addEdge(e)
+ return nc, nil, false
+ }
+
+ // Determine longest prefix of the search key on match
+ commonPrefix := longestPrefix(search, child.prefix)
+ if commonPrefix == len(child.prefix) {
+ search = search[commonPrefix:]
+ newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+ if newChild != nil {
+ nc := t.writeNode(n, false)
+ nc.edges[idx].node = newChild
+ return nc, oldVal, didUpdate
+ }
+ return nil, oldVal, didUpdate
+ }
+
+ // Split the node
+ nc := t.writeNode(n, false)
+ splitNode := &Node{
+ mutateCh: make(chan struct{}),
+ prefix: search[:commonPrefix],
+ }
+ nc.replaceEdge(edge{
+ label: search[0],
+ node: splitNode,
+ })
+
+ // Restore the existing child node
+ modChild := t.writeNode(child, false)
+ splitNode.addEdge(edge{
+ label: modChild.prefix[commonPrefix],
+ node: modChild,
+ })
+ modChild.prefix = modChild.prefix[commonPrefix:]
+
+ // Create a new leaf node
+ leaf := &leafNode{
+ mutateCh: make(chan struct{}),
+ key: k,
+ val: v,
+ }
+
+ // If the new key is a subset, add it to this node
+ search = search[commonPrefix:]
+ if len(search) == 0 {
+ splitNode.leaf = leaf
+ return nc, nil, false
+ }
+
+ // Create a new edge for the node
+ splitNode.addEdge(edge{
+ label: search[0],
+ node: &Node{
+ mutateCh: make(chan struct{}),
+ leaf: leaf,
+ prefix: search,
+ },
+ })
+ return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ if !n.isLeaf() {
+ return nil, nil
+ }
+ // Copy the pointer in case we are in a transaction that already
+ // modified this node since the node will be reused. Any changes
+ // made to the node will not affect returning the original leaf
+ // value.
+ oldLeaf := n.leaf
+
+ // Remove the leaf node
+ nc := t.writeNode(n, true)
+ nc.leaf = nil
+
+ // Check if this node should be merged
+ if n != t.root && len(nc.edges) == 1 {
+ t.mergeChild(nc)
+ }
+ return nc, oldLeaf
+ }
+
+ // Look for an edge
+ label := search[0]
+ idx, child := n.getEdge(label)
+ if child == nil || !bytes.HasPrefix(search, child.prefix) {
+ return nil, nil
+ }
+
+ // Consume the search prefix
+ search = search[len(child.prefix):]
+ newChild, leaf := t.delete(n, child, search)
+ if newChild == nil {
+ return nil, nil
+ }
+
+ // Copy this node. WATCH OUT - it's safe to pass "false" here because we
+ // will only ADD a leaf via nc.mergeChild() if there isn't one due to
+ // the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+ // so be careful if you change any of the logic here.
+ nc := t.writeNode(n, false)
+
+ // Delete the edge if the node has no edges
+ if newChild.leaf == nil && len(newChild.edges) == 0 {
+ nc.delEdge(label)
+ if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+ t.mergeChild(nc)
+ }
+ } else {
+ nc.edges[idx].node = newChild
+ }
+ return nc, leaf
+}
+
+// deletePrefix does a recursive deletion of all leaves under the given prefix
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ nc := t.writeNode(n, true)
+ if n.isLeaf() {
+ nc.leaf = nil
+ }
+ nc.edges = nil
+ return nc, t.trackChannelsAndCount(n)
+ }
+
+ // Look for an edge
+ label := search[0]
+ idx, child := n.getEdge(label)
+ // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
+ // Need to do both so that we can delete prefixes that don't correspond to any node in the tree
+ if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+ return nil, 0
+ }
+
+ // Consume the search prefix
+ if len(child.prefix) > len(search) {
+ search = []byte("")
+ } else {
+ search = search[len(child.prefix):]
+ }
+ newChild, numDeletions := t.deletePrefix(n, child, search)
+ if newChild == nil {
+ return nil, 0
+ }
+ // Copy this node. WATCH OUT - it's safe to pass "false" here because we
+ // will only ADD a leaf via nc.mergeChild() if there isn't one due to
+ // the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+ // so be careful if you change any of the logic here.
+
+ nc := t.writeNode(n, false)
+
+ // Delete the edge if the node has no edges
+ if newChild.leaf == nil && len(newChild.edges) == 0 {
+ nc.delEdge(label)
+ if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+ t.mergeChild(nc)
+ }
+ } else {
+ nc.edges[idx].node = newChild
+ }
+ return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
+ newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+ if newRoot != nil {
+ t.root = newRoot
+ }
+ if !didUpdate {
+ t.size++
+ }
+ return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
+func (t *Txn) Delete(k []byte) (interface{}, bool) {
+ newRoot, leaf := t.delete(nil, t.root, k)
+ if newRoot != nil {
+ t.root = newRoot
+ }
+ if leaf != nil {
+ t.size--
+ return leaf.val, true
+ }
+ return nil, false
+}
+
+// DeletePrefix is used to delete an entire subtree that matches the prefix
+// This will delete all nodes under that prefix
+func (t *Txn) DeletePrefix(prefix []byte) bool {
+ newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
+ if newRoot != nil {
+ t.root = newRoot
+ t.size = t.size - numDeletions
+ return true
+ }
+ return false
+}
+
+// Root returns the current root of the radix tree within this
+// transaction. The root is not safe across insert and delete operations,
+// but can be used to read the current state during a transaction.
+func (t *Txn) Root() *Node {
+ return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Txn) Get(k []byte) (interface{}, bool) {
+ return t.root.Get(k)
+}
+
+// GetWatch is used to lookup a specific key, returning
+// the watch channel, value and if it was found
+func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
+ return t.root.GetWatch(k)
+}
+
+// Commit is used to finalize the transaction and return a new tree. If mutation
+// tracking is turned on then notifications will also be issued.
+func (t *Txn) Commit() *Tree {
+ nt := t.CommitOnly()
+ if t.trackMutate {
+ t.Notify()
+ }
+ return nt
+}
+
+// CommitOnly is used to finalize the transaction and return a new tree, but
+// does not issue any notifications until Notify is called.
+func (t *Txn) CommitOnly() *Tree {
+ nt := &Tree{t.root, t.size}
+ t.writable = nil
+ return nt
+}
+
+// slowNotify does a complete comparison of the before and after trees in order
+// to trigger notifications. This doesn't require any additional state but it
+// is very expensive to compute.
+func (t *Txn) slowNotify() {
+ snapIter := t.snap.rawIterator()
+ rootIter := t.root.rawIterator()
+ for snapIter.Front() != nil || rootIter.Front() != nil {
+ // If we've exhausted the nodes in the old snapshot, we know
+ // there's nothing remaining to notify.
+ if snapIter.Front() == nil {
+ return
+ }
+ snapElem := snapIter.Front()
+
+ // If we've exhausted the nodes in the new root, we know we need
+ // to invalidate everything that remains in the old snapshot. We
+ // know from the loop condition there's something in the old
+ // snapshot.
+ if rootIter.Front() == nil {
+ close(snapElem.mutateCh)
+ if snapElem.isLeaf() {
+ close(snapElem.leaf.mutateCh)
+ }
+ snapIter.Next()
+ continue
+ }
+
+ // Do one string compare so we can check the various conditions
+ // below without repeating the compare.
+ cmp := strings.Compare(snapIter.Path(), rootIter.Path())
+
+ // If the snapshot is behind the root, then we must have deleted
+ // this node during the transaction.
+ if cmp < 0 {
+ close(snapElem.mutateCh)
+ if snapElem.isLeaf() {
+ close(snapElem.leaf.mutateCh)
+ }
+ snapIter.Next()
+ continue
+ }
+
+ // If the snapshot is ahead of the root, then we must have added
+ // this node during the transaction.
+ if cmp > 0 {
+ rootIter.Next()
+ continue
+ }
+
+ // If we have the same path, then we need to see if we mutated a
+ // node and possibly the leaf.
+ rootElem := rootIter.Front()
+ if snapElem != rootElem {
+ close(snapElem.mutateCh)
+ if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
+ close(snapElem.leaf.mutateCh)
+ }
+ }
+ snapIter.Next()
+ rootIter.Next()
+ }
+}
+
+// Notify is used along with TrackMutate to trigger notifications. This must
+// only be done once a transaction is committed via CommitOnly, and it is called
+// automatically by Commit.
+func (t *Txn) Notify() {
+ if !t.trackMutate {
+ return
+ }
+
+ // If we've overflowed the tracking state we can't use it in any way and
+ // need to do a full tree compare.
+ if t.trackOverflow {
+ t.slowNotify()
+ } else {
+ for ch := range t.trackChannels {
+ close(ch)
+ }
+ }
+
+ // Clean up the tracking state so that a re-notify is safe (will trigger
+ // the else clause above which will be a no-op).
+ t.trackChannels = nil
+ t.trackOverflow = false
+}
+
+// Insert is used to add or update a given key. The return provides
+// the new tree, previous value and a bool indicating if any was set.
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
+ txn := t.Txn()
+ old, ok := txn.Insert(k, v)
+ return txn.Commit(), old, ok
+}
+
+// Delete is used to delete a given key. Returns the new tree,
+// old value if any, and a bool indicating if the key was set.
+func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
+ txn := t.Txn()
+ old, ok := txn.Delete(k)
+ return txn.Commit(), old, ok
+}
+
+// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
+// and a bool indicating if the prefix matched any nodes
+func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
+ txn := t.Txn()
+ ok := txn.DeletePrefix(k)
+ return txn.Commit(), ok
+}
+
+// Root returns the root node of the tree which can be used for richer
+// query operations.
+func (t *Tree) Root() *Node {
+ return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(k []byte) (interface{}, bool) {
+ return t.root.Get(k)
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 []byte) int {
+ max := len(k1)
+ if l := len(k2); l < max {
+ max = l
+ }
+ var i int
+ for i = 0; i < max; i++ {
+ if k1[i] != k2[i] {
+ break
+ }
+ }
+ return i
+}
+
+// concat two byte slices, returning a third new copy
+func concat(a, b []byte) []byte {
+ c := make([]byte, len(a)+len(b))
+ copy(c, a)
+ copy(c[len(a):], b)
+ return c
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
new file mode 100644
index 000000000..f17d0a644
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -0,0 +1,205 @@
+package iradix
+
+import (
+ "bytes"
+)
+
+// Iterator is used to iterate over a set of nodes
+// in pre-order
+type Iterator struct {
+ node *Node
+ stack []edges
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+ // Wipe the stack
+ i.stack = nil
+ n := i.node
+ watch = n.mutateCh
+ search := prefix
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ i.node = n
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ i.node = nil
+ return
+ }
+
+ // Update to the finest granularity as the search makes progress
+ watch = n.mutateCh
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+
+ } else if bytes.HasPrefix(n.prefix, search) {
+ i.node = n
+ return
+ } else {
+ i.node = nil
+ return
+ }
+ }
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (i *Iterator) SeekPrefix(prefix []byte) {
+ i.SeekPrefixWatch(prefix)
+}
+
+func (i *Iterator) recurseMin(n *Node) *Node {
+ // Traverse to the minimum child
+ if n.leaf != nil {
+ return n
+ }
+ nEdges := len(n.edges)
+ if nEdges > 1 {
+ // Add all the other edges to the stack (the min node will be added as
+ // we recurse)
+ i.stack = append(i.stack, n.edges[1:])
+ }
+ if nEdges > 0 {
+ return i.recurseMin(n.edges[0].node)
+ }
+ // Shouldn't be possible
+ return nil
+}
+
+// SeekLowerBound is used to seek the iterator to the smallest key that is
+// greater or equal to the given key. There is no watch variant as it's hard to
+// predict based on the radix structure which node(s) changes might affect the
+// result.
+func (i *Iterator) SeekLowerBound(key []byte) {
+ // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+ // go because we need only a subset of edges of many nodes in the path to the
+ // leaf with the lower bound. Note that the iterator will still recurse into
+ // children that we don't traverse on the way to the lower bound as it
+ // walks the stack.
+ i.stack = []edges{}
+ // i.node starts off in the common case as pointing to the root node of the
+ // tree. By the time we return we have either found a lower bound and set up
+ // the stack to traverse all larger keys, or we have not and the stack and
+ // node should both be nil to prevent the iterator from assuming it is just
+ // iterating the whole tree from the root node. Either way this needs to end
+ // up as nil so just set it here.
+ n := i.node
+ i.node = nil
+ search := key
+
+ found := func(n *Node) {
+ i.stack = append(i.stack, edges{edge{node: n}})
+ }
+
+ findMin := func(n *Node) {
+ n = i.recurseMin(n)
+ if n != nil {
+ found(n)
+ return
+ }
+ }
+
+ for {
+ // Compare current prefix with the search key's same-length prefix.
+ var prefixCmp int
+ if len(n.prefix) < len(search) {
+ prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+ } else {
+ prefixCmp = bytes.Compare(n.prefix, search)
+ }
+
+ if prefixCmp > 0 {
+ // Prefix is larger, that means the lower bound is greater than the search
+ // and from now on we need to follow the minimum path to the smallest
+ // leaf under this subtree.
+ findMin(n)
+ return
+ }
+
+ if prefixCmp < 0 {
+ // Prefix is smaller than search prefix, that means there is no lower
+ // bound
+ i.node = nil
+ return
+ }
+
+ // Prefix is equal, we are still heading for an exact match. If this is a
+ // leaf and an exact match we're done.
+ if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
+ found(n)
+ return
+ }
+
+ // Consume the search prefix if the current node has one. Note that this is
+ // safe because if n.prefix is longer than the search slice prefixCmp would
+ // have been > 0 above and the method would have already returned.
+ search = search[len(n.prefix):]
+
+ if len(search) == 0 {
+ // We've exhausted the search key, but the current node is not an exact
+ // match or not a leaf. That means that the leaf value if it exists, and
+ // all child nodes must be strictly greater, the smallest key in this
+ // subtree must be the lower bound.
+ findMin(n)
+ return
+ }
+
+ // Otherwise, take the lower bound next edge.
+ idx, lbNode := n.getLowerBoundEdge(search[0])
+ if lbNode == nil {
+ return
+ }
+
+ // Create stack edges for all the strictly higher edges in this node.
+ if idx+1 < len(n.edges) {
+ i.stack = append(i.stack, n.edges[idx+1:])
+ }
+
+ // Recurse
+ n = lbNode
+ }
+}
+
+// Next returns the next node in order
+func (i *Iterator) Next() ([]byte, interface{}, bool) {
+ // Initialize our stack if needed
+ if i.stack == nil && i.node != nil {
+ i.stack = []edges{
+ {
+ edge{node: i.node},
+ },
+ }
+ }
+
+ for len(i.stack) > 0 {
+ // Inspect the last element of the stack
+ n := len(i.stack)
+ last := i.stack[n-1]
+ elem := last[0].node
+
+ // Update the stack
+ if len(last) > 1 {
+ i.stack[n-1] = last[1:]
+ } else {
+ i.stack = i.stack[:n-1]
+ }
+
+ // Push the edges onto the frontier
+ if len(elem.edges) > 0 {
+ i.stack = append(i.stack, elem.edges)
+ }
+
+ // Return the leaf values if any
+ if elem.leaf != nil {
+ return elem.leaf.key, elem.leaf.val, true
+ }
+ }
+ return nil, nil, false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
new file mode 100644
index 000000000..359854808
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -0,0 +1,334 @@
+package iradix
+
+import (
+ "bytes"
+ "sort"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning if iteration should
+// be terminated.
+type WalkFn func(k []byte, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+ mutateCh chan struct{}
+ key []byte
+ val interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+ label byte
+ node *Node
+}
+
+// Node is an immutable node in the radix tree
+type Node struct {
+ // mutateCh is closed if this node is modified
+ mutateCh chan struct{}
+
+ // leaf is used to store possible leaf
+ leaf *leafNode
+
+ // prefix is the common prefix we ignore
+ prefix []byte
+
+ // Edges should be stored in-order for iteration.
+ // We avoid a fully materialized slice to save memory,
+ // since in most cases we expect to be sparse
+ edges edges
+}
+
+func (n *Node) isLeaf() bool {
+ return n.leaf != nil
+}
+
+func (n *Node) addEdge(e edge) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= e.label
+ })
+ n.edges = append(n.edges, e)
+ if idx != num {
+ copy(n.edges[idx+1:], n.edges[idx:num])
+ n.edges[idx] = e
+ }
+}
+
+func (n *Node) replaceEdge(e edge) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= e.label
+ })
+ if idx < num && n.edges[idx].label == e.label {
+ n.edges[idx].node = e.node
+ return
+ }
+ panic("replacing missing edge")
+}
+
+func (n *Node) getEdge(label byte) (int, *Node) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ if idx < num && n.edges[idx].label == label {
+ return idx, n.edges[idx].node
+ }
+ return -1, nil
+}
+
+func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ // we want lower bound behavior so return even if it's not an exact match
+ if idx < num {
+ return idx, n.edges[idx].node
+ }
+ return -1, nil
+}
+
+func (n *Node) delEdge(label byte) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ if idx < num && n.edges[idx].label == label {
+ copy(n.edges[idx:], n.edges[idx+1:])
+ n.edges[len(n.edges)-1] = edge{}
+ n.edges = n.edges[:len(n.edges)-1]
+ }
+}
+
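+// GetWatch is used to lookup a specific key, returning
+// the watch channel, value and if it was found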
+func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
+ search := k
+ watch := n.mutateCh
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ if n.isLeaf() {
+ return n.leaf.mutateCh, n.leaf.val, true
+ }
+ break
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Update to the finest granularity as the search makes progress
+ watch = n.mutateCh
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+ return watch, nil, false
+}
+
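+// Get is used to lookup a specific key, returning
+// the value and if it was found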
+func (n *Node) Get(k []byte) (interface{}, bool) {
+ _, val, ok := n.GetWatch(k)
+ return val, ok
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+ var last *leafNode
+ search := k
+ for {
+ // Look for a leaf node
+ if n.isLeaf() {
+ last = n.leaf
+ }
+
+ // Check for key exhaustion
+ if len(search) == 0 {
+ break
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+ if last != nil {
+ return last.key, last.val, true
+ }
+ return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+ for {
+ if n.isLeaf() {
+ return n.leaf.key, n.leaf.val, true
+ }
+ if len(n.edges) > 0 {
+ n = n.edges[0].node
+ } else {
+ break
+ }
+ }
+ return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+ for {
+ if num := len(n.edges); num > 0 {
+ n = n.edges[num-1].node
+ continue
+ }
+ if n.isLeaf() {
+ return n.leaf.key, n.leaf.val, true
+ } else {
+ break
+ }
+ }
+ return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+ return &Iterator{node: n}
+}
+
+// ReverseIterator is used to return an iterator at
+// the given node to walk the tree backwards
+func (n *Node) ReverseIterator() *ReverseIterator {
+ return NewReverseIterator(n)
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+ iter := &rawIterator{node: n}
+ iter.Next()
+ return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+ recursiveWalk(n, fn)
+}
+
+// WalkBackwards is used to walk the tree in reverse order
+func (n *Node) WalkBackwards(fn WalkFn) {
+ reverseRecursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+ search := prefix
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ recursiveWalk(n, fn)
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+
+ } else if bytes.HasPrefix(n.prefix, search) {
+ // Child may be under our search prefix
+ recursiveWalk(n, fn)
+ return
+ } else {
+ break
+ }
+ }
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
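+//
+// For example (sketch): given keys "f", "fo", "foo", and "foobar",
+// WalkPath([]byte("foo"), fn) visits "f", "fo", and "foo".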
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+ search := path
+ for {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return
+ }
+
+ // Check for key exhaustion
+ if len(search) == 0 {
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ return
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *Node, fn WalkFn) bool {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return true
+ }
+
+ // Recurse on the children
+ for _, e := range n.edges {
+ if recursiveWalk(e.node, fn) {
+ return true
+ }
+ }
+ return false
+}
+
+// reverseRecursiveWalk is used to do a reverse pre-order
+// walk of a node recursively. Returns true if the walk
+// should be aborted
+func reverseRecursiveWalk(n *Node, fn WalkFn) bool {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return true
+ }
+
+ // Recurse on the children in reverse order
+ for i := len(n.edges) - 1; i >= 0; i-- {
+ e := n.edges[i]
+ if reverseRecursiveWalk(e.node, fn) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
new file mode 100644
index 000000000..3c6a22525
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
@@ -0,0 +1,78 @@
+package iradix
+
+// rawIterator visits each of the nodes in the tree, even the ones that are not
+// leaves. It keeps track of the effective path (what a leaf at a given node
+// would be called), which is useful for comparing trees.
+type rawIterator struct {
+ // node is the starting node in the tree for the iterator.
+ node *Node
+
+ // stack keeps track of edges in the frontier.
+ stack []rawStackEntry
+
+ // pos is the current position of the iterator.
+ pos *Node
+
+ // path is the effective path of the current iterator position,
+ // regardless of whether the current node is a leaf.
+ path string
+}
+
+// rawStackEntry is used to keep track of the cumulative common path as well as
+// its associated edges in the frontier.
+type rawStackEntry struct {
+ path string
+ edges edges
+}
+
+// Front returns the current node that has been iterated to.
+func (i *rawIterator) Front() *Node {
+ return i.pos
+}
+
+// Path returns the effective path of the current node, even if it's not actually
+// a leaf.
+func (i *rawIterator) Path() string {
+ return i.path
+}
+
+// Next advances the iterator to the next node.
+func (i *rawIterator) Next() {
+ // Initialize our stack if needed.
+ if i.stack == nil && i.node != nil {
+ i.stack = []rawStackEntry{
+ {
+ edges: edges{
+ edge{node: i.node},
+ },
+ },
+ }
+ }
+
+ for len(i.stack) > 0 {
+ // Inspect the last element of the stack.
+ n := len(i.stack)
+ last := i.stack[n-1]
+ elem := last.edges[0].node
+
+ // Update the stack.
+ if len(last.edges) > 1 {
+ i.stack[n-1].edges = last.edges[1:]
+ } else {
+ i.stack = i.stack[:n-1]
+ }
+
+ // Push the edges onto the frontier.
+ if len(elem.edges) > 0 {
+ path := last.path + string(elem.prefix)
+ i.stack = append(i.stack, rawStackEntry{path, elem.edges})
+ }
+
+ i.pos = elem
+ i.path = last.path + string(elem.prefix)
+ return
+ }
+
+ i.pos = nil
+ i.path = ""
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go
new file mode 100644
index 000000000..554fa7129
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go
@@ -0,0 +1,239 @@
+package iradix
+
+import (
+ "bytes"
+)
+
+// ReverseIterator is used to iterate over a set of nodes
+// in reverse in-order
+type ReverseIterator struct {
+ i *Iterator
+
+ // expandedParents stores the set of parent nodes whose relevant children have
+ // already been pushed into the stack. This can happen during seek or during
+ // iteration.
+ //
+ // Unlike forward iteration we need to recurse into children before we can
+ // output the value stored in an internal leaf since all children are greater.
+ // We use this to track whether we have already ensured all the children are
+ // in the stack.
+ expandedParents map[*Node]struct{}
+}
+
+// NewReverseIterator returns a new ReverseIterator at a node
+func NewReverseIterator(n *Node) *ReverseIterator {
+ return &ReverseIterator{
+ i: &Iterator{node: n},
+ }
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+ return ri.i.SeekPrefixWatch(prefix)
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (ri *ReverseIterator) SeekPrefix(prefix []byte) {
+ ri.i.SeekPrefixWatch(prefix)
+}
+
+// SeekReverseLowerBound is used to seek the iterator to the largest key that is
+// lower or equal to the given key. There is no watch variant as it's hard to
+// predict based on the radix structure which node(s) changes might affect the
+// result.
+func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) {
+ // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+ // go because we need only a subset of edges of many nodes in the path to the
+ // leaf with the lower bound. Note that the iterator will still recurse into
+ // children that we don't traverse on the way to the reverse lower bound as it
+ // walks the stack.
+ ri.i.stack = []edges{}
+ // ri.i.node starts off in the common case as pointing to the root node of the
+ // tree. By the time we return we have either found a lower bound and set up
+ // the stack to traverse all larger keys, or we have not and the stack and
+ // node should both be nil to prevent the iterator from assuming it is just
+ // iterating the whole tree from the root node. Either way this needs to end
+ // up as nil so just set it here.
+ n := ri.i.node
+ ri.i.node = nil
+ search := key
+
+ if ri.expandedParents == nil {
+ ri.expandedParents = make(map[*Node]struct{})
+ }
+
+ found := func(n *Node) {
+ ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+ // We need to mark this node as expanded in advance too otherwise the
+ // iterator will attempt to walk all of its children even though they are
+ // greater than the lower bound we have found. We've expanded it in the
+ // sense that all of its children that we want to walk are already in the
+ // stack (i.e. none of them).
+ ri.expandedParents[n] = struct{}{}
+ }
+
+ for {
+ // Compare current prefix with the search key's same-length prefix.
+ var prefixCmp int
+ if len(n.prefix) < len(search) {
+ prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+ } else {
+ prefixCmp = bytes.Compare(n.prefix, search)
+ }
+
+ if prefixCmp < 0 {
+ // Prefix is smaller than search prefix, that means there is no exact
+ // match for the search key. But we are looking in reverse, so the reverse
+ // lower bound will be the largest leaf under this subtree, since it is
+ // the value that would come right before the current search key if it
+ // were in the tree. So we need to follow the maximum path in this subtree
+ // to find it. Note that this is exactly what the iterator will already do
+ // if it finds a node in the stack that has _not_ been marked as expanded
+ // so in this one case we don't call `found` and instead let the iterator
+ // do the expansion and recursion through all the children.
+ ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+ return
+ }
+
+ if prefixCmp > 0 {
+ // Prefix is larger than search prefix, or there is no prefix but we've
+ // also exhausted the search key. Either way, that means there is no
+ // reverse lower bound since nothing comes before our current search
+ // prefix.
+ return
+ }
+
+ // If this is a leaf, something needs to happen! Note that if it's a leaf
+ // and prefixCmp was zero (which it must be to get here) then the leaf value
+ // is either an exact match for the search, or it's lower. It can't be
+ // greater.
+ if n.isLeaf() {
+
+ // Firstly, if it's an exact match, we're done!
+ if bytes.Equal(n.leaf.key, key) {
+ found(n)
+ return
+ }
+
+ // It's not so this node's leaf value must be lower and could still be a
+ // valid contender for reverse lower bound.
+
+ // If it has no children then we are also done.
+ if len(n.edges) == 0 {
+ // This leaf is the lower bound.
+ found(n)
+ return
+ }
+
+ // Finally, this leaf is internal (has children) so we'll keep searching,
+ // but we need to add it to the iterator's stack since it has a leaf value
+ // that needs to be iterated over. It needs to be added to the stack
+ // before its children below as it comes first.
+ ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+ // We also need to mark it as expanded since we'll be adding any of its
+ // relevant children below and so don't want the iterator to re-add them
+ // on its way back up the stack.
+ ri.expandedParents[n] = struct{}{}
+ }
+
+ // Consume the search prefix. Note that this is safe because if n.prefix is
+ // longer than the search slice prefixCmp would have been > 0 above and the
+ // method would have already returned.
+ search = search[len(n.prefix):]
+
+ if len(search) == 0 {
+ // We've exhausted the search key but we are not at a leaf. That means all
+ // children are greater than the search key so a reverse lower bound
+ // doesn't exist in this subtree. Note that there might still be one in
+ // the whole radix tree by following a different path somewhere further
+ // up. If that's the case then the iterator's stack will contain all the
+ // smaller nodes already and Previous will walk through them correctly.
+ return
+ }
+
+ // Otherwise, take the lower bound next edge.
+ idx, lbNode := n.getLowerBoundEdge(search[0])
+
+ // From here, we need to update the stack with all values lower than
+ // the lower bound edge. Since getLowerBoundEdge() returns -1 when the
+ // search prefix is larger than all edges, we need to place idx at the
+ // last edge index so they can all be placed in the stack, since they
+ // come before our search prefix.
+ if idx == -1 {
+ idx = len(n.edges)
+ }
+
+ // Create stack edges for all the strictly lower edges in this node.
+ if len(n.edges[:idx]) > 0 {
+ ri.i.stack = append(ri.i.stack, n.edges[:idx])
+ }
+
+ // Exit if there's no lower bound edge. The stack will have the previous
+ // nodes already.
+ if lbNode == nil {
+ return
+ }
+
+ // Recurse
+ n = lbNode
+ }
+}
+
+// Previous returns the previous node in reverse order
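+//
+// A typical reverse scan (sketch): seek to an upper bound, then drain:
+//
+//	it := tree.Root().ReverseIterator()
+//	it.SeekReverseLowerBound([]byte("050"))
+//	for k, _, ok := it.Previous(); ok; k, _, ok = it.Previous() {
+//		// k sorts <= "050", yielded in descending order
+//	}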
+func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
+ // Initialize our stack if needed
+ if ri.i.stack == nil && ri.i.node != nil {
+ ri.i.stack = []edges{
+ {
+ edge{node: ri.i.node},
+ },
+ }
+ }
+
+ if ri.expandedParents == nil {
+ ri.expandedParents = make(map[*Node]struct{})
+ }
+
+ for len(ri.i.stack) > 0 {
+ // Inspect the last element of the stack
+ n := len(ri.i.stack)
+ last := ri.i.stack[n-1]
+ m := len(last)
+ elem := last[m-1].node
+
+ _, alreadyExpanded := ri.expandedParents[elem]
+
+ // If this is an internal node and we've not seen it already, we need to
+ // leave it in the stack so we can return its possible leaf value _after_
+ // we've recursed through all its children.
+ if len(elem.edges) > 0 && !alreadyExpanded {
+ // record that we've seen this node!
+ ri.expandedParents[elem] = struct{}{}
+ // push child edges onto stack and skip the rest of the loop to recurse
+ // into the largest one.
+ ri.i.stack = append(ri.i.stack, elem.edges)
+ continue
+ }
+
+ // Remove the node from the stack
+ if m > 1 {
+ ri.i.stack[n-1] = last[:m-1]
+ } else {
+ ri.i.stack = ri.i.stack[:n-1]
+ }
+ // We don't need this state any more as it's no longer in the stack so we
+ // won't visit it again
+ if alreadyExpanded {
+ delete(ri.expandedParents, elem)
+ }
+
+ // If this is a leaf, return it
+ if elem.leaf != nil {
+ return elem.leaf.key, elem.leaf.val, true
+ }
+
+ // it's not a leaf so keep walking the stack to find the previous leaf
+ }
+ return nil, nil, false
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/.gitignore b/vendor/github.com/hashicorp/go-memdb/.gitignore
new file mode 100644
index 000000000..11b90db8d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+.idea
diff --git a/vendor/github.com/hashicorp/go-memdb/LICENSE b/vendor/github.com/hashicorp/go-memdb/LICENSE
new file mode 100644
index 000000000..e87a115e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-memdb/README.md b/vendor/github.com/hashicorp/go-memdb/README.md
new file mode 100644
index 000000000..080b7447b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/README.md
@@ -0,0 +1,146 @@
+# go-memdb [CircleCI](https://circleci.com/gh/hashicorp/go-memdb/tree/master)
+
+Provides the `memdb` package that implements a simple in-memory database
+built on immutable radix trees. The database provides Atomicity, Consistency,
+and Isolation from ACID. Because it is in-memory, it does not provide Durability.
+The database is instantiated with a schema that specifies the tables and indices
+that exist and allows transactions to be executed.
+
+The database provides the following:
+
+* Multi-Version Concurrency Control (MVCC) - By leveraging immutable radix trees,
+  the database supports any number of concurrent readers without locking, while
+  still allowing a writer to make progress.
+
+* Transaction Support - The database allows for rich transactions, in which multiple
+  objects are inserted, updated, or deleted. The transactions can span multiple tables
+  and are applied atomically. The database provides atomicity and isolation in ACID
+  terminology, such that updates are not visible until commit.
+
+* Rich Indexing - Tables can support any number of indexes, which can be as simple
+  as a single field index or as advanced as compound field indexes. Certain types,
+  like UUID, can be efficiently compressed from strings into byte indexes for reduced
+  storage requirements.
+
+* Watches - Callers can populate a watch set as part of a query, which can be used to
+  detect when a modification has been made to the database which affects the query
+  results. This lets callers easily watch for changes in the database in a very general
+  way; see the Watch example section below.
+
+For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://pkg.go.dev/github.com/hashicorp/go-memdb).
+
+Example
+=======
+
+Below is a [simple example](https://play.golang.org/p/gCGE9FA4og1) of usage:
+
+```go
+// Create a sample struct
+type Person struct {
+ Email string
+ Name string
+ Age int
+}
+
+// Create the DB schema
+schema := &memdb.DBSchema{
+	Tables: map[string]*memdb.TableSchema{
+		"person": {
+			Name: "person",
+			Indexes: map[string]*memdb.IndexSchema{
+				"id": {
+					Name:    "id",
+					Unique:  true,
+					Indexer: &memdb.StringFieldIndex{Field: "Email"},
+				},
+				"age": {
+					Name:    "age",
+					Unique:  false,
+					Indexer: &memdb.IntFieldIndex{Field: "Age"},
+				},
+			},
+		},
+	},
+}
+
+// Create a new database
+db, err := memdb.NewMemDB(schema)
+if err != nil {
+ panic(err)
+}
+
+// Create a write transaction
+txn := db.Txn(true)
+
+// Insert some people
+people := []*Person{
+	{"joe@aol.com", "Joe", 30},
+	{"lucy@aol.com", "Lucy", 35},
+	{"tariq@aol.com", "Tariq", 21},
+	{"dorothy@aol.com", "Dorothy", 53},
+}
+for _, p := range people {
+ if err := txn.Insert("person", p); err != nil {
+ panic(err)
+ }
+}
+
+// Commit the transaction
+txn.Commit()
+
+// Create a read-only transaction
+txn = db.Txn(false)
+defer txn.Abort()
+
+// Lookup by email
+raw, err := txn.First("person", "id", "joe@aol.com")
+if err != nil {
+ panic(err)
+}
+
+// Say hi!
+fmt.Printf("Hello %s!\n", raw.(*Person).Name)
+
+// List all the people
+it, err := txn.Get("person", "id")
+if err != nil {
+ panic(err)
+}
+
+fmt.Println("All the people:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+ p := obj.(*Person)
+ fmt.Printf(" %s\n", p.Name)
+}
+
+// Range scan over people with ages between 25 and 35 inclusive
+it, err = txn.LowerBound("person", "age", 25)
+if err != nil {
+ panic(err)
+}
+
+fmt.Println("People aged 25 - 35:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+ p := obj.(*Person)
+ if p.Age > 35 {
+ break
+ }
+ fmt.Printf(" %s is aged %d\n", p.Name, p.Age)
+}
+// Output:
+// Hello Joe!
+// All the people:
+// Dorothy
+// Joe
+// Lucy
+// Tariq
+// People aged 25 - 35:
+// Joe is aged 30
+// Lucy is aged 35
+```
+
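+Watch example
+=============
+
+The watch support mentioned above can be combined with the example data. The
+sketch below assumes the `Person` table populated earlier plus a `time` import;
+`FirstWatch` and `WatchSet` come from this package, but treat the snippet as an
+illustrative sketch rather than upstream documentation.
+
+```go
+// Fetch Joe together with a watch channel that is closed when data
+// relevant to this query changes.
+txn := db.Txn(false)
+defer txn.Abort()
+
+watchCh, raw, err := txn.FirstWatch("person", "id", "joe@aol.com")
+if err != nil {
+	panic(err)
+}
+fmt.Printf("Watching %s\n", raw.(*Person).Name)
+
+// Collect channels into a watch set and block until one fires or the
+// timeout channel is closed; Watch reports whether it timed out.
+ws := memdb.NewWatchSet()
+ws.Add(watchCh)
+
+timeout := make(chan struct{})
+go func() {
+	time.Sleep(time.Second)
+	close(timeout)
+}()
+fmt.Println("timed out:", ws.Watch(timeout))
+```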
diff --git a/vendor/github.com/hashicorp/go-memdb/changes.go b/vendor/github.com/hashicorp/go-memdb/changes.go
new file mode 100644
index 000000000..35089f5ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/changes.go
@@ -0,0 +1,34 @@
+package memdb
+
+// Changes describes a set of mutations to MemDB tables performed during a
+// transaction.
+type Changes []Change
+
+// Change describes a mutation to an object in a table.
+type Change struct {
+ Table string
+ Before interface{}
+ After interface{}
+
+ // primaryKey stores the raw key value from the primary index so that we can
+	// de-duplicate multiple updates of the same object in the same transaction,
+	// but we don't expose this implementation detail to the consumer.
+ primaryKey []byte
+}
+
+// Created returns true if the mutation describes a new object being inserted.
+func (m *Change) Created() bool {
+ return m.Before == nil && m.After != nil
+}
+
+// Updated returns true if the mutation describes an existing object being
+// updated.
+func (m *Change) Updated() bool {
+ return m.Before != nil && m.After != nil
+}
+
+// Deleted returns true if the mutation describes an existing object being
+// deleted.
+func (m *Change) Deleted() bool {
+ return m.Before != nil && m.After == nil
+}
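+
+// Illustrative usage sketch (db and Person come from the package README, and
+// the Changes accessor on Txn is how the recorded set is retrieved): change
+// tracking must be enabled before mutations are made, and the helpers above
+// classify each recorded mutation.
+//
+//	txn := db.Txn(true)
+//	txn.TrackChanges()
+//	_ = txn.Insert("person", &Person{Email: "joe@aol.com", Name: "Joe", Age: 31})
+//	for _, ch := range txn.Changes() {
+//		switch {
+//		case ch.Created():
+//			fmt.Println("created in", ch.Table)
+//		case ch.Updated():
+//			fmt.Println("updated in", ch.Table)
+//		case ch.Deleted():
+//			fmt.Println("deleted in", ch.Table)
+//		}
+//	}
+//	txn.Commit()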
diff --git a/vendor/github.com/hashicorp/go-memdb/filter.go b/vendor/github.com/hashicorp/go-memdb/filter.go
new file mode 100644
index 000000000..0071ab311
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/filter.go
@@ -0,0 +1,38 @@
+package memdb
+
+// FilterFunc is a function that takes the results of an iterator and returns
+// whether the result should be filtered out: returning true drops the value.
+type FilterFunc func(interface{}) bool
+
+// FilterIterator is used to wrap a ResultIterator and apply a filter over it.
+type FilterIterator struct {
+ // filter is the filter function applied over the base iterator.
+ filter FilterFunc
+
+ // iter is the iterator that is being wrapped.
+ iter ResultIterator
+}
+
+// NewFilterIterator wraps a ResultIterator. The filter function is applied
+// to each value returned by a call to iter.Next.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned FilterIterator.
+func NewFilterIterator(iter ResultIterator, filter FilterFunc) *FilterIterator {
+ return &FilterIterator{
+ filter: filter,
+ iter: iter,
+ }
+}
+
+// WatchCh returns the watch channel of the wrapped iterator.
+func (f *FilterIterator) WatchCh() <-chan struct{} { return f.iter.WatchCh() }
+
+// Next returns the next non-filtered result from the wrapped iterator.
+func (f *FilterIterator) Next() interface{} {
+ for {
+ if value := f.iter.Next(); value == nil || !f.filter(value) {
+ return value
+ }
+ }
+}
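+
+// Illustrative usage sketch (Person and a populated read transaction are
+// assumed, as in the package README): the FilterFunc returns true for values
+// that should be dropped from iteration.
+//
+//	it, _ := txn.Get("person", "id")
+//	adults := NewFilterIterator(it, func(raw interface{}) bool {
+//		return raw.(*Person).Age < 18 // true means "filter out"
+//	})
+//	for obj := adults.Next(); obj != nil; obj = adults.Next() {
+//		fmt.Println(obj.(*Person).Name)
+//	}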
diff --git a/vendor/github.com/hashicorp/go-memdb/index.go b/vendor/github.com/hashicorp/go-memdb/index.go
new file mode 100644
index 000000000..172a0e86b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/index.go
@@ -0,0 +1,931 @@
+package memdb
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// Indexer is an interface used for defining indexes. Indexes are used
+// for efficient lookup of objects in a MemDB table. An Indexer must also
+// implement one of SingleIndexer or MultiIndexer.
+//
+// Indexers are primarily responsible for returning the lookup key as
+// a byte slice. The byte slice is the key data in the underlying data storage.
+type Indexer interface {
+ // FromArgs is called to build the exact index key from a list of arguments.
+ FromArgs(args ...interface{}) ([]byte, error)
+}
+
+// SingleIndexer is an interface used for defining indexes that generate a
+// single value per object
+type SingleIndexer interface {
+ // FromObject extracts the index value from an object. The return values
+ // are whether the index value was found, the index value, and any error
+ // while extracting the index value, respectively.
+ FromObject(raw interface{}) (bool, []byte, error)
+}
+
+// MultiIndexer is an interface used for defining indexes that generate
+// multiple values per object. Each value is stored as a separate index
+// pointing to the same object.
+//
+// For example, an index that extracts the first and last name of a person
+// and allows lookup based on either would be a MultiIndexer. The FromObject
+// of this example would split the first and last name and return both as
+// values.
+type MultiIndexer interface {
+ // FromObject extracts index values from an object. The return values
+ // are the same as a SingleIndexer except there can be multiple index
+ // values.
+ FromObject(raw interface{}) (bool, [][]byte, error)
+}
+
+// PrefixIndexer is an optional interface on top of an Indexer that allows
+// indexes to support prefix-based iteration.
+type PrefixIndexer interface {
+ // PrefixFromArgs is the same as FromArgs for an Indexer except that
+ // the index value returned should return all prefix-matched values.
+ PrefixFromArgs(args ...interface{}) ([]byte, error)
+}
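+
+// A minimal custom indexer sketch (the Person type and its Domain method are
+// assumptions for illustration, not part of this package): implementing
+// Indexer plus SingleIndexer is enough to plug a type into an IndexSchema.
+//
+//	type DomainIndex struct{}
+//
+//	func (DomainIndex) FromObject(raw interface{}) (bool, []byte, error) {
+//		p, ok := raw.(*Person)
+//		if !ok {
+//			return false, nil, fmt.Errorf("unexpected type %T", raw)
+//		}
+//		// Null-terminate, like the built-in string indexers.
+//		return true, []byte(p.Domain() + "\x00"), nil
+//	}
+//
+//	func (DomainIndex) FromArgs(args ...interface{}) ([]byte, error) {
+//		if len(args) != 1 {
+//			return nil, fmt.Errorf("must provide only a single argument")
+//		}
+//		s, ok := args[0].(string)
+//		if !ok {
+//			return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+//		}
+//		return []byte(s + "\x00"), nil
+//	}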
+
+// StringFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field.
+type StringFieldIndex struct {
+ Field string
+ Lowercase bool
+}
+
+func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(s.Field)
+ isPtr := fv.Kind() == reflect.Ptr
+ fv = reflect.Indirect(fv)
+ if !isPtr && !fv.IsValid() {
+ return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid (isPtr: %v)", s.Field, obj, isPtr)
+ }
+
+ if isPtr && !fv.IsValid() {
+ val := ""
+ return false, []byte(val), nil
+ }
+
+ val := fv.String()
+ if val == "" {
+ return false, nil, nil
+ }
+
+ if s.Lowercase {
+ val = strings.ToLower(val)
+ }
+
+ // Add the null character as a terminator
+ val += "\x00"
+ return true, []byte(val), nil
+}
+
+func (s *StringFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ arg, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ if s.Lowercase {
+ arg = strings.ToLower(arg)
+ }
+ // Add the null character as a terminator
+ arg += "\x00"
+ return []byte(arg), nil
+}
+
+func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ val, err := s.FromArgs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Strip the null terminator, the rest is a prefix
+ n := len(val)
+ if n > 0 {
+ return val[:n-1], nil
+ }
+ return val, nil
+}
+
+// StringSliceFieldIndex builds an index from a field on an object that is a
+// string slice ([]string). Each value within the string slice can be used for
+// lookup.
+type StringSliceFieldIndex struct {
+ Field string
+ Lowercase bool
+}
+
+func (s *StringSliceFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(s.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj)
+ }
+
+ if fv.Kind() != reflect.Slice || fv.Type().Elem().Kind() != reflect.String {
+ return false, nil, fmt.Errorf("field '%s' is not a string slice", s.Field)
+ }
+
+ length := fv.Len()
+ vals := make([][]byte, 0, length)
+ for i := 0; i < fv.Len(); i++ {
+ val := fv.Index(i).String()
+ if val == "" {
+ continue
+ }
+
+ if s.Lowercase {
+ val = strings.ToLower(val)
+ }
+
+ // Add the null character as a terminator
+ val += "\x00"
+ vals = append(vals, []byte(val))
+ }
+ if len(vals) == 0 {
+ return false, nil, nil
+ }
+ return true, vals, nil
+}
+
+func (s *StringSliceFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ arg, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ if s.Lowercase {
+ arg = strings.ToLower(arg)
+ }
+ // Add the null character as a terminator
+ arg += "\x00"
+ return []byte(arg), nil
+}
+
+func (s *StringSliceFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ val, err := s.FromArgs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Strip the null terminator, the rest is a prefix
+ n := len(val)
+ if n > 0 {
+ return val[:n-1], nil
+ }
+ return val, nil
+}
+
+// StringMapFieldIndex is used to extract a field of type map[string]string
+// from an object using reflection and builds an index on that field.
+//
+// Note that although FromArgs in theory supports using either one or
+// two arguments, there is a bug: FromObject only creates an index
+// using key/value, and does not also create an index using key. This
+// means a lookup using one argument will never actually work.
+//
+// It is currently left as-is to prevent backwards compatibility
+// issues.
+//
+// TODO: Fix this in the next major bump.
+type StringMapFieldIndex struct {
+ Field string
+ Lowercase bool
+}
+
+var MapType = reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf("")).Kind()
+
+func (s *StringMapFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(s.Field)
+ if !fv.IsValid() {
+ return false, nil, fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj)
+ }
+
+ if fv.Kind() != MapType {
+ return false, nil, fmt.Errorf("field '%s' is not a map[string]string", s.Field)
+ }
+
+ length := fv.Len()
+ vals := make([][]byte, 0, length)
+ for _, key := range fv.MapKeys() {
+ k := key.String()
+ if k == "" {
+ continue
+ }
+ val := fv.MapIndex(key).String()
+
+ if s.Lowercase {
+ k = strings.ToLower(k)
+ val = strings.ToLower(val)
+ }
+
+ // Add the null character as a terminator
+ k += "\x00" + val + "\x00"
+
+ vals = append(vals, []byte(k))
+ }
+ if len(vals) == 0 {
+ return false, nil, nil
+ }
+ return true, vals, nil
+}
+
+// WARNING: Because of the bug in FromObject described above, lookups using
+// the single-argument form will never match anything.
+func (s *StringMapFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) > 2 || len(args) == 0 {
+ return nil, fmt.Errorf("must provide one or two arguments")
+ }
+ key, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ if s.Lowercase {
+ key = strings.ToLower(key)
+ }
+ // Add the null character as a terminator
+ key += "\x00"
+
+ if len(args) == 2 {
+ val, ok := args[1].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[1])
+ }
+ if s.Lowercase {
+ val = strings.ToLower(val)
+ }
+ // Add the null character as a terminator
+ key += val + "\x00"
+ }
+
+ return []byte(key), nil
+}
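+
+// Illustrative lookup sketch (assuming an index named "meta" declared with
+// &StringMapFieldIndex{Field: "Tags"} over a Tags map[string]string field):
+// keys and values are both null-terminated into the index key, so a lookup
+// supplies the pair:
+//
+//	raw, err := txn.First("person", "meta", "team", "infra")
+//
+// which matches objects whose Tags["team"] == "infra". Per the warning above,
+// the single-argument form should not be relied upon.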
+
+// IntFieldIndex is used to extract an int field from an object using
+// reflection and builds an index on that field.
+type IntFieldIndex struct {
+ Field string
+}
+
+func (i *IntFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(i.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+ }
+
+ // Check the type
+ k := fv.Kind()
+ size, ok := IsIntType(k)
+ if !ok {
+ return false, nil, fmt.Errorf("field %q is of type %v; want an int", i.Field, k)
+ }
+
+ // Get the value and encode it
+ val := fv.Int()
+ buf := encodeInt(val, size)
+
+ return true, buf, nil
+}
+
+func (i *IntFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+
+ v := reflect.ValueOf(args[0])
+ if !v.IsValid() {
+ return nil, fmt.Errorf("%#v is invalid", args[0])
+ }
+
+ k := v.Kind()
+ size, ok := IsIntType(k)
+ if !ok {
+		return nil, fmt.Errorf("arg is of type %v; want an int", k)
+ }
+
+ val := v.Int()
+ buf := encodeInt(val, size)
+
+ return buf, nil
+}
+
+func encodeInt(val int64, size int) []byte {
+ buf := make([]byte, size)
+
+ // This bit flips the sign bit on any sized signed twos-complement integer,
+ // which when truncated to a uint of the same size will bias the value such
+ // that the maximum negative int becomes 0, and the maximum positive int
+ // becomes the maximum positive uint.
+ scaled := val ^ int64(-1<<(size*8-1))
+
+ switch size {
+ case 1:
+ buf[0] = uint8(scaled)
+ case 2:
+ binary.BigEndian.PutUint16(buf, uint16(scaled))
+ case 4:
+ binary.BigEndian.PutUint32(buf, uint32(scaled))
+ case 8:
+ binary.BigEndian.PutUint64(buf, uint64(scaled))
+ default:
+ panic(fmt.Sprintf("unsupported int size parameter: %d", size))
+ }
+
+ return buf
+}
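+
+// Worked example of the bias: for size 8, flipping the sign bit makes the
+// big-endian encodings sort in natural signed order under bytewise
+// comparison:
+//
+//	encodeInt(-1, 8) => 7f ff ff ff ff ff ff ff
+//	encodeInt(0, 8)  => 80 00 00 00 00 00 00 00
+//	encodeInt(1, 8)  => 80 00 00 00 00 00 00 01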
+
+// IsIntType returns whether the passed type is a type of int and the number
+// of bytes needed to encode the type.
+func IsIntType(k reflect.Kind) (size int, okay bool) {
+ switch k {
+ case reflect.Int:
+ return strconv.IntSize / 8, true
+ case reflect.Int8:
+ return 1, true
+ case reflect.Int16:
+ return 2, true
+ case reflect.Int32:
+ return 4, true
+ case reflect.Int64:
+ return 8, true
+ default:
+ return 0, false
+ }
+}
+
+// UintFieldIndex is used to extract a uint field from an object using
+// reflection and builds an index on that field.
+type UintFieldIndex struct {
+ Field string
+}
+
+func (u *UintFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(u.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj)
+ }
+
+ // Check the type
+ k := fv.Kind()
+ size, ok := IsUintType(k)
+ if !ok {
+ return false, nil, fmt.Errorf("field %q is of type %v; want a uint", u.Field, k)
+ }
+
+ // Get the value and encode it
+ val := fv.Uint()
+ buf := encodeUInt(val, size)
+
+ return true, buf, nil
+}
+
+func (u *UintFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+
+ v := reflect.ValueOf(args[0])
+ if !v.IsValid() {
+ return nil, fmt.Errorf("%#v is invalid", args[0])
+ }
+
+ k := v.Kind()
+ size, ok := IsUintType(k)
+ if !ok {
+ return nil, fmt.Errorf("arg is of type %v; want a uint", k)
+ }
+
+ val := v.Uint()
+ buf := encodeUInt(val, size)
+
+ return buf, nil
+}
+
+func encodeUInt(val uint64, size int) []byte {
+ buf := make([]byte, size)
+
+ switch size {
+ case 1:
+ buf[0] = uint8(val)
+ case 2:
+ binary.BigEndian.PutUint16(buf, uint16(val))
+ case 4:
+ binary.BigEndian.PutUint32(buf, uint32(val))
+ case 8:
+ binary.BigEndian.PutUint64(buf, val)
+ default:
+ panic(fmt.Sprintf("unsupported uint size parameter: %d", size))
+ }
+
+ return buf
+}
+
+// IsUintType returns whether the passed type is a type of uint and the number
+// of bytes needed to encode the type.
+func IsUintType(k reflect.Kind) (size int, okay bool) {
+ switch k {
+ case reflect.Uint:
+ return strconv.IntSize / 8, true
+ case reflect.Uint8:
+ return 1, true
+ case reflect.Uint16:
+ return 2, true
+ case reflect.Uint32:
+ return 4, true
+ case reflect.Uint64:
+ return 8, true
+ default:
+ return 0, false
+ }
+}
+
+// BoolFieldIndex is used to extract a boolean field from an object using
+// reflection and builds an index on that field.
+type BoolFieldIndex struct {
+ Field string
+}
+
+func (i *BoolFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(i.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+ }
+
+ // Check the type
+ k := fv.Kind()
+ if k != reflect.Bool {
+ return false, nil, fmt.Errorf("field %q is of type %v; want a bool", i.Field, k)
+ }
+
+ // Get the value and encode it
+ buf := make([]byte, 1)
+ if fv.Bool() {
+ buf[0] = 1
+ }
+
+ return true, buf, nil
+}
+
+func (i *BoolFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromBoolArgs(args)
+}
+
+// UUIDFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field by treating
+// it as a UUID. This is an optimization over using a StringFieldIndex,
+// as the UUID can be more compactly represented in byte form.
+type UUIDFieldIndex struct {
+ Field string
+}
+
+func (u *UUIDFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(u.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj)
+ }
+
+ val := fv.String()
+ if val == "" {
+ return false, nil, nil
+ }
+
+ buf, err := u.parseString(val, true)
+ return true, buf, err
+}
+
+func (u *UUIDFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ switch arg := args[0].(type) {
+ case string:
+ return u.parseString(arg, true)
+ case []byte:
+ if len(arg) != 16 {
+			return nil, fmt.Errorf("byte slice must be 16 bytes")
+ }
+ return arg, nil
+ default:
+ return nil,
+ fmt.Errorf("argument must be a string or byte slice: %#v", args[0])
+ }
+}
+
+func (u *UUIDFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ switch arg := args[0].(type) {
+ case string:
+ return u.parseString(arg, false)
+ case []byte:
+ return arg, nil
+ default:
+ return nil,
+ fmt.Errorf("argument must be a string or byte slice: %#v", args[0])
+ }
+}
+
+// parseString parses a UUID from the string. If enforceLength is false, it will
+// parse a partial UUID. An error is returned if the input, stripped of hyphens,
+// is not of even length.
+func (u *UUIDFieldIndex) parseString(s string, enforceLength bool) ([]byte, error) {
+ // Verify the length
+ l := len(s)
+ if enforceLength && l != 36 {
+ return nil, fmt.Errorf("UUID must be 36 characters")
+ } else if l > 36 {
+		return nil, fmt.Errorf("Invalid UUID length. UUIDs have 36 characters; got %d", l)
+ }
+
+ hyphens := strings.Count(s, "-")
+ if hyphens > 4 {
+ return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens)
+ }
+
+ // The sanitized length is the length of the original string without the "-".
+ sanitized := strings.Replace(s, "-", "", -1)
+ sanitizedLength := len(sanitized)
+ if sanitizedLength%2 != 0 {
+		return nil, fmt.Errorf("Input (without hyphens) must be of even length")
+ }
+
+ dec, err := hex.DecodeString(sanitized)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid UUID: %v", err)
+ }
+
+ return dec, nil
+}
+
+// FieldSetIndex is used to extract a field from an object using reflection and
+// builds an index on whether the field is set by comparing it against its
+// type's nil value.
+type FieldSetIndex struct {
+ Field string
+}
+
+func (f *FieldSetIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(f.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", f.Field, obj)
+ }
+
+ if fv.Interface() == reflect.Zero(fv.Type()).Interface() {
+ return true, []byte{0}, nil
+ }
+
+ return true, []byte{1}, nil
+}
+
+func (f *FieldSetIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromBoolArgs(args)
+}
+
+// ConditionalIndex builds an index based on a condition specified by a passed
+// user function. This function may examine the passed object and return a
+// boolean to encapsulate an arbitrarily complex conditional.
+type ConditionalIndex struct {
+ Conditional ConditionalIndexFunc
+}
+
+// ConditionalIndexFunc is the required function interface for a
+// ConditionalIndex.
+type ConditionalIndexFunc func(obj interface{}) (bool, error)
+
+func (c *ConditionalIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ // Call the user's function
+ res, err := c.Conditional(obj)
+ if err != nil {
+ return false, nil, fmt.Errorf("ConditionalIndexFunc(%#v) failed: %v", obj, err)
+ }
+
+ if res {
+ return true, []byte{1}, nil
+ }
+
+ return true, []byte{0}, nil
+}
+
+func (c *ConditionalIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromBoolArgs(args)
+}
+
+// fromBoolArgs is a helper that expects only a single boolean argument and
+// returns a single length byte array containing either a one or zero depending
+// on whether the passed input is true or false respectively.
+func fromBoolArgs(args []interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+
+ if val, ok := args[0].(bool); !ok {
+ return nil, fmt.Errorf("argument must be a boolean type: %#v", args[0])
+ } else if val {
+ return []byte{1}, nil
+ }
+
+ return []byte{0}, nil
+}
+
+// CompoundIndex is used to build an index using multiple sub-indexes
+// Prefix based iteration is supported as long as the appropriate prefix
+// of indexers support it. All sub-indexers are only assumed to expect
+// a single argument.
+type CompoundIndex struct {
+ Indexes []Indexer
+
+	// AllowMissing results in an index based on only the indexers
+	// that return data. If true, you may end up with only a prefix of
+	// the columns indexed (say two of three), which might be useful
+	// for an index scan. Otherwise, the CompoundIndex requires all
+	// indexers to be satisfied.
+ AllowMissing bool
+}
+
+func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) {
+ var out []byte
+ for i, idxRaw := range c.Indexes {
+ idx, ok := idxRaw.(SingleIndexer)
+ if !ok {
+			return false, nil, fmt.Errorf("sub-index %d error: sub-index must be a SingleIndexer", i)
+ }
+ ok, val, err := idx.FromObject(raw)
+ if err != nil {
+ return false, nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ if !ok {
+ if c.AllowMissing {
+ break
+ } else {
+ return false, nil, nil
+ }
+ }
+ out = append(out, val...)
+ }
+ return true, out, nil
+}
+
+func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != len(c.Indexes) {
+ return nil, fmt.Errorf("non-equivalent argument count and index fields")
+ }
+ var out []byte
+ for i, arg := range args {
+ val, err := c.Indexes[i].FromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ }
+ return out, nil
+}
+
+func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) > len(c.Indexes) {
+ return nil, fmt.Errorf("more arguments than index fields")
+ }
+ var out []byte
+ for i, arg := range args {
+ if i+1 < len(args) {
+ val, err := c.Indexes[i].FromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ } else {
+ prefixIndexer, ok := c.Indexes[i].(PrefixIndexer)
+ if !ok {
+ return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i)
+ }
+ val, err := prefixIndexer.PrefixFromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ }
+ }
+ return out, nil
+}
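+
+// Illustrative schema sketch (the First and Last field names are assumptions
+// for illustration): a CompoundIndex over two string sub-indexes. A full
+// lookup passes one argument per sub-index, e.g.
+// txn.First("person", "name", "Jane", "Doe"), while a prefix scan over just
+// the first field can use txn.Get("person", "name_prefix", "Jane"), since
+// StringFieldIndex supports prefixes.
+//
+//	"name": {
+//		Name:   "name",
+//		Unique: true,
+//		Indexer: &memdb.CompoundIndex{
+//			Indexes: []memdb.Indexer{
+//				&memdb.StringFieldIndex{Field: "First"},
+//				&memdb.StringFieldIndex{Field: "Last"},
+//			},
+//		},
+//	},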
+
+// CompoundMultiIndex is used to build an index using multiple
+// sub-indexes.
+//
+// Unlike CompoundIndex, CompoundMultiIndex can have both
+// SingleIndexer and MultiIndexer sub-indexers. However, each
+// MultiIndexer adds considerable overhead/complexity in terms of
+// the number of indexes created under-the-hood. It is not suggested
+// to use more than one or two, if possible.
+//
+// Another change from CompoundIndex is that if AllowMissing is
+// set, not only is it valid to have empty index fields, but index
+// values will still be created up to the first empty index. This means
+// that if you have a value with an empty field, rather than using a
+// prefix for lookup, you can simply pass in fewer arguments. As an
+// example, if {Foo, Bar} is indexed but Bar is missing for a value
+// and AllowMissing is set, an index will still be created for {Foo},
+// and it is valid to do a lookup passing in only Foo as an argument.
+// Note that the ordering isn't guaranteed -- it's last-insert wins --
+// but that is equally true for two objects with identical index
+// values when AllowMissing is not used.
+//
+// Because StringMapFieldIndexers can take a varying number of args,
+// it is currently a requirement that whenever it is used, two
+// arguments must _always_ be provided for it. In theory we only
+// need one, except a bug in that indexer means the single-argument
+// version will never work. You can leave the second argument nil,
+// but it will never produce a value. We support this for whenever
+// that bug is fixed, likely in a next major version bump.
+//
+// Prefix-based indexing is not currently supported.
+type CompoundMultiIndex struct {
+ Indexes []Indexer
+
+	// AllowMissing results in an index based on only the indexers
+	// that return data. If true, you may end up with only a prefix of
+	// the columns indexed (say two of three), which might be useful
+	// for an index scan. Otherwise, CompoundMultiIndex requires all
+	// indexers to be satisfied.
+ AllowMissing bool
+}
+
+func (c *CompoundMultiIndex) FromObject(raw interface{}) (bool, [][]byte, error) {
+ // At each entry, builder is storing the results from the next index
+ builder := make([][][]byte, 0, len(c.Indexes))
+
+forloop:
+ // This loop goes through each indexer and adds the value(s) provided to the next
+ // entry in the slice. We can then later walk it like a tree to construct the indices.
+ for i, idxRaw := range c.Indexes {
+ switch idx := idxRaw.(type) {
+ case SingleIndexer:
+ ok, val, err := idx.FromObject(raw)
+ if err != nil {
+ return false, nil, fmt.Errorf("single sub-index %d error: %v", i, err)
+ }
+ if !ok {
+ if c.AllowMissing {
+ break forloop
+ } else {
+ return false, nil, nil
+ }
+ }
+ builder = append(builder, [][]byte{val})
+
+ case MultiIndexer:
+ ok, vals, err := idx.FromObject(raw)
+ if err != nil {
+ return false, nil, fmt.Errorf("multi sub-index %d error: %v", i, err)
+ }
+ if !ok {
+ if c.AllowMissing {
+ break forloop
+ } else {
+ return false, nil, nil
+ }
+ }
+
+ // Add each of the new values to each of the old values
+ builder = append(builder, vals)
+
+ default:
+ return false, nil, fmt.Errorf("sub-index %d does not satisfy either SingleIndexer or MultiIndexer", i)
+ }
+ }
+
+	// Start with a generous capacity (the cube of the indexer count) to
+	// avoid resizing if possible. Note that ^ is XOR in Go, not
+	// exponentiation, so the cube is spelled out as n*n*n.
+	n := len(c.Indexes)
+	out := make([][]byte, 0, n*n*n)
+
+ // We are walking through the builder slice essentially in a depth-first fashion,
+ // building the prefix and leaves as we go. If AllowMissing is false, we only insert
+ // these full paths to leaves. Otherwise, we also insert each prefix along the way.
+ // This allows for lookup in FromArgs when AllowMissing is true that does not contain
+ // the full set of arguments. e.g. for {Foo, Bar} where an object has only the Foo
+ // field specified as "abc", it is valid to call FromArgs with just "abc".
+ var walkVals func([]byte, int)
+ walkVals = func(currPrefix []byte, depth int) {
+ if depth >= len(builder) {
+ return
+ }
+
+ if depth == len(builder)-1 {
+ // These are the "leaves", so append directly
+ for _, v := range builder[depth] {
+ outcome := make([]byte, len(currPrefix))
+ copy(outcome, currPrefix)
+ out = append(out, append(outcome, v...))
+ }
+ return
+ }
+		for _, v := range builder[depth] {
+			// Copy the prefix rather than appending in place, so sibling
+			// branches never share (and clobber) the same backing array.
+			nextPrefix := make([]byte, len(currPrefix), len(currPrefix)+len(v))
+			copy(nextPrefix, currPrefix)
+			nextPrefix = append(nextPrefix, v...)
+ if c.AllowMissing {
+ out = append(out, nextPrefix)
+ }
+ walkVals(nextPrefix, depth+1)
+ }
+ }
+
+ walkVals(nil, 0)
+
+ return true, out, nil
+}
+
+func (c *CompoundMultiIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ var stringMapCount int
+ var argCount int
+ for _, index := range c.Indexes {
+ if argCount >= len(args) {
+ break
+ }
+ if _, ok := index.(*StringMapFieldIndex); ok {
+ // We require pairs for StringMapFieldIndex, but only got one
+ if argCount+1 >= len(args) {
+ return nil, errors.New("invalid number of arguments")
+ }
+ stringMapCount++
+ argCount += 2
+ } else {
+ argCount++
+ }
+ }
+ argCount = 0
+
+ switch c.AllowMissing {
+ case true:
+ if len(args) > len(c.Indexes)+stringMapCount {
+ return nil, errors.New("too many arguments")
+ }
+
+ default:
+ if len(args) != len(c.Indexes)+stringMapCount {
+ return nil, errors.New("number of arguments does not equal number of indexers")
+ }
+ }
+
+ var out []byte
+ var val []byte
+ var err error
+ for i, idx := range c.Indexes {
+ if argCount >= len(args) {
+ // We're done; should only hit this if AllowMissing
+ break
+ }
+ if _, ok := idx.(*StringMapFieldIndex); ok {
+ if args[argCount+1] == nil {
+ val, err = idx.FromArgs(args[argCount])
+ } else {
+ val, err = idx.FromArgs(args[argCount : argCount+2]...)
+ }
+ argCount += 2
+ } else {
+ val, err = idx.FromArgs(args[argCount])
+ argCount++
+ }
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ }
+ return out, nil
+}
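+
+// Illustrative lookup sketch for the AllowMissing semantics described above
+// (the "name_tags" index over assumed fields {Name, Tags}, where Tags uses a
+// StringSliceFieldIndex, is hypothetical): an object whose Tags slice is
+// empty still gets an index entry for {Name} when AllowMissing is set, so
+//
+//	txn.First("person", "name_tags", "Joe")
+//
+// can find it, while fully-tagged objects are also reachable with both
+// arguments:
+//
+//	txn.First("person", "name_tags", "Joe", "admin")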
diff --git a/vendor/github.com/hashicorp/go-memdb/memdb.go b/vendor/github.com/hashicorp/go-memdb/memdb.go
new file mode 100644
index 000000000..0508d0aae
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/memdb.go
@@ -0,0 +1,116 @@
+// Package memdb provides an in-memory database that supports transactions
+// and MVCC.
+package memdb
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+	iradix "github.com/hashicorp/go-immutable-radix"
+)
+
+// MemDB is an in-memory database providing Atomicity, Consistency, and
+// Isolation from ACID. MemDB doesn't provide Durability since it is an
+// in-memory database.
+//
+// MemDB provides a table abstraction to store objects (rows) with multiple
+// indexes based on inserted values. The database makes use of immutable radix
+// trees to provide transactions and MVCC.
+//
+// Objects inserted into MemDB are not copied. It is **extremely important**
+// that objects are not modified in-place after they are inserted since they
+// are stored directly in MemDB. It remains unsafe to modify inserted objects
+// even after they've been deleted from MemDB since there may still be older
+// snapshots of the DB being read from other goroutines.
+type MemDB struct {
+ schema *DBSchema
+ root unsafe.Pointer // *iradix.Tree underneath
+ primary bool
+
+ // There can only be a single writer at once
+ writer sync.Mutex
+}
+
+// NewMemDB creates a new MemDB with the given schema.
+func NewMemDB(schema *DBSchema) (*MemDB, error) {
+ // Validate the schema
+ if err := schema.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Create the MemDB
+ db := &MemDB{
+ schema: schema,
+ root: unsafe.Pointer(iradix.New()),
+ primary: true,
+ }
+ if err := db.initialize(); err != nil {
+ return nil, err
+ }
+
+ return db, nil
+}
+
+// DBSchema returns the schema in use, for introspection.
+//
+// The method is intended for *read-only* debugging use cases; the
+// returned schema should *never be modified in-place*.
+func (db *MemDB) DBSchema() *DBSchema {
+ return db.schema
+}
+
+// getRoot is used to do an atomic load of the root pointer
+func (db *MemDB) getRoot() *iradix.Tree {
+ root := (*iradix.Tree)(atomic.LoadPointer(&db.root))
+ return root
+}
+
+// Txn is used to start a new transaction in either read or write mode.
+// There can only be a single concurrent writer, but any number of readers.
+func (db *MemDB) Txn(write bool) *Txn {
+ if write {
+ db.writer.Lock()
+ }
+ txn := &Txn{
+ db: db,
+ write: write,
+ rootTxn: db.getRoot().Txn(),
+ }
+ return txn
+}
+
+// Snapshot is used to capture a point-in-time snapshot of the database that
+// will not be affected by any write operations to the existing DB.
+//
+// If MemDB is storing reference-based values (pointers, maps, slices, etc.),
+// the Snapshot will not deep copy those values. Therefore, it is still unsafe
+// to modify any inserted values in either DB.
+func (db *MemDB) Snapshot() *MemDB {
+ clone := &MemDB{
+ schema: db.schema,
+ root: unsafe.Pointer(db.getRoot()),
+ primary: false,
+ }
+ return clone
+}
+
+// initialize is used to set up the DB for use after creation. This should
+// be called only once after allocating a MemDB.
+func (db *MemDB) initialize() error {
+ root := db.getRoot()
+ for tName, tableSchema := range db.schema.Tables {
+ for iName := range tableSchema.Indexes {
+ index := iradix.New()
+ path := indexPath(tName, iName)
+ root, _, _ = root.Insert(path, index)
+ }
+ }
+ db.root = unsafe.Pointer(root)
+ return nil
+}
+
+// indexPath returns the path from the root to the given table index
+func indexPath(table, index string) []byte {
+ return []byte(table + "." + index)
+}
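+
+// Illustrative sketch of Snapshot semantics (Person and db are assumed from
+// the package README): the snapshot shares the current root, so writes
+// committed to db afterwards are not visible through it.
+//
+//	snap := db.Snapshot()
+//
+//	txn := db.Txn(true)
+//	_ = txn.Insert("person", &Person{Email: "new@aol.com", Name: "New", Age: 40})
+//	txn.Commit()
+//
+//	// A read against the snapshot still sees the pre-insert state.
+//	raw, _ := snap.Txn(false).First("person", "id", "new@aol.com")
+//	fmt.Println(raw == nil) // true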
diff --git a/vendor/github.com/hashicorp/go-memdb/schema.go b/vendor/github.com/hashicorp/go-memdb/schema.go
new file mode 100644
index 000000000..e6a9b526b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/schema.go
@@ -0,0 +1,114 @@
+package memdb
+
+import "fmt"
+
+// DBSchema is the schema to use for the full database with a MemDB instance.
+//
+// MemDB will require a valid schema. Schema validation can be tested using
+// the Validate function. Calling this function is recommended in unit tests.
+type DBSchema struct {
+ // Tables is the set of tables within this database. The key is the
+ // table name and must match the Name in TableSchema.
+ Tables map[string]*TableSchema
+}
+
+// Validate validates the schema.
+func (s *DBSchema) Validate() error {
+ if s == nil {
+ return fmt.Errorf("schema is nil")
+ }
+
+ if len(s.Tables) == 0 {
+ return fmt.Errorf("schema has no tables defined")
+ }
+
+ for name, table := range s.Tables {
+ if name != table.Name {
+			return fmt.Errorf("table name mismatch for '%s'", name)
+ }
+
+ if err := table.Validate(); err != nil {
+ return fmt.Errorf("table %q: %s", name, err)
+ }
+ }
+
+ return nil
+}
+
+// TableSchema is the schema for a single table.
+type TableSchema struct {
+ // Name of the table. This must match the key in the Tables map in DBSchema.
+ Name string
+
+ // Indexes is the set of indexes for querying this table. The key
+ // is a unique name for the index and must match the Name in the
+ // IndexSchema.
+ Indexes map[string]*IndexSchema
+}
+
+// Validate is used to validate the table schema
+func (s *TableSchema) Validate() error {
+ if s.Name == "" {
+ return fmt.Errorf("missing table name")
+ }
+
+ if len(s.Indexes) == 0 {
+ return fmt.Errorf("missing table indexes for '%s'", s.Name)
+ }
+
+ if _, ok := s.Indexes["id"]; !ok {
+ return fmt.Errorf("must have id index")
+ }
+
+ if !s.Indexes["id"].Unique {
+ return fmt.Errorf("id index must be unique")
+ }
+
+ if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok {
+ return fmt.Errorf("id index must be a SingleIndexer")
+ }
+
+ for name, index := range s.Indexes {
+ if name != index.Name {
+			return fmt.Errorf("index name mismatch for '%s'", name)
+ }
+
+ if err := index.Validate(); err != nil {
+ return fmt.Errorf("index %q: %s", name, err)
+ }
+ }
+
+ return nil
+}
+
+// IndexSchema is the schema for an index. An index defines how a table is
+// queried.
+type IndexSchema struct {
+	// Name of the index. This must be unique among a table's set of indexes.
+ // This must match the key in the map of Indexes for a TableSchema.
+ Name string
+
+	// AllowMissing, if true, ignores this index if it doesn't produce a
+	// value, for example an index that extracts a field that doesn't
+	// exist on a structure.
+ AllowMissing bool
+
+ Unique bool
+ Indexer Indexer
+}
+
+func (s *IndexSchema) Validate() error {
+ if s.Name == "" {
+ return fmt.Errorf("missing index name")
+ }
+ if s.Indexer == nil {
+ return fmt.Errorf("missing index function for '%s'", s.Name)
+ }
+ switch s.Indexer.(type) {
+ case SingleIndexer:
+ case MultiIndexer:
+ default:
+ return fmt.Errorf("indexer for '%s' must be a SingleIndexer or MultiIndexer", s.Name)
+ }
+ return nil
+}
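+
+// Illustrative unit-test sketch for the recommendation above (the
+// personSchema helper is an assumption): validating the schema in a test
+// keeps schema mistakes out of runtime paths, since NewMemDB would otherwise
+// surface them on first use.
+//
+//	func TestSchemaIsValid(t *testing.T) {
+//		if err := personSchema().Validate(); err != nil {
+//			t.Fatalf("invalid memdb schema: %v", err)
+//		}
+//	}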
diff --git a/vendor/github.com/hashicorp/go-memdb/txn.go b/vendor/github.com/hashicorp/go-memdb/txn.go
new file mode 100644
index 000000000..951c2a1d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/txn.go
@@ -0,0 +1,1021 @@
+package memdb
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "sync/atomic"
+ "unsafe"
+
+ iradix "github.com/hashicorp/go-immutable-radix"
+)
+
+const (
+ id = "id"
+)
+
+var (
+ // ErrNotFound is returned when the requested item is not found
+ ErrNotFound = fmt.Errorf("not found")
+)
+
+// tableIndex is a tuple of (Table, Index) used for lookups
+type tableIndex struct {
+ Table string
+ Index string
+}
+
+// Txn is a transaction against a MemDB.
+// This can be a read or write transaction.
+type Txn struct {
+ db *MemDB
+ write bool
+ rootTxn *iradix.Txn
+ after []func()
+
+ // changes is used to track the changes performed during the transaction. If
+ // it is nil at transaction start then changes are not tracked.
+ changes Changes
+
+ modified map[tableIndex]*iradix.Txn
+}
+
+// TrackChanges enables change tracking for the transaction. If called at any
+// point before commit, subsequent mutations will be recorded and can be
+// retrieved using ChangeSet. Once this has been called on a transaction it
+// can't be unset. As with other Txn methods it's not safe to call this from a
+// different goroutine than the one making mutations or committing the
+// transaction.
+func (txn *Txn) TrackChanges() {
+ if txn.changes == nil {
+ txn.changes = make(Changes, 0, 1)
+ }
+}
+
+// readableIndex returns a transaction usable for reading the given index in a
+// table. If the transaction is a write transaction with modifications, a clone of the
+// modified index will be returned.
+func (txn *Txn) readableIndex(table, index string) *iradix.Txn {
+ // Look for existing transaction
+ if txn.write && txn.modified != nil {
+ key := tableIndex{table, index}
+ exist, ok := txn.modified[key]
+ if ok {
+ return exist.Clone()
+ }
+ }
+
+ // Create a read transaction
+ path := indexPath(table, index)
+ raw, _ := txn.rootTxn.Get(path)
+ indexTxn := raw.(*iradix.Tree).Txn()
+ return indexTxn
+}
+
+// writableIndex returns a transaction usable for modifying the
+// given index in a table.
+func (txn *Txn) writableIndex(table, index string) *iradix.Txn {
+ if txn.modified == nil {
+ txn.modified = make(map[tableIndex]*iradix.Txn)
+ }
+
+ // Look for existing transaction
+ key := tableIndex{table, index}
+ exist, ok := txn.modified[key]
+ if ok {
+ return exist
+ }
+
+ // Start a new transaction
+ path := indexPath(table, index)
+ raw, _ := txn.rootTxn.Get(path)
+ indexTxn := raw.(*iradix.Tree).Txn()
+
+ // If we are the primary DB, enable mutation tracking. Snapshots should
+ // not notify, otherwise we will trigger watches on the primary DB when
+ // the writes will not be visible.
+ indexTxn.TrackMutate(txn.db.primary)
+
+ // Keep this open for the duration of the txn
+ txn.modified[key] = indexTxn
+ return indexTxn
+}
+
+// Abort is used to cancel this transaction.
+// This is a noop for read transactions and for
+// transactions that were already aborted or committed.
+func (txn *Txn) Abort() {
+ // Noop for a read transaction
+ if !txn.write {
+ return
+ }
+
+ // Check if already aborted or committed
+ if txn.rootTxn == nil {
+ return
+ }
+
+ // Clear the txn
+ txn.rootTxn = nil
+ txn.modified = nil
+ txn.changes = nil
+
+ // Release the writer lock since this is invalid
+ txn.db.writer.Unlock()
+}
+
+// Commit is used to finalize this transaction.
+// This is a noop for read transactions and for
+// transactions that were already aborted or committed.
+func (txn *Txn) Commit() {
+ // Noop for a read transaction
+ if !txn.write {
+ return
+ }
+
+ // Check if already aborted or committed
+ if txn.rootTxn == nil {
+ return
+ }
+
+ // Commit each sub-transaction scoped to (table, index)
+ for key, subTxn := range txn.modified {
+ path := indexPath(key.Table, key.Index)
+ final := subTxn.CommitOnly()
+ txn.rootTxn.Insert(path, final)
+ }
+
+ // Update the root of the DB
+ newRoot := txn.rootTxn.CommitOnly()
+ atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot))
+
+ // Now issue all of the mutation updates (this is safe to call
+ // even if mutation tracking isn't enabled); we do this after
+ // the root pointer is swapped so that waking responders will
+ // see the new state.
+ for _, subTxn := range txn.modified {
+ subTxn.Notify()
+ }
+ txn.rootTxn.Notify()
+
+ // Clear the txn
+ txn.rootTxn = nil
+ txn.modified = nil
+
+ // Release the writer lock since this is invalid
+ txn.db.writer.Unlock()
+
+ // Run the deferred functions, if any
+ for i := len(txn.after); i > 0; i-- {
+ fn := txn.after[i-1]
+ fn()
+ }
+}
+
+// Insert is used to add or update an object into the given table.
+//
+// When updating an object, the obj provided should be a copy rather
+// than a value updated in-place. Modifying values in-place that are already
+// inserted into MemDB is not supported behavior.
+func (txn *Txn) Insert(table string, obj interface{}) error {
+ if !txn.write {
+ return fmt.Errorf("cannot insert in read-only transaction")
+ }
+
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return fmt.Errorf("invalid table '%s'", table)
+ }
+
+ // Get the primary ID of the object
+ idSchema := tableSchema.Indexes[id]
+ idIndexer := idSchema.Indexer.(SingleIndexer)
+ ok, idVal, err := idIndexer.FromObject(obj)
+ if err != nil {
+ return fmt.Errorf("failed to build primary index: %v", err)
+ }
+ if !ok {
+ return fmt.Errorf("object missing primary index")
+ }
+
+ // Lookup the object by ID first, to see if this is an update
+ idTxn := txn.writableIndex(table, id)
+ existing, update := idTxn.Get(idVal)
+
+ // On an update, there is an existing object with the given
+ // primary ID. We do the update by deleting the current object
+ // and inserting the new object.
+ for name, indexSchema := range tableSchema.Indexes {
+ indexTxn := txn.writableIndex(table, name)
+
+ // Determine the new index value
+ var (
+ ok bool
+ vals [][]byte
+ err error
+ )
+ switch indexer := indexSchema.Indexer.(type) {
+ case SingleIndexer:
+ var val []byte
+ ok, val, err = indexer.FromObject(obj)
+ vals = [][]byte{val}
+ case MultiIndexer:
+ ok, vals, err = indexer.FromObject(obj)
+ }
+ if err != nil {
+ return fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ if ok && !indexSchema.Unique {
+ for i := range vals {
+ vals[i] = append(vals[i], idVal...)
+ }
+ }
+
+ // Handle the update by deleting from the index first
+ if update {
+ var (
+ okExist bool
+ valsExist [][]byte
+ err error
+ )
+ switch indexer := indexSchema.Indexer.(type) {
+ case SingleIndexer:
+ var valExist []byte
+ okExist, valExist, err = indexer.FromObject(existing)
+ valsExist = [][]byte{valExist}
+ case MultiIndexer:
+ okExist, valsExist, err = indexer.FromObject(existing)
+ }
+ if err != nil {
+ return fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+ if okExist {
+ for i, valExist := range valsExist {
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ if !indexSchema.Unique {
+ valExist = append(valExist, idVal...)
+ }
+
+ // If we are writing to the same index with the same value,
+ // we can avoid the delete as the insert will overwrite the
+ // value anyways.
+ if i >= len(vals) || !bytes.Equal(valExist, vals[i]) {
+ indexTxn.Delete(valExist)
+ }
+ }
+ }
+ }
+
+ // If there is no index value, either this is an error or an expected
+ // case and we can skip updating
+ if !ok {
+ if indexSchema.AllowMissing {
+ continue
+ } else {
+ return fmt.Errorf("missing value for index '%s'", name)
+ }
+ }
+
+ // Update the value of the index
+ for _, val := range vals {
+ indexTxn.Insert(val, obj)
+ }
+ }
+ if txn.changes != nil {
+ txn.changes = append(txn.changes, Change{
+ Table: table,
+ Before: existing, // might be nil on a create
+ After: obj,
+ primaryKey: idVal,
+ })
+ }
+ return nil
+}
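+
+// Illustrative usage sketch (not part of the upstream source): insert into a
+// hypothetical "person" table whose unique "id" index is a StringFieldIndex
+// over an Email field, then commit the write transaction.
+//
+//    txn := db.Txn(true)
+//    if err := txn.Insert("person", &Person{Email: "joe@example.com", Age: 30}); err != nil {
+//        txn.Abort()
+//        return err
+//    }
+//    txn.Commit()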
+
+// Delete is used to delete a single object from the given table.
+// This object must already exist in the table.
+func (txn *Txn) Delete(table string, obj interface{}) error {
+ if !txn.write {
+ return fmt.Errorf("cannot delete in read-only transaction")
+ }
+
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return fmt.Errorf("invalid table '%s'", table)
+ }
+
+ // Get the primary ID of the object
+ idSchema := tableSchema.Indexes[id]
+ idIndexer := idSchema.Indexer.(SingleIndexer)
+ ok, idVal, err := idIndexer.FromObject(obj)
+ if err != nil {
+ return fmt.Errorf("failed to build primary index: %v", err)
+ }
+ if !ok {
+ return fmt.Errorf("object missing primary index")
+ }
+
+ // Lookup the object by ID first, check if we should continue
+ idTxn := txn.writableIndex(table, id)
+ existing, ok := idTxn.Get(idVal)
+ if !ok {
+ return ErrNotFound
+ }
+
+ // Remove the object from all the indexes
+ for name, indexSchema := range tableSchema.Indexes {
+ indexTxn := txn.writableIndex(table, name)
+
+ // Build the index values for the existing object so they can be deleted
+ var (
+ ok bool
+ vals [][]byte
+ err error
+ )
+ switch indexer := indexSchema.Indexer.(type) {
+ case SingleIndexer:
+ var val []byte
+ ok, val, err = indexer.FromObject(existing)
+ vals = [][]byte{val}
+ case MultiIndexer:
+ ok, vals, err = indexer.FromObject(existing)
+ }
+ if err != nil {
+ return fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+ if ok {
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ for _, val := range vals {
+ if !indexSchema.Unique {
+ val = append(val, idVal...)
+ }
+ indexTxn.Delete(val)
+ }
+ }
+ }
+ if txn.changes != nil {
+ txn.changes = append(txn.changes, Change{
+ Table: table,
+ Before: existing,
+ After: nil, // Now nil indicates deletion
+ primaryKey: idVal,
+ })
+ }
+ return nil
+}
+
+// DeletePrefix is used to delete an entire subtree based on a prefix.
+// The given index must be a prefix index, and will be used to perform
+// a scan and enumerate the set of objects to delete. These will be
+// removed from all other indexes, and then a special prefix operation
+// will delete the objects from the given index in an efficient subtree
+// delete operation.
+//
+// This is useful when you have a very large number of objects indexed
+// by the given index, along with a much smaller number of entries in
+// the other indexes for those objects.
+func (txn *Txn) DeletePrefix(table string, prefixIndex string, prefix string) (bool, error) {
+ if !txn.write {
+ return false, fmt.Errorf("cannot delete in read-only transaction")
+ }
+
+ if !strings.HasSuffix(prefixIndex, "_prefix") {
+ return false, fmt.Errorf("index name for DeletePrefix must be a prefix index, got %v", prefixIndex)
+ }
+
+ deletePrefixIndex := strings.TrimSuffix(prefixIndex, "_prefix")
+
+ // Get an iterator over all of the keys with the given prefix.
+ entries, err := txn.Get(table, prefixIndex, prefix)
+ if err != nil {
+ return false, fmt.Errorf("failed kvs lookup: %s", err)
+ }
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return false, fmt.Errorf("invalid table '%s'", table)
+ }
+
+ foundAny := false
+ for entry := entries.Next(); entry != nil; entry = entries.Next() {
+ foundAny = true
+ // Get the primary ID of the object
+ idSchema := tableSchema.Indexes[id]
+ idIndexer := idSchema.Indexer.(SingleIndexer)
+ ok, idVal, err := idIndexer.FromObject(entry)
+ if err != nil {
+ return false, fmt.Errorf("failed to build primary index: %v", err)
+ }
+ if !ok {
+ return false, fmt.Errorf("object missing primary index")
+ }
+ if txn.changes != nil {
+ // Record the deletion
+ idTxn := txn.writableIndex(table, id)
+ existing, ok := idTxn.Get(idVal)
+ if ok {
+ txn.changes = append(txn.changes, Change{
+ Table: table,
+ Before: existing,
+ After: nil, // Now nil indicates deletion
+ primaryKey: idVal,
+ })
+ }
+ }
+ // Remove the object from all the indexes except the given prefix index
+ for name, indexSchema := range tableSchema.Indexes {
+ if name == deletePrefixIndex {
+ continue
+ }
+ indexTxn := txn.writableIndex(table, name)
+
+ // Build the index values for this object so they can be deleted
+ var (
+ ok bool
+ vals [][]byte
+ err error
+ )
+ switch indexer := indexSchema.Indexer.(type) {
+ case SingleIndexer:
+ var val []byte
+ ok, val, err = indexer.FromObject(entry)
+ vals = [][]byte{val}
+ case MultiIndexer:
+ ok, vals, err = indexer.FromObject(entry)
+ }
+ if err != nil {
+ return false, fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+
+ if ok {
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ for _, val := range vals {
+ if !indexSchema.Unique {
+ val = append(val, idVal...)
+ }
+ indexTxn.Delete(val)
+ }
+ }
+ }
+
+ }
+ if foundAny {
+ indexTxn := txn.writableIndex(table, deletePrefixIndex)
+ ok = indexTxn.DeletePrefix([]byte(prefix))
+ if !ok {
+ panic(fmt.Errorf("prefix %v matched some entries but DeletePrefix did not delete any", prefix))
+ }
+ return true, nil
+ }
+ return false, nil
+}
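+
+// Illustrative usage sketch (not part of the upstream source): delete every
+// object in the hypothetical "person" table whose primary key starts with
+// "joe", using the "_prefix" form of the "id" index.
+//
+//    ok, err := txn.DeletePrefix("person", "id_prefix", "joe")
+//    // ok is true if at least one object matched and was deleted.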
+
+// DeleteAll is used to delete all the objects in a given table
+// matching the constraints on the index
+func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) {
+ if !txn.write {
+ return 0, fmt.Errorf("cannot delete in read-only transaction")
+ }
+
+ // Get all the objects
+ iter, err := txn.Get(table, index, args...)
+ if err != nil {
+ return 0, err
+ }
+
+ // Put them into a slice so there are no safety concerns while actually
+ // performing the deletes
+ var objs []interface{}
+ for {
+ obj := iter.Next()
+ if obj == nil {
+ break
+ }
+
+ objs = append(objs, obj)
+ }
+
+ // Do the deletes
+ num := 0
+ for _, obj := range objs {
+ if err := txn.Delete(table, obj); err != nil {
+ return num, err
+ }
+ num++
+ }
+ return num, nil
+}
+
+// FirstWatch is used to return the first matching object for
+// the given constraints on the index along with the watch channel.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+//
+// The watch channel is closed when a subsequent write transaction
+// has updated the result of the query. Since each read transaction
+// operates on an isolated snapshot, a new read transaction must be
+// started to observe the changes that have been made.
+//
+// If the value of index ends with "_prefix", FirstWatch will perform a prefix
+// match instead of full match on the index. The registered indexer must implement
+// PrefixIndexer, otherwise an error is returned.
+func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) {
+ // Get the index value
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get the index itself
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+
+ // Do an exact lookup
+ if indexSchema.Unique && val != nil && indexSchema.Name == index {
+ watch, obj, ok := indexTxn.GetWatch(val)
+ if !ok {
+ return watch, nil, nil
+ }
+ return watch, obj, nil
+ }
+
+ // Handle non-unique index by using an iterator and getting the first value
+ iter := indexTxn.Root().Iterator()
+ watch := iter.SeekPrefixWatch(val)
+ _, value, _ := iter.Next()
+ return watch, value, nil
+}
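+
+// Illustrative usage sketch (assumes the hypothetical "person" table from the
+// Insert example): block until the first matching row changes, then re-query
+// in a fresh read transaction.
+//
+//    watchCh, obj, err := txn.FirstWatch("person", "id", "joe@example.com")
+//    if err == nil && obj != nil {
+//        select {
+//        case <-watchCh:
+//            // Result changed; open a new read transaction and re-query.
+//        case <-ctx.Done():
+//        }
+//    }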
+
+// LastWatch is used to return the last matching object for
+// the given constraints on the index along with the watch channel.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+//
+// The watch channel is closed when a subsequent write transaction
+// has updated the result of the query. Since each read transaction
+// operates on an isolated snapshot, a new read transaction must be
+// started to observe the changes that have been made.
+//
+// If the value of index ends with "_prefix", LastWatch will perform a prefix
+// match instead of full match on the index. The registered indexer must implement
+// PrefixIndexer, otherwise an error is returned.
+func (txn *Txn) LastWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) {
+ // Get the index value
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get the index itself
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+
+ // Do an exact lookup
+ if indexSchema.Unique && val != nil && indexSchema.Name == index {
+ watch, obj, ok := indexTxn.GetWatch(val)
+ if !ok {
+ return watch, nil, nil
+ }
+ return watch, obj, nil
+ }
+
+ // Handle non-unique index by using an iterator and getting the last value
+ iter := indexTxn.Root().ReverseIterator()
+ watch := iter.SeekPrefixWatch(val)
+ _, value, _ := iter.Previous()
+ return watch, value, nil
+}
+
+// First is used to return the first matching object for
+// the given constraints on the index.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) {
+ _, val, err := txn.FirstWatch(table, index, args...)
+ return val, err
+}
+
+// Last is used to return the last matching object for
+// the given constraints on the index.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+func (txn *Txn) Last(table, index string, args ...interface{}) (interface{}, error) {
+ _, val, err := txn.LastWatch(table, index, args...)
+ return val, err
+}
+
+// LongestPrefix is used to fetch the longest prefix match for the given
+// constraints on the index. Note that this will not work with the memdb
+// StringFieldIndex because it adds null terminators which prevent the
+// algorithm from correctly finding a match (it will get to right before the
+// null and fail to find a leaf node). This should only be used where the prefix
+// given is capable of matching indexed entries directly, which typically only
+// applies to a custom indexer. See the unit test for an example.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) {
+ // Enforce that this only works on prefix indexes.
+ if !strings.HasSuffix(index, "_prefix") {
+ return nil, fmt.Errorf("must use '%s_prefix' on index", index)
+ }
+
+ // Get the index value.
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // This algorithm only makes sense against a unique index, otherwise the
+ // index keys will have the IDs appended to them.
+ if !indexSchema.Unique {
+ return nil, fmt.Errorf("index '%s' is not unique", index)
+ }
+
+ // Find the longest prefix match with the given index.
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+ if _, value, ok := indexTxn.Root().LongestPrefix(val); ok {
+ return value, nil
+ }
+ return nil, nil
+}
+
+// getIndexValue is used to get the IndexSchema and the value
+// used to scan the index given the parameters. This handles prefix based
+// scans when the index has the "_prefix" suffix. The index must support
+// prefix iteration.
+func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexSchema, []byte, error) {
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return nil, nil, fmt.Errorf("invalid table '%s'", table)
+ }
+
+ // Check for a prefix scan
+ prefixScan := false
+ if strings.HasSuffix(index, "_prefix") {
+ index = strings.TrimSuffix(index, "_prefix")
+ prefixScan = true
+ }
+
+ // Get the index schema
+ indexSchema, ok := tableSchema.Indexes[index]
+ if !ok {
+ return nil, nil, fmt.Errorf("invalid index '%s'", index)
+ }
+
+ // Hot-path for when there are no arguments
+ if len(args) == 0 {
+ return indexSchema, nil, nil
+ }
+
+ // Special case the prefix scanning
+ if prefixScan {
+ prefixIndexer, ok := indexSchema.Indexer.(PrefixIndexer)
+ if !ok {
+ return indexSchema, nil,
+ fmt.Errorf("index '%s' does not support prefix scanning", index)
+ }
+
+ val, err := prefixIndexer.PrefixFromArgs(args...)
+ if err != nil {
+ return indexSchema, nil, fmt.Errorf("index error: %v", err)
+ }
+ return indexSchema, val, err
+ }
+
+ // Get the exact match index
+ val, err := indexSchema.Indexer.FromArgs(args...)
+ if err != nil {
+ return indexSchema, nil, fmt.Errorf("index error: %v", err)
+ }
+ return indexSchema, val, err
+}
+
+// ResultIterator is used to iterate over a list of results from a query on a table.
+//
+// When a ResultIterator is created from a write transaction, the results from
+// Next will reflect a snapshot of the table at the time the ResultIterator is
+// created.
+// This means that calling Insert or Delete on a transaction while iterating is
+// allowed, but the changes made by Insert or Delete will not be observed in the
+// results returned from subsequent calls to Next. For example if an item is deleted
+// from the index used by the iterator it will still be returned by Next. If an
+// item is inserted into the index used by the iterator, it will not be returned
+// by Next. However, an iterator created after a call to Insert or Delete will
+// reflect the modifications.
+//
+// When a ResultIterator is created from a write transaction, and there are already
+// modifications to the index used by the iterator, the modification cache of the
+// index will be invalidated. This may result in some additional allocations if
+// the same node in the index is modified again.
+type ResultIterator interface {
+ WatchCh() <-chan struct{}
+ // Next returns the next result from the iterator. If there are no more results
+ // nil is returned.
+ Next() interface{}
+}
+
+// Get is used to construct a ResultIterator over all the rows that match the
+// given constraints of an index. The index values must match exactly (this
+// is not a range-based or prefix-based lookup) by default.
+//
+// Prefix lookups: if the named index implements PrefixIndexer, you may perform
+// prefix-based lookups by appending "_prefix" to the index name. In this
+// scenario, the index values given in args are treated as prefix lookups. For
+// example, a StringFieldIndex will match any string with the given value
+// as a prefix: "mem" matches "memdb".
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIterator(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ watchCh := indexIter.SeekPrefixWatch(val)
+
+ // Create an iterator
+ iter := &radixIterator{
+ iter: indexIter,
+ watchCh: watchCh,
+ }
+ return iter, nil
+}
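+
+// Illustrative iteration sketch (assumes the hypothetical "person" table):
+// drain the iterator until Next returns nil.
+//
+//    it, err := txn.Get("person", "id")
+//    if err != nil {
+//        return err
+//    }
+//    for obj := it.Next(); obj != nil; obj = it.Next() {
+//        p := obj.(*Person)
+//        fmt.Println(p.Email)
+//    }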
+
+// GetReverse is used to construct a Reverse ResultIterator over all the
+// rows that match the given constraints of an index.
+// The returned ResultIterator's Next() returns rows in descending index order.
+//
+// See the documentation on Get for details on arguments.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) GetReverse(table, index string, args ...interface{}) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ watchCh := indexIter.SeekPrefixWatch(val)
+
+ // Create an iterator
+ iter := &radixReverseIterator{
+ iter: indexIter,
+ watchCh: watchCh,
+ }
+ return iter, nil
+}
+
+// LowerBound is used to construct a ResultIterator over the range of rows
+// that have an index value greater than or equal to the provided args.
+// Calling this then iterating until the rows are larger than required allows
+// range scans within an index. It is not possible to watch the resulting
+// iterator since the radix tree doesn't efficiently allow watching on lower
+// bound changes. The WatchCh returned will be nil and so will block forever.
+//
+// If the value of index ends with "_prefix", LowerBound will perform a prefix match instead of
+// a full match on the index. The registered index must implement PrefixIndexer,
+// otherwise an error is returned.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) LowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIterator(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ indexIter.SeekLowerBound(val)
+
+ // Create an iterator
+ iter := &radixIterator{
+ iter: indexIter,
+ }
+ return iter, nil
+}
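+
+// Illustrative range-scan sketch (assumes a hypothetical "age" IntFieldIndex
+// on the "person" table): iterate people aged 25 and over, stopping once a
+// caller-defined upper bound is passed.
+//
+//    it, err := txn.LowerBound("person", "age", 25)
+//    for obj := it.Next(); obj != nil; obj = it.Next() {
+//        p := obj.(*Person)
+//        if p.Age > 35 {
+//            break
+//        }
+//    }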
+
+// ReverseLowerBound is used to construct a Reverse ResultIterator over the
+// range of rows that have an index value less than or equal to the
+// provided args. Calling this then iterating until the rows are lower than
+// required allows range scans within an index. It is not possible to watch
+// the resulting iterator since the radix tree doesn't efficiently allow
+// watching on lower bound changes. The WatchCh returned will be nil and so
+// will block forever.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) ReverseLowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ indexIter.SeekReverseLowerBound(val)
+
+ // Create an iterator
+ iter := &radixReverseIterator{
+ iter: indexIter,
+ }
+ return iter, nil
+}
+
+// objectID is a tuple of table name and the raw internal id byte slice
+// converted to a string. It's only converted to a string to make it
+// comparable so this struct can be used as a map key.
+type objectID struct {
+ Table string
+ IndexVal string
+}
+
+// mutInfo stores metadata about mutations to allow collapsing multiple
+// mutations to the same object into one.
+type mutInfo struct {
+ firstBefore interface{}
+ lastIdx int
+}
+
+// Changes returns the set of object changes that have been made in the
+// transaction so far. If change tracking is not enabled it will always return
+// nil. It can be called before or after Commit. If it is before Commit it will
+// return all changes made so far which may not be the same as the final
+// Changes. After abort it will always return nil. As with other Txn methods
+// it's not safe to call this from a different goroutine than the one making
+// mutations or committing the transaction. Mutations will appear in the order
+// they were performed in the transaction but multiple operations to the same
+// object will be collapsed so only the effective overall change to that object
+// is present. If transaction operations are dependent (e.g. copy object X to Y
+// then delete X) this might mean the set of mutations is incomplete to verify
+// history, but it is complete in that the net effect is preserved (Y got a new
+// value, X got removed).
+func (txn *Txn) Changes() Changes {
+ if txn.changes == nil {
+ return nil
+ }
+
+ // De-duplicate mutations by key so each takes effect at the point of its
+ // last write, while keeping the mutations in order.
+ dups := make(map[objectID]mutInfo)
+ for i, m := range txn.changes {
+ oid := objectID{
+ Table: m.Table,
+ IndexVal: string(m.primaryKey),
+ }
+ // Store the latest mutation index for each key value
+ mi, ok := dups[oid]
+ if !ok {
+ // First entry for key, store the before value
+ mi.firstBefore = m.Before
+ }
+ mi.lastIdx = i
+ dups[oid] = mi
+ }
+ if len(dups) == len(txn.changes) {
+ // No duplicates found; fast path, return as-is
+ return txn.changes
+ }
+
+ // Need to remove the duplicates
+ cs := make(Changes, 0, len(dups))
+ for i, m := range txn.changes {
+ oid := objectID{
+ Table: m.Table,
+ IndexVal: string(m.primaryKey),
+ }
+ mi := dups[oid]
+ if mi.lastIdx == i {
+ // This was the latest value for this key; copy it with the before value
+ // in case it's different. Note that m is not a pointer so we are not
+ // modifying txn.changes here - it's already a copy.
+ m.Before = mi.firstBefore
+
+ // Edge case - if the object was inserted and then eventually deleted in
+ // the same transaction, then the net effect on that key is a no-op. Don't
+ // emit a mutation with nil for before and after as it's meaningless and
+ // might violate expectations and cause a panic in code that assumes at
+ // least one must be set.
+ if m.Before == nil && m.After == nil {
+ continue
+ }
+ cs = append(cs, m)
+ }
+ }
+ // Store the de-duped version in case this is called again
+ txn.changes = cs
+ return cs
+}
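+
+// Illustrative sketch: this assumes change tracking was enabled on the write
+// transaction up front (upstream go-memdb exposes this as TrackChanges, which
+// is defined elsewhere in this package).
+//
+//    txn := db.Txn(true)
+//    txn.TrackChanges()
+//    txn.Insert("person", p)
+//    for _, ch := range txn.Changes() {
+//        // ch.Before == nil means create; ch.After == nil means delete.
+//    }
+//    txn.Commit()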
+
+func (txn *Txn) getIndexIterator(table, index string, args ...interface{}) (*iradix.Iterator, []byte, error) {
+ // Get the index value to scan
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get the index itself
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+ indexRoot := indexTxn.Root()
+
+ // Get an iterator over the index
+ indexIter := indexRoot.Iterator()
+ return indexIter, val, nil
+}
+
+func (txn *Txn) getIndexIteratorReverse(table, index string, args ...interface{}) (*iradix.ReverseIterator, []byte, error) {
+ // Get the index value to scan
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get the index itself
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+ indexRoot := indexTxn.Root()
+
+ // Get an iterator over the index
+ indexIter := indexRoot.ReverseIterator()
+ return indexIter, val, nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions.
+func (txn *Txn) Defer(fn func()) {
+ txn.after = append(txn.after, fn)
+}
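+
+// Illustrative sketch: run a callback only once the write commits.
+//
+//    txn.Defer(func() { log.Println("txn committed") })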
+
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not
+// materializing the entire view.
+type radixIterator struct {
+ iter *iradix.Iterator
+ watchCh <-chan struct{}
+}
+
+func (r *radixIterator) WatchCh() <-chan struct{} {
+ return r.watchCh
+}
+
+func (r *radixIterator) Next() interface{} {
+ _, value, ok := r.iter.Next()
+ if !ok {
+ return nil
+ }
+ return value
+}
+
+type radixReverseIterator struct {
+ iter *iradix.ReverseIterator
+ watchCh <-chan struct{}
+}
+
+func (r *radixReverseIterator) Next() interface{} {
+ _, value, ok := r.iter.Previous()
+ if !ok {
+ return nil
+ }
+ return value
+}
+
+func (r *radixReverseIterator) WatchCh() <-chan struct{} {
+ return r.watchCh
+}
+
+// Snapshot creates a snapshot of the current state of the transaction.
+// Returns a new read-only transaction or nil if the transaction is already
+// aborted or committed.
+func (txn *Txn) Snapshot() *Txn {
+ if txn.rootTxn == nil {
+ return nil
+ }
+
+ snapshot := &Txn{
+ db: txn.db,
+ rootTxn: txn.rootTxn.Clone(),
+ }
+
+ // Commit sub-transactions into the snapshot
+ for key, subTxn := range txn.modified {
+ path := indexPath(key.Table, key.Index)
+ final := subTxn.CommitOnly()
+ snapshot.rootTxn.Insert(path, final)
+ }
+
+ return snapshot
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch.go b/vendor/github.com/hashicorp/go-memdb/watch.go
new file mode 100644
index 000000000..13a4da145
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/watch.go
@@ -0,0 +1,152 @@
+package memdb
+
+import (
+ "context"
+ "time"
+)
+
+// WatchSet is a collection of watch channels. The zero value is not usable.
+// Use NewWatchSet to create a WatchSet.
+type WatchSet map[<-chan struct{}]struct{}
+
+// NewWatchSet constructs a new watch set.
+func NewWatchSet() WatchSet {
+ return make(map[<-chan struct{}]struct{})
+}
+
+// Add appends watchCh to the WatchSet. Calling Add on a nil WatchSet is a no-op.
+func (w WatchSet) Add(watchCh <-chan struct{}) {
+ if w == nil {
+ return
+ }
+
+ if _, ok := w[watchCh]; !ok {
+ w[watchCh] = struct{}{}
+ }
+}
+
+// AddWithLimit appends watchCh to the WatchSet (a no-op on a nil WatchSet) as
+// long as the given softLimit hasn't been exceeded. Otherwise, it watches the
+// given alternate channel instead. It's expected that altCh will be the same
+// across many calls to this function, so the soft limit may be exceeded
+// slightly once it is hit, but not by much.
+//
+// This is useful if you want to track individual items up to some limit, after
+// which you watch a higher-level channel (usually a channel from start of
+// an iterator higher up in the radix tree) that will watch a superset of items.
+func (w WatchSet) AddWithLimit(softLimit int, watchCh <-chan struct{}, altCh <-chan struct{}) {
+ // This is safe for a nil WatchSet so we don't need to check that here.
+ if len(w) < softLimit {
+ w.Add(watchCh)
+ } else {
+ w.Add(altCh)
+ }
+}
+
+// Watch blocks until one of the channels in the watch set is closed, or
+// timeoutCh sends a value.
+// Returns true if timeoutCh is what caused Watch to unblock.
+func (w WatchSet) Watch(timeoutCh <-chan time.Time) bool {
+ if w == nil {
+ return false
+ }
+
+ // Create a context that gets cancelled when the timeout is triggered
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ go func() {
+ select {
+ case <-timeoutCh:
+ cancel()
+ case <-ctx.Done():
+ }
+ }()
+
+ return w.WatchCtx(ctx) == context.Canceled
+}
+
+// WatchCtx blocks until one of the channels in the watch set is closed, or
+// ctx is done (cancelled or exceeds the deadline). WatchCtx returns an error
+// if the ctx causes it to unblock, otherwise returns nil.
+//
+// WatchCtx should be preferred over Watch.
+func (w WatchSet) WatchCtx(ctx context.Context) error {
+ if w == nil {
+ return nil
+ }
+
+ if n := len(w); n <= aFew {
+ idx := 0
+ chunk := make([]<-chan struct{}, aFew)
+ for watchCh := range w {
+ chunk[idx] = watchCh
+ idx++
+ }
+ return watchFew(ctx, chunk)
+ }
+
+ return w.watchMany(ctx)
+}
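+
+// Illustrative usage sketch: collect watch channels from queries on a read
+// transaction, then block until any of them fires or the context is done.
+//
+//    ws := NewWatchSet()
+//    watchCh, _, err := txn.FirstWatch("person", "id", "joe@example.com")
+//    if err == nil {
+//        ws.Add(watchCh)
+//    }
+//    if err := ws.WatchCtx(ctx); err == nil {
+//        // A watched channel fired; re-run the queries.
+//    }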
+
+// watchMany is used if there are many watchers.
+func (w WatchSet) watchMany(ctx context.Context) error {
+ // Cancel all watcher goroutines when we return.
+ watcherCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Set up a goroutine for each watcher.
+ triggerCh := make(chan struct{}, 1)
+ watcher := func(chunk []<-chan struct{}) {
+ if err := watchFew(watcherCtx, chunk); err == nil {
+ select {
+ case triggerCh <- struct{}{}:
+ default:
+ }
+ }
+ }
+
+ // Apportion the watch channels into chunks we can feed into the
+ // watchFew helper.
+ idx := 0
+ chunk := make([]<-chan struct{}, aFew)
+ for watchCh := range w {
+ subIdx := idx % aFew
+ chunk[subIdx] = watchCh
+ idx++
+
+ // Fire off this chunk and start a fresh one.
+ if idx%aFew == 0 {
+ go watcher(chunk)
+ chunk = make([]<-chan struct{}, aFew)
+ }
+ }
+
+ // Make sure to watch any residual channels in the last chunk.
+ if idx%aFew != 0 {
+ go watcher(chunk)
+ }
+
+ // Wait for a channel to trigger or timeout.
+ select {
+ case <-triggerCh:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// WatchCh returns a channel that can be used to wait for any channel in the
+// watch set to trigger, or for the context to be cancelled. WatchCh creates a
+// new goroutine on each call, so callers may want to cache the returned
+// channel to avoid creating extra goroutines.
+func (w WatchSet) WatchCh(ctx context.Context) <-chan error {
+ // Create the outgoing channel
+ triggerCh := make(chan error, 1)
+
+ // Create a goroutine to collect the error from WatchCtx
+ go func() {
+ triggerCh <- w.WatchCtx(ctx)
+ }()
+
+ return triggerCh
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch_few.go b/vendor/github.com/hashicorp/go-memdb/watch_few.go
new file mode 100644
index 000000000..b211eeea2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/watch_few.go
@@ -0,0 +1,117 @@
+package memdb
+
+//go:generate sh -c "go run watch-gen/main.go >watch_few.go"
+
+import (
+ "context"
+)
+
+// aFew gives the number of watchers that watchFew is wired to support. You
+// must always pass a full slice of this length, but unused channels can be nil.
+const aFew = 32
+
+// watchFew is used if there are only a few watchers as a performance
+// optimization.
+func watchFew(ctx context.Context, ch []<-chan struct{}) error {
+ select {
+
+ case <-ch[0]:
+ return nil
+
+ case <-ch[1]:
+ return nil
+
+ case <-ch[2]:
+ return nil
+
+ case <-ch[3]:
+ return nil
+
+ case <-ch[4]:
+ return nil
+
+ case <-ch[5]:
+ return nil
+
+ case <-ch[6]:
+ return nil
+
+ case <-ch[7]:
+ return nil
+
+ case <-ch[8]:
+ return nil
+
+ case <-ch[9]:
+ return nil
+
+ case <-ch[10]:
+ return nil
+
+ case <-ch[11]:
+ return nil
+
+ case <-ch[12]:
+ return nil
+
+ case <-ch[13]:
+ return nil
+
+ case <-ch[14]:
+ return nil
+
+ case <-ch[15]:
+ return nil
+
+ case <-ch[16]:
+ return nil
+
+ case <-ch[17]:
+ return nil
+
+ case <-ch[18]:
+ return nil
+
+ case <-ch[19]:
+ return nil
+
+ case <-ch[20]:
+ return nil
+
+ case <-ch[21]:
+ return nil
+
+ case <-ch[22]:
+ return nil
+
+ case <-ch[23]:
+ return nil
+
+ case <-ch[24]:
+ return nil
+
+ case <-ch[25]:
+ return nil
+
+ case <-ch[26]:
+ return nil
+
+ case <-ch[27]:
+ return nil
+
+ case <-ch[28]:
+ return nil
+
+ case <-ch[29]:
+ return nil
+
+ case <-ch[30]:
+ return nil
+
+ case <-ch[31]:
+ return nil
+
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 000000000..be2cc4dfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
new file mode 100644
index 000000000..a86c8539e
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -0,0 +1,177 @@
+package simplelru
+
+import (
+ "container/list"
+ "errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread-safe, fixed-size LRU cache
+type LRU struct {
+ size int
+ evictList *list.List
+ items map[interface{}]*list.Element
+ onEvict EvictCallback
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+ key interface{}
+ value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+ if size <= 0 {
+ return nil, errors.New("must provide a positive size")
+ }
+ c := &LRU{
+ size: size,
+ evictList: list.New(),
+ items: make(map[interface{}]*list.Element),
+ onEvict: onEvict,
+ }
+ return c, nil
+}
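+
+// Illustrative usage sketch: a 128-entry cache with an eviction callback.
+//
+//    l, err := NewLRU(128, func(k, v interface{}) {
+//        fmt.Println("evicted:", k)
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    l.Add("a", 1)
+//    if v, ok := l.Get("a"); ok {
+//        fmt.Println(v)
+//    }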
+
+// Purge is used to completely clear the cache.
+func (c *LRU) Purge() {
+ for k, v := range c.items {
+ if c.onEvict != nil {
+ c.onEvict(k, v.Value.(*entry).value)
+ }
+ delete(c.items, k)
+ }
+ c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *LRU) Add(key, value interface{}) (evicted bool) {
+ // Check for existing item
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ ent.Value.(*entry).value = value
+ return false
+ }
+
+ // Add new item
+ ent := &entry{key, value}
+ c.items[key] = c.evictList.PushFront(ent)
+
+ // Verify size not exceeded
+ evict := c.evictList.Len() > c.size
+ if evict {
+ c.removeOldest()
+ }
+ return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ if ent.Value.(*entry) == nil {
+ return nil, false
+ }
+ return ent.Value.(*entry).value, true
+ }
+ return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+ _, ok = c.items[key]
+ return ok
+}
+
+// Peek returns the key's value (or nil if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+ var ent *list.Element
+ if ent, ok = c.items[key]; ok {
+ return ent.Value.(*entry).value, true
+ }
+ return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning whether the
+// key was contained.
+func (c *LRU) Remove(key interface{}) (present bool) {
+ if ent, ok := c.items[key]; ok {
+ c.removeElement(ent)
+ return true
+ }
+ return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// GetOldest returns the oldest entry.
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+ keys := make([]interface{}, len(c.items))
+ i := 0
+ for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+ keys[i] = ent.Value.(*entry).key
+ i++
+ }
+ return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+ return c.evictList.Len()
+}
+
+// Resize changes the cache size.
+func (c *LRU) Resize(size int) (evicted int) {
+ diff := c.Len() - size
+ if diff < 0 {
+ diff = 0
+ }
+ for i := 0; i < diff; i++ {
+ c.removeOldest()
+ }
+ c.size = size
+ return diff
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ }
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU) removeElement(e *list.Element) {
+ c.evictList.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.items, kv.key)
+ if c.onEvict != nil {
+ c.onEvict(kv.key, kv.value)
+ }
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
new file mode 100644
index 000000000..92d70934d
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -0,0 +1,39 @@
+package simplelru
+
+// LRUCache is the interface for simple LRU cache.
+type LRUCache interface {
+ // Adds a value to the cache, updates the "recently used"-ness of the
+ // key, and returns true if an eviction occurred.
+ Add(key, value interface{}) bool
+
+ // Returns key's value from the cache and
+ // updates the "recently used"-ness of the key. #value, isFound
+ Get(key interface{}) (value interface{}, ok bool)
+
+ // Checks if a key exists in cache without updating the recent-ness.
+ Contains(key interface{}) (ok bool)
+
+ // Returns key's value without updating the "recently used"-ness of the key.
+ Peek(key interface{}) (value interface{}, ok bool)
+
+ // Removes a key from the cache.
+ Remove(key interface{}) bool
+
+ // Removes the oldest entry from cache.
+ RemoveOldest() (interface{}, interface{}, bool)
+
+ // Returns the oldest entry from the cache. #key, value, isFound
+ GetOldest() (interface{}, interface{}, bool)
+
+ // Returns a slice of the keys in the cache, from oldest to newest.
+ Keys() []interface{}
+
+ // Returns the number of items in the cache.
+ Len() int
+
+ // Clears all cache entries.
+ Purge()
+
+ // Resizes cache, returning number evicted
+ Resize(int) int
+}
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 244ee19c4..af2ef6395 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -27,6 +27,16 @@ Use the links above for more information on each.
# changelog
+* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1)
+ * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079
+ * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059
+ * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080
+ * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086
+ * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090
+ * flate: Faster load+store https://github.com/klauspost/compress/pull/1104
+ * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101
+ * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103
+
* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
* Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
* fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
@@ -36,6 +46,9 @@ Use the links above for more information on each.
* flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
* flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
+
+ See changes to v1.17.x
+
* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
* zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
* s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
@@ -102,7 +115,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
-
+
+See changes to v1.16.x
@@ -669,3 +683,4 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
# license
This code is licensed under the same conditions as the original Go code. See LICENSE file.
+
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 4e92f5998..57d17eeab 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -421,7 +421,9 @@ func (d *compressor) deflateLazy() {
d.h = newHuffmanEncoder(maxFlateBlockTokens)
}
var tmp [256]uint16
- for _, v := range d.window[s.index:d.windowEnd] {
+ toIndex := d.window[s.index:d.windowEnd]
+ toIndex = toIndex[:min(len(toIndex), maxFlateBlockTokens)]
+ for _, v := range toIndex {
tmp[v]++
}
d.h.generate(tmp[:], 15)
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 03a179697..7151140cc 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -646,7 +646,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
w.lastHeader = 0
}
- numLiterals, numOffsets := w.indexTokens(tokens, fillReuse && !sync)
+ numLiterals, numOffsets := w.indexTokens(tokens, true)
extraBits := 0
ssize, storable := w.storedSize(input)
@@ -781,7 +781,7 @@ func (w *huffmanBitWriter) fillTokens() {
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
-func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
+func (w *huffmanBitWriter) indexTokens(t *tokens, alwaysEOB bool) (numLiterals, numOffsets int) {
//copy(w.literalFreq[:], t.litHist[:])
*(*[256]uint16)(w.literalFreq[:]) = t.litHist
//copy(w.literalFreq[256:], t.extraHist[:])
@@ -791,9 +791,10 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num
if t.n == 0 {
return
}
- if filled {
- return maxNumLit, maxNumDist
+ if alwaysEOB {
+ w.literalFreq[endBlockMarker] = 1
}
+
// get the number of literals
numLiterals = len(w.literalFreq)
for w.literalFreq[numLiterals-1] == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
index 90b74f7ac..455ed3e2b 100644
--- a/vendor/github.com/klauspost/compress/flate/stateless.go
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -61,13 +61,19 @@ var bitWriterPool = sync.Pool{
},
}
+// tokensPool contains tokens struct objects that can be reused
+var tokensPool = sync.Pool{
+ New: func() any {
+ return &tokens{}
+ },
+}
+
// StatelessDeflate allows compressing directly to a Writer without retaining state.
// When returning everything will be flushed.
// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
// Longer dictionaries will be truncated and will still produce valid output.
// Sending nil dictionary is perfectly fine.
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
- var dst tokens
bw := bitWriterPool.Get().(*huffmanBitWriter)
bw.reset(out)
defer func() {
@@ -91,6 +97,12 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
// For subsequent loops, keep shallow dict reference to avoid alloc+copy.
var inDict []byte
+ dst := tokensPool.Get().(*tokens)
+ dst.Reset()
+ defer func() {
+ tokensPool.Put(dst)
+ }()
+
for len(in) > 0 {
todo := in
if len(inDict) > 0 {
@@ -113,9 +125,9 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
}
// Compress
if len(inDict) == 0 {
- statelessEnc(&dst, todo, int16(len(dict)))
+ statelessEnc(dst, todo, int16(len(dict)))
} else {
- statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
+ statelessEnc(dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
}
isEof := eof && len(in) == 0
@@ -129,7 +141,7 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
// If we removed less than 1/16th, huffman compress the block.
bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
} else {
- bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+ bw.writeBlockDynamic(dst, isEof, uncompressed, len(in) == 0)
}
if len(in) > 0 {
// Retain a dict if we have more
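The stateless.go change above stops allocating the large `tokens` scratch struct on every call and instead reuses it through a sync.Pool. A sketch of exercising this path through the exported API (`NewStatelessWriter` wraps `StatelessDeflate`):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	// Each Write is compressed as an independent block with no window
	// carried between calls; the pooled tokens struct serves these
	// one-shot encodes without a fresh allocation per call.
	w := flate.NewStatelessWriter(&buf)
	if _, err := w.Write([]byte("stateless deflate keeps no window between writes")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println("compressed bytes:", buf.Len())
}
```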
diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml
index 6acf8ab1e..104dc2440 100644
--- a/vendor/github.com/spf13/cobra/.golangci.yml
+++ b/vendor/github.com/spf13/cobra/.golangci.yml
@@ -57,3 +57,10 @@ linters:
- common-false-positives
- legacy
- std-error-handling
+ settings:
+ govet:
+ # Disable buildtag check to allow dual build tag syntax (both //go:build and // +build).
+ # This is necessary for Go 1.15 compatibility since //go:build was introduced in Go 1.17.
+ # This can be removed once Cobra requires Go 1.17 or higher.
+ disable:
+ - buildtag
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 78088db69..c05fed45a 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -557,7 +557,7 @@ func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
}
}
-var minUsagePadding = 25
+const minUsagePadding = 25
// UsagePadding return padding for the usage.
func (c *Command) UsagePadding() int {
@@ -567,7 +567,7 @@ func (c *Command) UsagePadding() int {
return c.parent.commandsMaxUseLen
}
-var minCommandPathPadding = 11
+const minCommandPathPadding = 11
// CommandPathPadding return padding for the command path.
func (c *Command) CommandPathPadding() int {
@@ -577,7 +577,7 @@ func (c *Command) CommandPathPadding() int {
return c.parent.commandsMaxCommandPathLen
}
-var minNamePadding = 11
+const minNamePadding = 11
// NamePadding returns padding for the name.
func (c *Command) NamePadding() int {
@@ -1939,7 +1939,7 @@ type tmplFunc struct {
fn func(io.Writer, interface{}) error
}
-var defaultUsageTemplate = `Usage:{{if .Runnable}}
+const defaultUsageTemplate = `Usage:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
@@ -2039,7 +2039,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error {
return nil
}
-var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+const defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
@@ -2061,7 +2061,7 @@ func defaultHelpFunc(w io.Writer, in interface{}) error {
return nil
}
-var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+const defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
`
// defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync.
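The `var` → `const` conversions above are behavior-preserving: none of these values are ever reassigned, and `const` makes that invariant compiler-enforced. A minimal illustration:

```go
package main

import "fmt"

const minPadding = 25 // reassignment anywhere in the package is now a compile error

func main() {
	// minPadding = 30 // would not compile: cannot assign to minPadding
	fmt.Println(minPadding)
}
```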
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index 2f45dbc86..f69fd7546 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -144,8 +144,8 @@ func (g *Group) SetLimit(n int) {
g.sem = nil
return
}
- if len(g.sem) != 0 {
- panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem)))
+ if active := len(g.sem); active != 0 {
+ panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active))
}
g.sem = make(chan token, n)
}
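The errgroup change only hoists `len(g.sem)` into a local so the panic message reports exactly the count that was tested. For reference, `SetLimit` must be called before any `Go` call; a typical bounded-concurrency sketch:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	g.SetLimit(2) // calling this while goroutines are active panics, per the check above
	for i := 0; i < 5; i++ {
		i := i // capture loop variable (pre-Go 1.22 idiom)
		g.Go(func() error {
			fmt.Println("task", i)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}
```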
diff --git a/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go b/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go
new file mode 100644
index 000000000..40ec27d87
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go
@@ -0,0 +1,303 @@
+/*
+Copyright 2013 Google Inc. All Rights Reserved.
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package verbosity
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// New returns a struct that implements -v and -vmodule support. Changing and
+// checking these settings is thread-safe, with all concurrency issues handled
+// internally.
+func New() *VState {
+ vs := new(VState)
+
+ // The two fields must have a pointer to the overall struct for their
+ // implementation of Set.
+ vs.vmodule.vs = vs
+ vs.verbosity.vs = vs
+
+ return vs
+}
+
+// Value is an extension that makes it possible to use the values in pflag.
+type Value interface {
+ flag.Value
+ Type() string
+}
+
+func (vs *VState) V() Value {
+ return &vs.verbosity
+}
+
+func (vs *VState) VModule() Value {
+ return &vs.vmodule
+}
+
+// VState contains settings and state. Some of its fields can be accessed
+// through atomic read/writes, in other cases a mutex must be held.
+type VState struct {
+ mu sync.Mutex
+
+ // These flags are modified only under lock, although verbosity may be fetched
+ // safely using atomic.LoadInt32.
+ vmodule moduleSpec // The state of the -vmodule flag.
+ verbosity levelSpec // V logging level, the value of the -v flag.
+
+ // pcs is used in V to avoid an allocation when computing the caller's PC.
+ pcs [1]uintptr
+ // vmap is a cache of the V Level for each V() call site, identified by PC.
+ // It is wiped whenever the vmodule flag changes state.
+ vmap map[uintptr]Level
+ // filterLength stores the length of the vmodule filter chain. If greater
+ // than zero, it means vmodule is enabled. It may be read safely
+ // using atomic.LoadInt32, but is only modified under mu.
+ filterLength int32
+}
+
+// Level must be an int32 to support atomic read/writes.
+type Level int32
+
+type levelSpec struct {
+ vs *VState
+ l Level
+}
+
+// get returns the value of the level.
+func (l *levelSpec) get() Level {
+ return Level(atomic.LoadInt32((*int32)(&l.l)))
+}
+
+// set sets the value of the level.
+func (l *levelSpec) set(val Level) {
+ atomic.StoreInt32((*int32)(&l.l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *levelSpec) String() string {
+ return strconv.FormatInt(int64(l.l), 10)
+}
+
+// Get is part of the flag.Getter interface. It returns the
+// verbosity level as int32.
+func (l *levelSpec) Get() interface{} {
+ return int32(l.l)
+}
+
+// Type is part of pflag.Value.
+func (l *levelSpec) Type() string {
+ return "Level"
+}
+
+// Set is part of the flag.Value interface.
+func (l *levelSpec) Set(value string) error {
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ l.vs.mu.Lock()
+ defer l.vs.mu.Unlock()
+ l.vs.set(Level(v), l.vs.vmodule.filter, false)
+ return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+ vs *VState
+ filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+ pattern string
+ literal bool // The pattern is a literal string
+ level Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+ if m.literal {
+ return file == m.pattern
+ }
+ match, _ := filepath.Match(m.pattern, file)
+ return match
+}
+
+func (m *moduleSpec) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ // Empty instances don't have and don't need a lock (can
+ // happen when flag uses introspection).
+ if m.vs != nil {
+ m.vs.mu.Lock()
+ defer m.vs.mu.Unlock()
+ }
+ var b bytes.Buffer
+ for i, f := range m.filter {
+ if i > 0 {
+ b.WriteRune(',')
+ }
+ fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+ }
+ return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+ return nil
+}
+
+// Type is part of pflag.Value
+func (m *moduleSpec) Type() string {
+ return "pattern=N,..."
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+ // Set sets the module value.
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+ var filter []modulePat
+ for _, pat := range strings.Split(value, ",") {
+ if len(pat) == 0 {
+ // Empty strings such as from a trailing comma can be ignored.
+ continue
+ }
+ patLev := strings.Split(pat, "=")
+ if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+ return errVmoduleSyntax
+ }
+ pattern := patLev[0]
+ v, err := strconv.ParseInt(patLev[1], 10, 32)
+ if err != nil {
+ return errors.New("syntax error: expect comma-separated list of filename=N")
+ }
+ if v < 0 {
+ return errors.New("negative value for vmodule level")
+ }
+ if v == 0 {
+ continue // Ignore. It's harmless but no point in paying the overhead.
+ }
+ // TODO: check syntax of filter?
+ filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+ }
+ m.vs.mu.Lock()
+ defer m.vs.mu.Unlock()
+ m.vs.set(m.vs.verbosity.l, filter, true)
+ return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+ return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// set sets a consistent state for V logging.
+// The mutex must be held.
+func (vs *VState) set(l Level, filter []modulePat, setFilter bool) {
+ // Turn verbosity off so V will not fire while we are in transition.
+ vs.verbosity.set(0)
+ // Ditto for filter length.
+ atomic.StoreInt32(&vs.filterLength, 0)
+
+ // Set the new filters and wipe the pc->Level map if the filter has changed.
+ if setFilter {
+ vs.vmodule.filter = filter
+ vs.vmap = make(map[uintptr]Level)
+ }
+
+ // Things are consistent now, so enable filtering and verbosity.
+ // They are enabled in order opposite to that in V.
+ atomic.StoreInt32(&vs.filterLength, int32(len(filter)))
+ vs.verbosity.set(l)
+}
+
+// Enabled checks whether logging is enabled at the given level. This must be
+// called with depth=0 when the caller of enabled will do the logging and
+// higher values when more stack levels need to be skipped.
+//
+// The mutex will be locked only if needed.
+func (vs *VState) Enabled(level Level, depth int) bool {
+ // This function tries hard to be cheap unless there's work to do.
+ // The fast path is two atomic loads and compares.
+
+ // Here is a cheap but safe test to see if V logging is enabled globally.
+ if vs.verbosity.get() >= level {
+ return true
+ }
+
+ // It's off globally but vmodule may still be set.
+ // Here is another cheap but safe test to see if vmodule is enabled.
+ if atomic.LoadInt32(&vs.filterLength) > 0 {
+ // Now we need a proper lock to use the logging structure. The pcs field
+ // is shared so we must lock before accessing it. This is fairly expensive,
+ // but if V logging is enabled we're slow anyway.
+ vs.mu.Lock()
+ defer vs.mu.Unlock()
+ if runtime.Callers(depth+2, vs.pcs[:]) == 0 {
+ return false
+ }
+ // runtime.Callers returns "return PCs", but we want
+ // to look up the symbolic information for the call,
+ // so subtract 1 from the PC. runtime.CallersFrames
+ // would be cleaner, but allocates.
+ pc := vs.pcs[0] - 1
+ v, ok := vs.vmap[pc]
+ if !ok {
+ v = vs.setV(pc)
+ }
+ return v >= level
+ }
+ return false
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// Mutex is held.
+func (vs *VState) setV(pc uintptr) Level {
+ fn := runtime.FuncForPC(pc)
+ file, _ := fn.FileLine(pc)
+ // The file is something like /a/b/c/d.go. We want just the d.
+ file = strings.TrimSuffix(file, ".go")
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ file = file[slash+1:]
+ }
+ for _, filter := range vs.vmodule.filter {
+ if filter.match(file) {
+ vs.vmap[pc] = filter.level
+ return filter.level
+ }
+ }
+ vs.vmap[pc] = 0
+ return 0
+}
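A standalone sketch of the `-vmodule` matching rules that `modulePat.match` and `isLiteral` implement above; the `matchPattern` helper here is hypothetical, for illustration only:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// matchPattern mirrors modulePat.match: plain string comparison for literal
// patterns, filepath.Match when metacharacters are present.
func matchPattern(pattern, file string) bool {
	if !strings.ContainsAny(pattern, `\*?[]`) {
		return file == pattern
	}
	ok, _ := filepath.Match(pattern, file)
	return ok
}

func main() {
	// -vmodule matches against basenames stripped of ".go", per setV above.
	fmt.Println(matchPattern("recordio", "recordio")) // true: literal compare
	fmt.Println(matchPattern("gfs*", "gfs_client"))   // true: wildcard match
	fmt.Println(matchPattern("gfs*", "recordio"))     // false
}
```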
diff --git a/vendor/k8s.io/klog/v2/textlogger/options.go b/vendor/k8s.io/klog/v2/textlogger/options.go
new file mode 100644
index 000000000..b1c4eefb3
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/textlogger/options.go
@@ -0,0 +1,154 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package textlogger
+
+import (
+ "flag"
+ "io"
+ "os"
+ "strconv"
+ "time"
+
+ "k8s.io/klog/v2/internal/verbosity"
+)
+
+// Config influences logging in a text logger. To make this configurable via
+// command line flags, instantiate this once per program and use AddFlags to
+// bind command line flags to the instance before passing it to NewLogger.
+//
+// Must be constructed with NewConfig.
+type Config struct {
+ vstate *verbosity.VState
+ co configOptions
+}
+
+// Verbosity returns a value instance that can be used to query (via String) or
+// modify (via Set) the verbosity threshold. This is thread-safe and can be
+// done at runtime.
+func (c *Config) Verbosity() flag.Value {
+ return c.vstate.V()
+}
+
+// VModule returns a value instance that can be used to query (via String) or
+// modify (via Set) the vmodule settings. This is thread-safe and can be done
+// at runtime.
+func (c *Config) VModule() flag.Value {
+ return c.vstate.VModule()
+}
+
+// ConfigOption implements functional parameters for NewConfig.
+type ConfigOption func(co *configOptions)
+
+type configOptions struct {
+ verbosityFlagName string
+ vmoduleFlagName string
+ verbosityDefault int
+ fixedTime *time.Time
+ unwind func(int) (string, int)
+ output io.Writer
+}
+
+// VerbosityFlagName overrides the default -v for the verbosity level.
+func VerbosityFlagName(name string) ConfigOption {
+ return func(co *configOptions) {
+ co.verbosityFlagName = name
+ }
+}
+
+// VModuleFlagName overrides the default -vmodule for the per-module
+// verbosity levels.
+func VModuleFlagName(name string) ConfigOption {
+ return func(co *configOptions) {
+ co.vmoduleFlagName = name
+ }
+}
+
+// Verbosity overrides the default verbosity level of 0.
+// See https://github.com/kubernetes/community/blob/9406b4352fe2d5810cb21cc3cb059ce5886de157/contributors/devel/sig-instrumentation/logging.md#logging-conventions
+// for log level conventions in Kubernetes.
+func Verbosity(level int) ConfigOption {
+ return func(co *configOptions) {
+ co.verbosityDefault = level
+ }
+}
+
+// Output overrides stderr as the output stream.
+func Output(output io.Writer) ConfigOption {
+ return func(co *configOptions) {
+ co.output = output
+ }
+}
+
+// FixedTime overrides the actual time with a fixed time. Useful only for testing.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FixedTime(ts time.Time) ConfigOption {
+ return func(co *configOptions) {
+ co.fixedTime = &ts
+ }
+}
+
+// Backtrace overrides the default mechanism for determining the call site.
+// The callback is invoked with the number of function calls between itself
+// and the call site. It must return the file name and line number. An empty
+// file name indicates that the information is unknown.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func Backtrace(unwind func(skip int) (filename string, line int)) ConfigOption {
+ return func(co *configOptions) {
+ co.unwind = unwind
+ }
+}
+
+// NewConfig returns a configuration with recommended defaults and optional
+// modifications. Command line flags are not bound to any FlagSet yet.
+func NewConfig(opts ...ConfigOption) *Config {
+ c := &Config{
+ vstate: verbosity.New(),
+ co: configOptions{
+ verbosityFlagName: "v",
+ vmoduleFlagName: "vmodule",
+ verbosityDefault: 0,
+ unwind: runtimeBacktrace,
+ output: os.Stderr,
+ },
+ }
+ for _, opt := range opts {
+ opt(&c.co)
+ }
+
+ // Cannot fail for this input.
+ _ = c.Verbosity().Set(strconv.FormatInt(int64(c.co.verbosityDefault), 10))
+ return c
+}
+
+// AddFlags registers the command line flags that control the configuration.
+//
+// The default flag names are the same as in klog, so unless those defaults
+// are changed, either klog.InitFlags or Config.AddFlags can be used for the
+// same flag set, but not both.
+func (c *Config) AddFlags(fs *flag.FlagSet) {
+ fs.Var(c.Verbosity(), c.co.verbosityFlagName, "number for the log level verbosity of the testing logger")
+ fs.Var(c.VModule(), c.co.vmoduleFlagName, "comma-separated list of pattern=N log level settings for files matching the patterns")
+}
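How the `Config` above is typically wired up, using only the exported API (`NewConfig`, the `Verbosity` option, `AddFlags`, and `NewLogger` from the next file in this diff):

```go
package main

import (
	"flag"

	"k8s.io/klog/v2/textlogger"
)

func main() {
	// One Config per program; AddFlags binds -v and -vmodule (or overridden
	// names) to it, and both can still be changed at runtime through
	// Verbosity().Set and VModule().Set.
	c := textlogger.NewConfig(textlogger.Verbosity(2))
	c.AddFlags(flag.CommandLine)
	flag.Parse()

	logger := textlogger.NewLogger(c)
	logger.V(1).Info("visible at -v=1 and above")
	logger.V(5).Info("suppressed unless -v>=5 or a vmodule override matches")
}
```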
diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger.go b/vendor/k8s.io/klog/v2/textlogger/textlogger.go
new file mode 100644
index 000000000..0b55a2994
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/textlogger/textlogger.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+Copyright 2020 Intel Corporation.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package textlogger contains an implementation of the logr interface which is
+// producing the exact same output as klog. It does not route output through
+// klog (i.e. ignores [k8s.io/klog/v2.InitFlags]). Instead, all settings must be
+// configured through its own [NewConfig] and [Config.AddFlags].
+package textlogger
+
+import (
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-logr/logr"
+
+ "k8s.io/klog/v2/internal/buffer"
+ "k8s.io/klog/v2/internal/serialize"
+ "k8s.io/klog/v2/internal/severity"
+ "k8s.io/klog/v2/internal/verbosity"
+)
+
+var (
+ // TimeNow is used to retrieve the current time. May be changed for testing.
+ TimeNow = time.Now
+)
+
+const (
+ // nameKey is used to log the `WithName` values as an additional attribute.
+ nameKey = "logger"
+)
+
+// NewLogger constructs a new logger.
+//
+// Verbosity can be modified at any time through the Config.V and
+// Config.VModule API.
+func NewLogger(c *Config) logr.Logger {
+ return logr.New(&tlogger{
+ values: nil,
+ config: c,
+ })
+}
+
+type tlogger struct {
+ callDepth int
+
+ // hasPrefix is true if the first entry in values is the special
+ // nameKey key/value. Such an entry gets added and later updated in
+ // WithName.
+ hasPrefix bool
+
+ values []interface{}
+ groups string
+ config *Config
+}
+
+func (l *tlogger) Init(info logr.RuntimeInfo) {
+ l.callDepth = info.CallDepth
+}
+
+func (l *tlogger) WithCallDepth(depth int) logr.LogSink {
+ newLogger := *l
+ newLogger.callDepth += depth
+ return &newLogger
+}
+
+func (l *tlogger) Enabled(level int) bool {
+ return l.config.vstate.Enabled(verbosity.Level(level), 1+l.callDepth)
+}
+
+func (l *tlogger) Info(_ int, msg string, kvList ...interface{}) {
+ l.print(nil, severity.InfoLog, msg, kvList)
+}
+
+func (l *tlogger) Error(err error, msg string, kvList ...interface{}) {
+ l.print(err, severity.ErrorLog, msg, kvList)
+}
+
+func (l *tlogger) print(err error, s severity.Severity, msg string, kvList []interface{}) {
+ // Determine caller.
+ // +1 for this frame, +1 for Info/Error.
+ skip := l.callDepth + 2
+ file, line := l.config.co.unwind(skip)
+ if file == "" {
+ file = "???"
+ line = 1
+ } else if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ file = file[slash+1:]
+ }
+ l.printWithInfos(file, line, time.Now(), err, s, msg, kvList)
+}
+
+func runtimeBacktrace(skip int) (string, int) {
+ _, file, line, ok := runtime.Caller(skip + 1)
+ if !ok {
+ return "", 0
+ }
+ return file, line
+}
+
+func (l *tlogger) printWithInfos(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) {
+ // Only create a new buffer if we don't have one cached.
+ b := buffer.GetBuffer()
+ defer buffer.PutBuffer(b)
+
+ // Format header.
+ if l.config.co.fixedTime != nil {
+ now = *l.config.co.fixedTime
+ }
+ b.FormatHeader(s, file, line, now)
+
+ // The message is always quoted, even if it contains line breaks.
+ // If developers want multi-line output, they should use a small, fixed
+ // message and put the multi-line output into a value.
+ b.WriteString(strconv.Quote(msg))
+ if err != nil {
+ serialize.KVFormat(&b.Buffer, "err", err)
+ }
+ serialize.MergeAndFormatKVs(&b.Buffer, l.values, kvList)
+ if b.Len() == 0 || b.Bytes()[b.Len()-1] != '\n' {
+ b.WriteByte('\n')
+ }
+ _, _ = l.config.co.output.Write(b.Bytes())
+}
+
+func (l *tlogger) WriteKlogBuffer(data []byte) {
+ _, _ = l.config.co.output.Write(data)
+}
+
+// WithName returns a new logr.Logger with the specified name appended. klogr
+// uses '/' characters to separate name elements. Callers should not pass '/'
+// in the provided name string, but this library does not actually enforce that.
+func (l *tlogger) WithName(name string) logr.LogSink {
+ clone := *l
+ if l.hasPrefix {
+ // Copy slice and modify value. No length checks and type
+ // assertions are needed because hasPrefix is only true if the
+ // first two elements exist and are key/value strings.
+ v := make([]interface{}, 0, len(l.values))
+ v = append(v, l.values...)
+ prefix, _ := v[1].(string)
+ v[1] = prefix + "." + name
+ clone.values = v
+ } else {
+ // Prepend a new key/value pair.
+ v := make([]interface{}, 0, 2+len(l.values))
+ v = append(v, nameKey, name)
+ v = append(v, l.values...)
+ clone.values = v
+ clone.hasPrefix = true
+ }
+ return &clone
+}
+
+func (l *tlogger) WithValues(kvList ...interface{}) logr.LogSink {
+ clone := *l
+ clone.values = serialize.WithValues(l.values, kvList)
+ return &clone
+}
+
+// KlogBufferWriter is implemented by the textlogger LogSink.
+type KlogBufferWriter interface {
+ // WriteKlogBuffer takes a pre-formatted buffer prepared by klog and
+ // writes it unchanged to the output stream. Can be used with
+ // klog.WriteKlogBuffer when setting a logger through
+ // klog.SetLoggerWithOptions.
+ WriteKlogBuffer([]byte)
+}
+
+var _ logr.LogSink = &tlogger{}
+var _ logr.CallDepthLogSink = &tlogger{}
+var _ KlogBufferWriter = &tlogger{}
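A short sketch of the `WithName`/`WithValues` behavior implemented above: name segments are joined with "." and emitted under the reserved `logger` key:

```go
package main

import "k8s.io/klog/v2/textlogger"

func main() {
	logger := textlogger.NewLogger(textlogger.NewConfig())
	l := logger.WithName("server").WithName("http").WithValues("port", 8080)
	// Emits something like: ... "listening" logger="server.http" port=8080
	l.Info("listening")
}
```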
diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go b/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go
new file mode 100644
index 000000000..c888ef8a6
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go
@@ -0,0 +1,52 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package textlogger
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/go-logr/logr"
+
+ "k8s.io/klog/v2/internal/serialize"
+ "k8s.io/klog/v2/internal/sloghandler"
+)
+
+func (l *tlogger) Handle(ctx context.Context, record slog.Record) error {
+ return sloghandler.Handle(ctx, record, l.groups, l.printWithInfos)
+}
+
+func (l *tlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+ clone := *l
+ clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs))
+ return &clone
+}
+
+func (l *tlogger) WithGroup(name string) logr.SlogSink {
+ clone := *l
+ if clone.groups != "" {
+ clone.groups += "." + name
+ } else {
+ clone.groups = name
+ }
+ return &clone
+}
+
+var _ logr.SlogSink = &tlogger{}
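Because `tlogger` implements `logr.SlogSink`, an slog front end can drive it directly; a sketch assuming Go 1.21+ and go-logr v1.3+ for `logr.ToSlogHandler`:

```go
//go:build go1.21

package main

import (
	"log/slog"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2/textlogger"
)

func main() {
	logger := textlogger.NewLogger(textlogger.NewConfig())
	// ToSlogHandler detects the SlogSink implementation above, so slog
	// records flow through Handle/WithAttrs/WithGroup without conversion.
	slogger := slog.New(logr.ToSlogHandler(logger))
	slogger.With("component", "demo").WithGroup("http").Info("ready", "port", 8080)
}
```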
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ed132d3cc..6165073e5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -200,6 +200,25 @@ github.com/containers/ocicrypt/keywrap/pkcs7
github.com/containers/ocicrypt/spec
github.com/containers/ocicrypt/utils
github.com/containers/ocicrypt/utils/keyprovider
+# github.com/cucumber/gherkin/go/v26 v26.2.0
+## explicit; go 1.19
+github.com/cucumber/gherkin/go/v26
+# github.com/cucumber/godog v0.15.1
+## explicit; go 1.16
+github.com/cucumber/godog
+github.com/cucumber/godog/colors
+github.com/cucumber/godog/formatters
+github.com/cucumber/godog/internal/builder
+github.com/cucumber/godog/internal/flags
+github.com/cucumber/godog/internal/formatters
+github.com/cucumber/godog/internal/models
+github.com/cucumber/godog/internal/parser
+github.com/cucumber/godog/internal/storage
+github.com/cucumber/godog/internal/tags
+github.com/cucumber/godog/internal/utils
+# github.com/cucumber/messages/go/v21 v21.0.1
+## explicit; go 1.19
+github.com/cucumber/messages/go/v21
# github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467
## explicit
github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer
@@ -354,6 +373,9 @@ github.com/gobwas/glob/syntax/ast
github.com/gobwas/glob/syntax/lexer
github.com/gobwas/glob/util/runes
github.com/gobwas/glob/util/strings
+# github.com/gofrs/uuid v4.3.1+incompatible
+## explicit
+github.com/gofrs/uuid
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
github.com/gogo/protobuf/proto
@@ -484,9 +506,18 @@ github.com/h2non/go-is-svg
# github.com/hashicorp/errwrap v1.1.0
## explicit
github.com/hashicorp/errwrap
+# github.com/hashicorp/go-immutable-radix v1.3.1
+## explicit
+github.com/hashicorp/go-immutable-radix
+# github.com/hashicorp/go-memdb v1.3.4
+## explicit; go 1.13
+github.com/hashicorp/go-memdb
# github.com/hashicorp/go-multierror v1.1.1
## explicit; go 1.13
github.com/hashicorp/go-multierror
+# github.com/hashicorp/golang-lru v0.5.4
+## explicit; go 1.12
+github.com/hashicorp/golang-lru/simplelru
# github.com/huandu/xstrings v1.5.0
## explicit; go 1.12
github.com/huandu/xstrings
@@ -509,7 +540,7 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.18.1
+# github.com/klauspost/compress v1.18.2
## explicit; go 1.23
github.com/klauspost/compress
github.com/klauspost/compress/flate
@@ -761,7 +792,7 @@ github.com/smallstep/pkcs7/internal/legacy/x509
# github.com/spf13/cast v1.7.1
## explicit; go 1.19
github.com/spf13/cast
-# github.com/spf13/cobra v1.10.1
+# github.com/spf13/cobra v1.10.2
## explicit; go 1.15
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.10
@@ -1022,7 +1053,7 @@ golang.org/x/net/websocket
## explicit; go 1.24.0
golang.org/x/oauth2
golang.org/x/oauth2/internal
-# golang.org/x/sync v0.18.0
+# golang.org/x/sync v0.19.0
## explicit; go 1.24.0
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
@@ -1884,6 +1915,8 @@ k8s.io/klog/v2/internal/dbg
k8s.io/klog/v2/internal/serialize
k8s.io/klog/v2/internal/severity
k8s.io/klog/v2/internal/sloghandler
+k8s.io/klog/v2/internal/verbosity
+k8s.io/klog/v2/textlogger
# k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
## explicit; go 1.23
k8s.io/kube-openapi/pkg/cached