diff --git a/.env.bootstrap b/.env.bootstrap new file mode 100644 index 0000000..0bcbdb4 --- /dev/null +++ b/.env.bootstrap @@ -0,0 +1,16 @@ +GIT_CLONES="\ +https://github.com/forestrie/taskfiles.git#../taskfiles +https://github.com/robinbryce/go-datatrails-common.git#../go-datatrails-common +https://github.com/robinbryce/go-datatrails-merklelog.git#../go-datatrails-merklelog +https://github.com/forestrie/go-merklelog-datatrails.git#../go-merklelog-datatrails +https://github.com/datatrails/go-datatrails-simplehash.git#../go-datatrails-simplehash +https://github.com/datatrails/go-datatrails-serialization.git#../go-datatrails-serialization +https://github.com/robinbryce/go-merklelog-azure.git#../go-merklelog-azure +https://github.com/robinbryce/go-merklelog-fs.git#../go-merklelog-fs +https://github.com/robinbryce/go-merklelog-provider-testing.git#../go-merklelog-provider-testing +" + +export GIT_CHECKOUTS="\ +https://github.com/robinbryce/go-datatrails-common.git#../go-datatrails-common^robin/accept-uint64-reading-cose-keys +https://github.com/datatrails/go-datatrails-serialization.git#../go-datatrails-serialization@eventsv1/v0.0.3 +" diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml index eb003ce..7c1d546 100644 --- a/.github/workflows/push.yaml +++ b/.github/workflows/push.yaml @@ -7,7 +7,7 @@ on: [ jobs: build: - name: Quality Control + name: CI runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 @@ -32,28 +32,25 @@ jobs: version: 3.x repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Code quality checks + - name: Bootstrap run: | - # Note: it is by design that we don't use the builder - task format - task lint + task bootstrap + - name: Unit tests run: | # Note: it is by design that we don't use the builder task test:unit - name: Integration tests run: | + # Note: it is by design that we don't use the builder task test:integration - - name: Azurite logs - run: | - task azurite:logs - - name: Stop azurite - if: always() + - name: DataTrails public log tests run: | - task azurite:stop + # Note: it is by design that we don't use the builder + task test:datatrails:public systemtest: - name: System Test + name: Built binary tests runs-on: [ubuntu-latest] steps: - uses: actions/checkout@v3 @@ -74,15 +71,19 @@ jobs: - name: Build project run: | + task bootstrap task build export VERACITY_INSTALL=$(readlink -f ./veracity) >> $GITHUB_ENV + ./veracity --help + ./veracity --version - - name: System tests - run: | - echo "Veracity Installation Path: $VERACITY_INSTALL" - task test:system + # - name: Test the built binary + # run: | + # echo "Veracity Installation Path: $VERACITY_INSTALL" + # task bootstrap + # task test:binary - - name: Publish Test Report - uses: mikepenz/action-junit-report@v4 - with: - report_paths: ./tests/systemtest/res.xml + # - name: Publish Test Report + # uses: mikepenz/action-junit-report@v4 + # with: + # report_paths: ./tests/systemtest/res.xml diff --git a/.golangci.yml b/.golangci.yml index ede7cf0..6ad95c5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,120 +1,7 @@ -linters-settings: - funlen: - lines: 350 - statements: 135 - depguard: - list-type: blacklist - packages: - # logging is allowed only by logutils.Log, logrus - # is allowed to use only in logutils package - - github.com/sirupsen/logrus - dupl: - threshold: 100 - errorlint: - # Check whether fmt.Errorf uses the %w verb for formatting errors. - # See the https://github.com/polyfloyd/go-errorlint for caveats. 
- # Default: true - errorf: false - # Permit more than 1 %w verb, valid per Go 1.20 (Requires errorf:true) - # Default: true - errorf-multi: true - # Check for plain type assertions and type switches. - # Default: true - asserts: true - # Check for plain error comparisons. - # Default: true - comparison: true - exhaustive: - # Program elements to check for exhaustiveness. - # Default: [ switch ] - check: - - switch - - map - # Check switch statements in generated files also. - # Default: false - check-generated: true - # Presence of "default" case in switch statements satisfies exhaustiveness, - # even if all enum members are not listed. - # Default: false - default-signifies-exhaustive: true - # Enum members matching the supplied regex do not have to be listed in - # switch statements to satisfy exhaustiveness. - # Default: "" - ignore-enum-members: "Example.+" - # Enum types matching the supplied regex do not have to be listed in - # switch statements to satisfy exhaustiveness. - # Default: "" - ignore-enum-types: "Example.+" - # Consider enums only in package scopes, not in inner scopes. - # Default: false - package-scope-only: true - # Only run exhaustive check on switches with "//exhaustive:enforce" comment. - # Default: false - explicit-exhaustive-switch: false - # Only run exhaustive check on map literals with "//exhaustive:enforce" comment. - # Default: false - explicit-exhaustive-map: false - gci: - local-prefixes: github.com/datatrails/go-datatrails-merklelog - goconst: - min-len: 2 - min-occurrences: 2 - gocritic: - enabled-tags: - - performance - - style - - experimental - disabled-checks: - - wrapperFunc - gocognit: - min-complexity: 75 - gocyclo: - min-complexity: 10 - goimports: - local-prefixes: github.com/golangci/golangci-lint - golint: - min-confidence: 0 - govet: - check-shadowing: true - settings: - printf: - funcs: - - Infof - - Warnf - - Errorf - - Fatalf - lll: - line-length: 500 - maligned: - suggest-new: true - misspell: - locale: UK +run: + timeout: 5m + build-tags: "golangcilint unit integration e2e azurite" -# depguard (control upstream repos) not needed -# dupl - see ticket #3095 -# funlen - it is to anoying for test code and this sort of subjective judgement is what PR reviews are for -# exhaustive - see ticket #3096 -# gci - disabled as confusing and not really useful -# gochecknoglobals - not really useful -# goconst - see ticket #3097 -# goerr113 - disabled see https://github.com/Djarvur/go-err113/issues/10 -# gofumpt - not useful - confusing messages -# gomnd - see ticket #3116 -# govet - see ticket #3117 -# nilreturn onwardis not yet evaluated... -# maligned - this guards against performance issues due to accessing -# mis-aligned structs. We don't have direct evidence of this being a -# real problem for us. We use a lot of generated code in our hot -# paths anyway (we have no control over there layout). Until we get -# direct evidence this is hurting us, we prefer our stucts layed out -# logically and don't want to have to nolint tag everything. -# -# misspell - expected UK spelling with misspell, but customer facing text needs to be US. -# tagalign - suppress until we can get a golang code formatter that will fix this (cosmetic) -# -# WARN: typecheck cannot be disabled as golang-ci uses it internally to detect uncompilable code. -# Unfortunately the src/azb2c package triggers this erroneously so we add it to skip-dirs below. 
-
 linters:
   enable-all: true
   disable:
@@ -132,8 +19,7 @@ linters:
     - exhaustruct
     - forbidigo
     - forcetypeassert
-    # DONT re-enable funlen please
-    - funlen
+    - funlen # DONT re-enable funlen please
     - gci
     - gochecknoglobals
     - goconst
@@ -192,9 +78,82 @@ linters:
     - wsl
     - wrapcheck

-run:
-  build-tags:
-    - golangcilint
+linters-settings:
+  funlen:
+    lines: 350
+    statements: 135
+
+  depguard:
+    list-type: blacklist
+    packages:
+      - github.com/sirupsen/logrus # only allowed in logutils
+
+  dupl:
+    threshold: 100
+
+  errorlint:
+    errorf: false
+    errorf-multi: true
+    asserts: true
+    comparison: true
+
+  exhaustive:
+    check:
+      - switch
+      - map
+    check-generated: true
+    default-signifies-exhaustive: true
+    ignore-enum-members: "Example.+"
+    ignore-enum-types: "Example.+"
+    package-scope-only: true
+    explicit-exhaustive-switch: false
+    explicit-exhaustive-map: false
+
+  gci:
+    local-prefixes: github.com/datatrails/go-datatrails-merklelog
+
+  goconst:
+    min-len: 2
+    min-occurrences: 2
+
+  gocritic:
+    enabled-tags:
+      - performance
+      - style
+      - experimental
+    disabled-checks:
+      - wrapperFunc
+
+  gocognit:
+    min-complexity: 75
+
+  gocyclo:
+    min-complexity: 10
+
+  goimports:
+    local-prefixes: github.com/golangci/golangci-lint
+
+  golint:
+    min-confidence: 0
+
+  govet:
+    check-shadowing: true
+    settings:
+      printf:
+        funcs:
+          - Infof
+          - Warnf
+          - Errorf
+          - Fatalf
+
+  lll:
+    line-length: 500
+
+  maligned:
+    suggest-new: true
+
+  misspell:
+    locale: UK

 issues:
   exclude-rules:
diff --git a/Taskfile.dist.yml b/Taskfile.dist.yml
new file mode 100644
index 0000000..e7dda27
--- /dev/null
+++ b/Taskfile.dist.yml
@@ -0,0 +1,169 @@
+---
+version: "3"
+
+includes:
+  gotest:
+    taskfile: ../taskfiles/gotest.yml
+    # optional because the ../taskfiles dir is cloned via bootstrap below
+    optional: true
+    dir: .
+  azurite:
+    taskfile: ../taskfiles/azurite.yml
+    # optional because the ../taskfiles dir is cloned via bootstrap below
+    optional: true
+    dir: .
+
+tasks:
+  bootstrap:
+    desc: bootstrap the development environment
+    cmds:
+      - |
+        GIT_BOOTSTRAP_SH=$(curl -fsSL https://raw.githubusercontent.com/robinbryce/git-bootstrap/refs/heads/main/git-bootstrap.v2.sh)
+        sh -c "$GIT_BOOTSTRAP_SH" - clone .
+        sh -c "$GIT_BOOTSTRAP_SH" - checkout .
+
+        if [ -f ../go.work ]; then
+          echo "go.work exists, you may need to update it"
+        else
+          cat <<EOF > ../go.work
+          go 1.24.4
+          use (
+            ./go-datatrails-common
+            ./go-merklelog-datatrails
+            ./go-datatrails-merklelog/massifs
+            ./go-datatrails-merklelog/mmr
+            ./go-datatrails-serialization/eventsv1
+            ./go-datatrails-simplehash
+            ./go-merklelog-azure
+            ./go-merklelog-fs
+            ./go-merklelog-provider-testing
+            ./veracity
+          )
+        EOF
+        fi
+        exit 0
+
+  build:
+    desc: "build the veracity binary"
+
+    cmds:
+      - |
+        go build -o veracity cmd/veracity/main.go
+
+  test:
+    desc: run all the tests
+    cmds:
+      - task: test:unit
+      - task: test:integration
+
+  test:unit:
+    desc: run the unit tests
+    cmds:
+      - task: azurite:preflight
+      - task: gotest:unit
+      - task: azurite:stop
+
+  test:integration:
+    cmds:
+      - task: azurite:preflight
+      - task: gotest:integration
+      - task: azurite:stop
+
+  test:datatrails:public:
+    desc: "run the integration tests against known public content on datatrails"
+    vars:
+      GO_TEST_TAGS: '{{.GO_TEST_TAGS | default "-tags integration,prodpublic"}}'
+      GO_MOD_DIRS:
+        sh: find . -type f -name 'go.mod'
+
+    cmds:
+      - for: { var: GO_MOD_DIRS, as: MODULE }
+        cmd: |
+          cd $(dirname {{.MODULE}})
+          mkdir -p {{.UNITTEST_DIR}}
+          go test \
+            {{.GO_TEST_TAGS}} \
+            -race \
+            -v \
+            ./cmd/
+
+  test:binary:setup:
+    cmds:
+      - |
+        downloadDir=$(mktemp -d)
+        installDir={{.ROOT_DIR}}/tests/systemtest/shunit
+        mkdir -p ${installDir}
+
+        # We need to get the master branch of shunit2 to get junit output
+        wget -q -O ${downloadDir}/shunit2.zip https://github.com/kward/shunit2/archive/refs/heads/master.zip
+        unzip -q ${downloadDir}/shunit2.zip -d ${downloadDir}
+
+        cp ${downloadDir}/shunit2-master/shunit2 ${installDir}
+        cp ${downloadDir}/shunit2-master/shunit2_test_helpers ${installDir}
+        cp ${downloadDir}/shunit2-master/test_runner ${installDir}
+        cp -r ${downloadDir}/shunit2-master/lib ${installDir}
+
+        rm -rf ${downloadDir}
+        pushd {{.ROOT_DIR}}/tests/systemtest
+        cat ./test-setup.sh \
+          ./test-01-*.sh \
+          ./test-02-*.sh \
+          ./test-03-*.sh \
+          ./test-04-*.sh \
+          > ./run.sh
+        popd
+
+  test:binary:
+    vars:
+      OUTARGS: '{{.OUTARGS | default "--output-junit-xml=res.xml"}}'
+    cmds:
+      - task: test:binary:setup
+      - task: test:binary:run
+        vars:
+          OUTARGS: "{{.OUTARGS}}"
+
+  test:binary:run:
+    vars:
+      OUTARGS: '{{.OUTARGS | default ""}}'
+    cmds:
+      - |
+        pushd {{.ROOT_DIR}}/tests/systemtest
+        ./shunit/shunit2 ./run.sh {{.OUTARGS}}
+        popd
+
+  test:binary:one:
+    requires:
+      vars: [TEST]
+    cmds:
+      - |
+        pushd {{.ROOT_DIR}}/tests/systemtest
+        cat ./test-setup.sh \
+          ./test-{{.TEST}}-*.sh \
+          > ./run.sh
+        ./shunit/shunit2 ./run.sh
+        popd
+
+  test:binary:xx:
+    cmds:
+      - |
+        downloadDir=$(mktemp -d)
+        installDir={{.ROOT_DIR}}/tests/systemtest/shunit
+        mkdir -p ${installDir}
+
+        # We need to get the master branch of shunit2 to get junit output
+        wget -q -O ${downloadDir}/shunit2.zip https://github.com/kward/shunit2/archive/refs/heads/master.zip
+        unzip -q ${downloadDir}/shunit2.zip -d ${downloadDir}
+
+        cp ${downloadDir}/shunit2-master/shunit2 ${installDir}
+        cp ${downloadDir}/shunit2-master/shunit2_test_helpers ${installDir}
+        cp ${downloadDir}/shunit2-master/test_runner ${installDir}
+        cp -r ${downloadDir}/shunit2-master/lib ${installDir}
+
+        rm -rf ${downloadDir}
+      - |
+        pushd {{.ROOT_DIR}}/tests/systemtest
+        ./shunit/shunit2 ./test.sh --output-junit-xml=res.xml
+        popd
+
+    sources:
+      - "{{.ROOT_DIR}}/tests/systemtest"
diff --git a/Taskfile.yml b/Taskfile.yml
deleted file mode 100644
index 9236488..0000000
--- a/Taskfile.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-version: '3'
-
-# This Taskfile represents the primary control surface for developers interacting with
-# this component.
-#
-# Primary "configure/build/deploy/test" tasks must be provided directly in this top level
-# taskfile
-#
-# Infrequently used or pure sub-tasks should be in subservient taskfiles and included
-# here.
-#
-# All tasks that are expected to be run directly by developers must have a meaningful
-# 'desc' with all optional variables defined, in this file or in included files.
-#
-# All tasks that are internal sub-tasks only should have a meaningful 'summary'.
-#
-includes:
-  codequality:
-    taskfile: ./taskfiles/Taskfile_codequality.yml
-    dir: ./taskfiles
-  azurite:
-    taskfile: ./taskfiles/Taskfile_azurite.yml
-    dir: ./taskfiles
-  gobuild:
-    taskfile: ./taskfiles/Taskfile_gobuild.yml
-    dir: ./taskfiles
-  gotest:
-    taskfile: ./taskfiles/Taskfile_gotest.yml
-    dir: ./taskfiles
-  systemtest:
-    taskfile: ./taskfiles/Taskfile_systemtest.yml
-    dir: .
- -tasks: - - build: - desc: ensure go build works for all modules - cmds: - - task: gobuild:go:build - - build:fast: - desc: ensure go build works for all modules - cmds: - - task: gobuild:go:build - - build:clean: - desc: ensure go build works for all modules - cmds: - - task: gobuild:go:build - - format: - desc: formats the code correctly - cmds: - - task: codequality:format - - lint: - desc: lints the go code - cmds: - - task: codequality:lint - - go:modules: - desc: tidies the go modules - cmds: - - task: codequality:modules - - test:unit: - desc: run the unit tests - cmds: - - task: gotest:go:unit - - test:integration: - desc: run the azurite integration tests - cmds: - - task: azurite:preflight - - task: gotest:go:azurite - - task: gotest:go:prodpublic - - test:system: - desc: run the system tests - cmds: - - task: systemtest:setup - - task: systemtest:test diff --git a/amourystatement.go b/amourystatement.go new file mode 100644 index 0000000..ae71001 --- /dev/null +++ b/amourystatement.go @@ -0,0 +1,467 @@ +package veracity + +var AmourySignedStatement = []byte{ + 0xd2, 0x84, 0x59, 0x13, 0xf2, 0xa6, 0x01, 0x38, 0x25, 0x0f, 0xa4, 0x01, + 0x78, 0x5c, 0x64, 0x69, 0x64, 0x3a, 0x78, 0x35, 0x30, 0x39, 0x3a, 0x30, + 0x3a, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x3a, 0x49, 0x5f, 0x5f, 0x69, + 0x75, 0x4c, 0x32, 0x35, 0x6f, 0x58, 0x45, 0x56, 0x46, 0x64, 0x54, 0x50, + 0x5f, 0x61, 0x42, 0x4c, 0x78, 0x5f, 0x65, 0x54, 0x31, 0x52, 0x50, 0x48, + 0x62, 0x43, 0x51, 0x5f, 0x45, 0x43, 0x42, 0x51, 0x66, 0x59, 0x5a, 0x70, + 0x74, 0x39, 0x73, 0x3a, 0x3a, 0x65, 0x6b, 0x75, 0x3a, 0x31, 0x2e, 0x33, + 0x2e, 0x36, 0x2e, 0x31, 0x2e, 0x34, 0x2e, 0x31, 0x2e, 0x33, 0x31, 0x31, + 0x2e, 0x37, 0x36, 0x2e, 0x35, 0x39, 0x2e, 0x31, 0x2e, 0x31, 0x02, 0x78, + 0x26, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x2f, 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x2f, + 0x70, 0x68, 0x69, 0x2d, 0x34, 0x2d, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x69, 0x6e, 0x67, 0x06, 0xc1, 0x1a, 0x68, 0x54, 0x89, 0xb3, 0x63, 0x73, + 0x76, 0x6e, 0x00, 0x18, 0x21, 0x83, 0x59, 0x06, 0x78, 0x30, 0x82, 0x06, + 0x74, 0x30, 0x82, 0x04, 0x5c, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x13, + 0x33, 0x00, 0x00, 0x00, 0x47, 0xa0, 0xab, 0xc0, 0xe5, 0xbd, 0x99, 0x39, + 0xb2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0c, 0x05, 0x00, 0x30, 0x55, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, + 0x53, 0x31, 0x1e, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x15, + 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x20, 0x43, 0x6f, + 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x31, 0x26, 0x30, + 0x24, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x1d, 0x4d, 0x69, 0x63, 0x72, + 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x20, 0x53, 0x43, 0x44, 0x20, 0x50, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x74, 0x73, 0x20, 0x52, 0x53, 0x41, 0x20, 0x43, + 0x41, 0x30, 0x1e, 0x17, 0x0d, 0x32, 0x35, 0x30, 0x32, 0x32, 0x30, 0x32, + 0x30, 0x34, 0x35, 0x34, 0x36, 0x5a, 0x17, 0x0d, 0x32, 0x36, 0x30, 0x32, + 0x31, 0x38, 0x32, 0x30, 0x34, 0x35, 0x34, 0x36, 0x5a, 0x30, 0x81, 0x81, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, + 0x53, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x57, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x74, 0x6f, 0x6e, 0x31, 0x10, + 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x07, 0x52, 0x65, 0x64, + 0x6d, 0x6f, 0x6e, 0x64, 0x31, 0x1e, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, + 0x0a, 0x13, 0x15, 
0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, + 0x20, 0x43, 0x6f, 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x31, 0x2b, 0x30, 0x29, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x22, 0x4d, + 0x69, 0x63, 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x20, 0x53, 0x43, 0x44, + 0x20, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x73, 0x20, 0x52, 0x53, + 0x41, 0x20, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x30, 0x82, 0x01, + 0xa2, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x8f, 0x00, 0x30, 0x82, 0x01, + 0x8a, 0x02, 0x82, 0x01, 0x81, 0x00, 0xb6, 0x9a, 0x2e, 0xa4, 0xba, 0xce, + 0xfe, 0xaf, 0x9a, 0xc5, 0x63, 0xd2, 0xa9, 0x5d, 0x14, 0x69, 0xc9, 0x39, + 0x5f, 0xdb, 0x0a, 0x23, 0xad, 0xc7, 0x3f, 0x7c, 0x6e, 0x94, 0x50, 0x71, + 0x32, 0xea, 0xe6, 0xef, 0x33, 0x90, 0x58, 0x5c, 0xb6, 0xf6, 0x6f, 0xbd, + 0x70, 0x84, 0x49, 0x33, 0x35, 0xa4, 0xa9, 0x58, 0x02, 0xb8, 0xad, 0x37, + 0xb5, 0xa4, 0x13, 0x3f, 0x9e, 0xc0, 0x54, 0x2a, 0x83, 0x47, 0xf7, 0xa3, + 0xe3, 0xa6, 0x30, 0x46, 0x9d, 0x88, 0x42, 0xde, 0x4f, 0x2c, 0xea, 0x6e, + 0x4b, 0xbf, 0x96, 0x5e, 0x68, 0x36, 0xf8, 0x0b, 0x11, 0x31, 0x24, 0xed, + 0x8c, 0x75, 0x17, 0xd0, 0x27, 0x37, 0x49, 0x11, 0x9a, 0x37, 0x92, 0x2b, + 0xda, 0xc7, 0x5a, 0x6a, 0x6d, 0xc1, 0xdb, 0xec, 0x23, 0x54, 0x47, 0xd3, + 0x6e, 0x55, 0x64, 0x87, 0x3a, 0xf5, 0x64, 0x87, 0x3f, 0x84, 0x18, 0x99, + 0x91, 0x1e, 0x28, 0x75, 0x0b, 0x57, 0xe5, 0xfa, 0xf5, 0x1f, 0xfc, 0x52, + 0x1f, 0x79, 0x20, 0x6a, 0x9c, 0x0a, 0x24, 0x2b, 0xac, 0xf1, 0x7a, 0x7a, + 0x7f, 0xdc, 0x08, 0xa0, 0x33, 0x7b, 0x93, 0x6d, 0x14, 0x18, 0x38, 0x3b, + 0xa6, 0xe1, 0xee, 0xa8, 0x71, 0x2e, 0x81, 0x86, 0x2c, 0x69, 0x92, 0xc5, + 0x80, 0x27, 0x82, 0xb1, 0xb2, 0x80, 0xdc, 0x62, 0x86, 0x1d, 0xa4, 0x01, + 0x56, 0x3e, 0x08, 0x3e, 0x6b, 0xd5, 0x1a, 0x7a, 0x42, 0xd5, 0x74, 0x21, + 0x5a, 0x43, 0x39, 0x5d, 0x69, 0x90, 0x44, 0x77, 0x57, 0x99, 0xbf, 0x3a, + 0x21, 0x66, 0x87, 0xbc, 0xca, 0x86, 0x45, 0xa5, 0xc3, 0x38, 0xf3, 0xe4, + 0x42, 0xc7, 0xa5, 0x8d, 0x92, 0xa9, 0xc0, 0x14, 0x69, 0xc0, 0xa1, 0x2d, + 0xcc, 0x28, 0x43, 0xb5, 0xd7, 0x2b, 0x9e, 0xd4, 0xe2, 0x8b, 0x96, 0x71, + 0x0f, 0x6c, 0xff, 0xcb, 0xc8, 0x96, 0xcc, 0x35, 0x37, 0x5a, 0x79, 0x1a, + 0x2b, 0x2a, 0x45, 0xc5, 0xc3, 0x26, 0x5f, 0x03, 0x25, 0xe4, 0xdf, 0xd4, + 0xf7, 0xec, 0x1a, 0x30, 0xc1, 0xbe, 0xa0, 0xa5, 0x76, 0xa5, 0x02, 0x98, + 0xc0, 0x60, 0x0b, 0x34, 0x9b, 0x9f, 0xd7, 0x47, 0xe8, 0x92, 0xf1, 0xa5, + 0xa9, 0xeb, 0x03, 0x4f, 0x33, 0x9d, 0x54, 0x5d, 0x47, 0xde, 0xcc, 0x2e, + 0x02, 0xfa, 0x6b, 0xe2, 0x1a, 0x25, 0x79, 0x38, 0x44, 0xd7, 0x68, 0x91, + 0xe3, 0x2d, 0x60, 0x33, 0x80, 0x8b, 0x7c, 0x56, 0x5a, 0xf9, 0x49, 0x0c, + 0x94, 0x2d, 0x83, 0x3e, 0x51, 0x04, 0xef, 0xf3, 0x73, 0x42, 0x13, 0x0c, + 0xc6, 0x31, 0xf1, 0xb6, 0x6c, 0x4f, 0xb2, 0x0b, 0x0f, 0x1d, 0xd7, 0xfe, + 0x33, 0x3b, 0x77, 0x75, 0xa6, 0x6f, 0x1a, 0x35, 0x49, 0x08, 0x2c, 0x3c, + 0x30, 0xe2, 0x70, 0x32, 0xc8, 0x69, 0x3f, 0xb4, 0xf5, 0xb6, 0xdc, 0xe5, + 0x34, 0xf2, 0x6e, 0xa4, 0xf7, 0x73, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, + 0x82, 0x01, 0x8e, 0x30, 0x82, 0x01, 0x8a, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x1d, 0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, 0x03, 0x02, 0x07, 0x80, 0x30, + 0x16, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04, 0x0f, 0x30, 0x0d, 0x06, 0x0b, + 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x4c, 0x3b, 0x01, 0x01, 0x30, + 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0xf3, 0x16, + 0x75, 0x55, 0x53, 0x35, 0x76, 0x3f, 0x69, 0xbe, 0xef, 0x1f, 0xd4, 0xab, + 0x22, 0x89, 0x1f, 0x7f, 0x9e, 0x18, 0x30, 0x45, 0x06, 0x03, 0x55, 0x1d, + 0x11, 0x04, 0x3e, 
0x30, 0x3c, 0xa4, 0x3a, 0x30, 0x38, 0x31, 0x1e, 0x30, + 0x1c, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x15, 0x4d, 0x69, 0x63, 0x72, + 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x20, 0x43, 0x6f, 0x72, 0x70, 0x6f, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x31, 0x16, 0x30, 0x14, 0x06, 0x03, 0x55, + 0x04, 0x05, 0x13, 0x0d, 0x34, 0x36, 0x39, 0x34, 0x35, 0x31, 0x2b, 0x35, + 0x30, 0x33, 0x37, 0x39, 0x30, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, + 0x04, 0x18, 0x30, 0x16, 0x80, 0x14, 0x55, 0xcd, 0x4d, 0x85, 0x6e, 0xcd, + 0x4a, 0x35, 0xc3, 0x8e, 0x3f, 0x72, 0x01, 0xba, 0xaa, 0x98, 0x19, 0x97, + 0x4b, 0xa7, 0x30, 0x5e, 0x06, 0x03, 0x55, 0x1d, 0x1f, 0x04, 0x57, 0x30, + 0x55, 0x30, 0x53, 0xa0, 0x51, 0xa0, 0x4f, 0x86, 0x4d, 0x68, 0x74, 0x74, + 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x6d, 0x69, 0x63, 0x72, + 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x6b, + 0x69, 0x6f, 0x70, 0x73, 0x2f, 0x63, 0x72, 0x6c, 0x2f, 0x4d, 0x69, 0x63, + 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x25, 0x32, 0x30, 0x53, 0x43, 0x44, + 0x25, 0x32, 0x30, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x73, 0x25, + 0x32, 0x30, 0x52, 0x53, 0x41, 0x25, 0x32, 0x30, 0x43, 0x41, 0x2e, 0x63, + 0x72, 0x6c, 0x30, 0x6b, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, + 0x01, 0x01, 0x04, 0x5f, 0x30, 0x5d, 0x30, 0x5b, 0x06, 0x08, 0x2b, 0x06, + 0x01, 0x05, 0x05, 0x07, 0x30, 0x02, 0x86, 0x4f, 0x68, 0x74, 0x74, 0x70, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x6d, 0x69, 0x63, 0x72, 0x6f, + 0x73, 0x6f, 0x66, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x6b, 0x69, + 0x6f, 0x70, 0x73, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x2f, 0x4d, 0x69, + 0x63, 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x25, 0x32, 0x30, 0x53, 0x43, + 0x44, 0x25, 0x32, 0x30, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x73, + 0x25, 0x32, 0x30, 0x52, 0x53, 0x41, 0x25, 0x32, 0x30, 0x43, 0x41, 0x2e, + 0x63, 0x72, 0x74, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x01, 0x01, + 0xff, 0x04, 0x02, 0x30, 0x00, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, + 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0c, 0x05, 0x00, 0x03, 0x82, 0x02, 0x01, + 0x00, 0x8c, 0x4f, 0x3c, 0xdf, 0x9b, 0x0c, 0x36, 0x09, 0xfb, 0x6a, 0x31, + 0x7a, 0x7a, 0xa5, 0xf2, 0x36, 0x40, 0x67, 0x29, 0x4b, 0xec, 0xfc, 0x85, + 0xa8, 0x4d, 0xc6, 0xda, 0x46, 0x40, 0x1d, 0x7e, 0x92, 0x17, 0x3b, 0xfb, + 0x84, 0x6c, 0xcc, 0xd0, 0x4a, 0x14, 0x21, 0xf4, 0xd2, 0x5a, 0xa8, 0x44, + 0x94, 0xbe, 0x75, 0x82, 0x9e, 0x77, 0x2e, 0x74, 0x3c, 0x5f, 0xe6, 0x2b, + 0xd0, 0x9d, 0x07, 0xe4, 0x3b, 0xc5, 0x43, 0xec, 0x47, 0x7c, 0xfa, 0xaa, + 0x32, 0xd4, 0x1e, 0xd7, 0x0b, 0xf3, 0xb6, 0xb5, 0xd8, 0x12, 0x29, 0x76, + 0xa8, 0x74, 0x8d, 0xd4, 0x4c, 0xc2, 0xb3, 0x03, 0xce, 0x67, 0x43, 0x02, + 0x0b, 0xf2, 0x23, 0x77, 0x99, 0x3f, 0xa8, 0x20, 0x62, 0x79, 0xc4, 0xd3, + 0xbd, 0x40, 0x64, 0x91, 0x93, 0x6c, 0x74, 0xe5, 0xd8, 0xa4, 0x28, 0x34, + 0x1b, 0xf5, 0xe8, 0x10, 0xb3, 0xaa, 0xa1, 0x64, 0x09, 0xef, 0x72, 0xaf, + 0x6d, 0xfb, 0xce, 0x0e, 0x91, 0xe2, 0x7e, 0x8c, 0xc8, 0x28, 0x8a, 0x2f, + 0x3e, 0xe6, 0x89, 0x7d, 0x8a, 0x5f, 0xf9, 0x5e, 0x54, 0xb0, 0xf0, 0xc9, + 0x8e, 0x0c, 0xfc, 0x0d, 0x8b, 0xb4, 0x6c, 0x52, 0x12, 0x8c, 0x90, 0x94, + 0x22, 0x9b, 0x04, 0x80, 0x38, 0xad, 0xf7, 0x41, 0x18, 0x2c, 0x12, 0xe9, + 0x7a, 0x05, 0xba, 0x2d, 0x77, 0xf2, 0xc2, 0x96, 0xd8, 0x61, 0x8c, 0xd0, + 0x99, 0x47, 0xd7, 0xee, 0x1e, 0xb3, 0x42, 0x31, 0xda, 0x46, 0x1d, 0x9b, + 0x29, 0xfe, 0x36, 0x54, 0xe9, 0xa9, 0xd4, 0xc6, 0x7b, 0x8c, 0xb4, 0x21, + 0x48, 0xbd, 0x93, 0x50, 0xa3, 0x91, 0x33, 0x63, 0x67, 0x03, 0xbe, 0xe2, + 0x68, 0x93, 0x30, 0x5c, 0xda, 0x22, 0xbb, 0x80, 0xd7, 0xc0, 0x9c, 0x4b, + 0xf8, 0x4e, 0xb1, 
0x3a, 0x79, 0x2a, 0x57, 0x67, 0xb5, 0x1e, 0xd0, 0xba, + 0xd7, 0x79, 0x6d, 0x2e, 0xf1, 0x7d, 0x9c, 0x9b, 0x43, 0xdd, 0xf2, 0x21, + 0x05, 0xb1, 0x59, 0x28, 0xdf, 0x7a, 0x3b, 0x5c, 0x46, 0x3f, 0x29, 0x33, + 0xf1, 0x28, 0x77, 0x85, 0xfb, 0x75, 0x5e, 0x89, 0xea, 0xbf, 0xe5, 0x12, + 0xe8, 0x29, 0x67, 0xb1, 0x06, 0x48, 0xd5, 0xb2, 0xf0, 0x78, 0xc4, 0xed, + 0x87, 0x9e, 0x71, 0x88, 0x32, 0x05, 0xf6, 0x1d, 0x34, 0x44, 0x4d, 0x26, + 0x01, 0xf4, 0xf6, 0x19, 0x83, 0x1d, 0x01, 0xc1, 0xa6, 0x80, 0xa2, 0x81, + 0x2e, 0x3a, 0x13, 0x49, 0xbd, 0xea, 0x8f, 0x2e, 0x08, 0x2f, 0xf2, 0x4f, + 0x69, 0xa9, 0x4b, 0x3e, 0x37, 0xcb, 0xc5, 0xb8, 0x19, 0x00, 0xa4, 0xab, + 0x9e, 0x61, 0xfc, 0x35, 0x8b, 0xd8, 0xba, 0xf4, 0x3a, 0x19, 0xab, 0xff, + 0x6f, 0x2a, 0x0a, 0x21, 0x37, 0x1e, 0x37, 0x52, 0x0b, 0xdc, 0x5a, 0x88, + 0x49, 0x5b, 0x8a, 0xea, 0x7d, 0xd4, 0x88, 0x50, 0x28, 0xaa, 0xb9, 0xad, + 0x3f, 0x90, 0x5f, 0x16, 0xd7, 0xe7, 0x9f, 0x21, 0xfe, 0x8a, 0x8c, 0x42, + 0x70, 0xdf, 0x2d, 0xc5, 0x83, 0x04, 0xb6, 0x96, 0xd6, 0x69, 0xff, 0x7b, + 0x6e, 0x30, 0xcd, 0xc2, 0xa0, 0x9b, 0xe4, 0xb0, 0xf4, 0x4a, 0x45, 0xdc, + 0x03, 0xea, 0xf2, 0x17, 0x90, 0xb8, 0x5f, 0x58, 0x97, 0x9d, 0x4f, 0x23, + 0xd9, 0xee, 0x4f, 0x29, 0x6d, 0x80, 0x4c, 0x63, 0x71, 0xdf, 0x20, 0x78, + 0x8c, 0xfd, 0x6b, 0x1b, 0x63, 0x48, 0xcd, 0xaa, 0xb2, 0x4f, 0x4b, 0x1f, + 0x3d, 0x94, 0x1b, 0xd9, 0xa0, 0x7f, 0xf2, 0x2e, 0xb0, 0xe1, 0xc0, 0xa9, + 0x52, 0x4f, 0xe6, 0xe3, 0x56, 0xb7, 0xed, 0xd0, 0x49, 0xd9, 0x91, 0x67, + 0x6a, 0xab, 0x6b, 0x8e, 0xca, 0xce, 0x65, 0xc2, 0x5b, 0xe4, 0xea, 0x12, + 0xf2, 0x9c, 0x26, 0xe4, 0xd6, 0xb3, 0xc8, 0xe1, 0xd2, 0xe3, 0x39, 0x4d, + 0xc1, 0x22, 0x50, 0x37, 0x2c, 0x69, 0x1b, 0xa3, 0xe5, 0x59, 0x06, 0xd5, + 0x30, 0x82, 0x06, 0xd1, 0x30, 0x82, 0x04, 0xb9, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x13, 0x33, 0x00, 0x00, 0x00, 0x03, 0x95, 0x84, 0x47, 0xff, + 0x89, 0xe8, 0x66, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0c, 0x05, + 0x00, 0x30, 0x5f, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x55, 0x53, 0x31, 0x1e, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, + 0x0a, 0x13, 0x15, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, + 0x20, 0x43, 0x6f, 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x31, 0x30, 0x30, 0x2e, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x27, 0x4d, + 0x69, 0x63, 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x20, 0x53, 0x75, 0x70, + 0x70, 0x6c, 0x79, 0x20, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x20, 0x52, 0x53, + 0x41, 0x20, 0x52, 0x6f, 0x6f, 0x74, 0x20, 0x43, 0x41, 0x20, 0x32, 0x30, + 0x32, 0x32, 0x30, 0x1e, 0x17, 0x0d, 0x32, 0x32, 0x30, 0x32, 0x31, 0x37, + 0x30, 0x30, 0x34, 0x35, 0x32, 0x33, 0x5a, 0x17, 0x0d, 0x34, 0x32, 0x30, + 0x32, 0x31, 0x37, 0x30, 0x30, 0x35, 0x35, 0x32, 0x33, 0x5a, 0x30, 0x55, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, + 0x53, 0x31, 0x1e, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x15, + 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x20, 0x43, 0x6f, + 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x31, 0x26, 0x30, + 0x24, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x1d, 0x4d, 0x69, 0x63, 0x72, + 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x20, 0x53, 0x43, 0x44, 0x20, 0x50, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x74, 0x73, 0x20, 0x52, 0x53, 0x41, 0x20, 0x43, + 0x41, 0x30, 0x82, 0x02, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, + 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x02, 0x0f, + 0x00, 0x30, 0x82, 0x02, 0x0a, 0x02, 0x82, 0x02, 0x01, 0x00, 0xab, 0xed, + 0x7f, 0xb5, 0x71, 
0xbe, 0x8c, 0x73, 0xbe, 0xf1, 0xd7, 0xca, 0x9d, 0xf1, + 0x01, 0xd6, 0x74, 0x87, 0xbc, 0x8c, 0x42, 0x93, 0x4c, 0x9f, 0xca, 0x95, + 0x74, 0x6b, 0x4e, 0x47, 0xea, 0x72, 0x84, 0xb5, 0xa4, 0x16, 0xc1, 0x8c, + 0x42, 0x54, 0xd7, 0x0d, 0xed, 0x98, 0x7a, 0xcf, 0xa8, 0xee, 0x60, 0xb4, + 0x20, 0x44, 0x09, 0x33, 0x3d, 0xfd, 0x08, 0x9c, 0x48, 0x8c, 0x6e, 0x97, + 0x60, 0x84, 0x1f, 0x70, 0x5d, 0x82, 0x68, 0xef, 0xfa, 0x30, 0x2c, 0xd6, + 0xcf, 0x2b, 0x1b, 0x16, 0xf9, 0x53, 0x92, 0x86, 0x3f, 0x2c, 0xdf, 0xe0, + 0xd3, 0xf4, 0x65, 0x70, 0x9f, 0xc8, 0x74, 0x59, 0x5f, 0xd1, 0x55, 0x9c, + 0xbe, 0xe8, 0xe9, 0x64, 0xf8, 0x7d, 0x08, 0xb9, 0x44, 0x77, 0x41, 0xd2, + 0xf6, 0xa6, 0x05, 0x44, 0x03, 0xd1, 0x45, 0x03, 0xaf, 0xc1, 0xed, 0xcd, + 0x4f, 0x9b, 0x84, 0x77, 0x7f, 0x1f, 0x45, 0xb2, 0x9b, 0x67, 0xab, 0xc2, + 0x24, 0x6d, 0x9c, 0xfd, 0x8c, 0x47, 0x07, 0x22, 0x9b, 0x7a, 0x8a, 0x18, + 0x45, 0xea, 0x2f, 0x3e, 0x83, 0x69, 0x56, 0x9c, 0x5d, 0x68, 0x80, 0xd2, + 0xeb, 0x82, 0x1d, 0x80, 0x69, 0x7c, 0x99, 0x7f, 0xb2, 0x4c, 0xfc, 0x30, + 0xc0, 0xb1, 0xce, 0x7d, 0x1f, 0x84, 0xd9, 0x45, 0xa0, 0x9e, 0x74, 0x2a, + 0x80, 0xd6, 0x29, 0xd2, 0x10, 0x8c, 0xd9, 0x86, 0x7e, 0x27, 0x9c, 0xd4, + 0xd1, 0x06, 0x42, 0xc1, 0x9d, 0x49, 0x30, 0xb5, 0xd0, 0xf5, 0xe2, 0xb4, + 0xb0, 0x95, 0xb7, 0xb8, 0xf7, 0xe3, 0xee, 0x20, 0x3f, 0x93, 0x59, 0x39, + 0xee, 0x43, 0x77, 0x75, 0x26, 0x78, 0x3f, 0x88, 0x64, 0xa8, 0x65, 0x53, + 0x02, 0x7a, 0xc1, 0xcd, 0xaa, 0x19, 0xb0, 0x83, 0x4c, 0x90, 0x65, 0x49, + 0x6e, 0x01, 0x29, 0x7d, 0x23, 0xeb, 0x44, 0xb0, 0x4e, 0x92, 0xbe, 0x19, + 0x9a, 0x1e, 0xe6, 0xf0, 0xf8, 0xa0, 0x2f, 0xc0, 0x7c, 0xc4, 0x82, 0x74, + 0xd5, 0x3c, 0x75, 0x28, 0x19, 0x9f, 0x89, 0x60, 0x05, 0x1a, 0x65, 0x71, + 0xfb, 0xe3, 0x52, 0x63, 0xca, 0x05, 0xc5, 0x15, 0xbf, 0x0d, 0xd2, 0x9d, + 0xc1, 0x62, 0xeb, 0xe6, 0xcb, 0x82, 0xa4, 0x1d, 0x8e, 0x36, 0x31, 0x7b, + 0x2c, 0xdb, 0xf8, 0x03, 0x9b, 0xf8, 0x49, 0x9f, 0xb3, 0x60, 0x2c, 0x29, + 0x4d, 0xcf, 0x28, 0xbb, 0x13, 0xcf, 0x52, 0xd6, 0x52, 0x1b, 0xf7, 0xe4, + 0x95, 0x51, 0x05, 0xbd, 0xe5, 0xb7, 0xd2, 0x33, 0x09, 0xc1, 0x00, 0x1f, + 0xdb, 0xd5, 0xfc, 0xc0, 0x0b, 0x89, 0xd2, 0x9c, 0x2e, 0x59, 0xa3, 0xf6, + 0x3f, 0x38, 0x90, 0x4a, 0x89, 0xd1, 0xe1, 0x59, 0x91, 0x3f, 0x77, 0x0a, + 0xcf, 0xcf, 0x1a, 0x01, 0xb9, 0xb4, 0xce, 0x6c, 0xef, 0xc7, 0xea, 0x5d, + 0x4c, 0x25, 0xfd, 0x7c, 0x7f, 0xdc, 0x4e, 0xe6, 0x30, 0x12, 0xb8, 0xc9, + 0x03, 0x77, 0x7d, 0x1b, 0xbf, 0xf7, 0xb0, 0x31, 0x84, 0xfd, 0x00, 0x6a, + 0x92, 0x30, 0xbe, 0x36, 0x46, 0x48, 0xf1, 0x70, 0x9d, 0x9b, 0xa5, 0x2b, + 0xf1, 0x02, 0x0a, 0xe0, 0xb6, 0x99, 0x27, 0xf6, 0x41, 0x4f, 0xd4, 0x04, + 0x91, 0x71, 0x7b, 0xc4, 0xc2, 0xf4, 0x14, 0x17, 0xb7, 0x60, 0xb6, 0x16, + 0x93, 0x91, 0x76, 0xa5, 0xce, 0x1d, 0xdb, 0x02, 0x62, 0x9d, 0x92, 0x05, + 0xbc, 0x92, 0x6f, 0x2e, 0xf9, 0x00, 0xa7, 0xff, 0xe0, 0xb9, 0xa6, 0xed, + 0xeb, 0x00, 0x97, 0x4e, 0x3c, 0x47, 0x0f, 0x3d, 0x91, 0x92, 0x22, 0x67, + 0x9a, 0x2b, 0x5e, 0x48, 0xb3, 0xb4, 0xf4, 0x37, 0x90, 0x22, 0xf8, 0x04, + 0x19, 0x8e, 0xe7, 0x25, 0x32, 0xb2, 0xd6, 0x83, 0x30, 0x46, 0x86, 0xa5, + 0x1a, 0xb2, 0xf5, 0xe1, 0x80, 0xf8, 0x43, 0x23, 0x5a, 0xc1, 0xc6, 0xb3, + 0x06, 0xd1, 0x99, 0x43, 0x6d, 0x0d, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, + 0x82, 0x01, 0x8e, 0x30, 0x82, 0x01, 0x8a, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x1d, 0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, 0x03, 0x02, 0x01, 0x86, 0x30, + 0x10, 0x06, 0x09, 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x15, 0x01, + 0x04, 0x03, 0x02, 0x01, 0x00, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0x55, 0xcd, 0x4d, 0x85, 0x6e, 0xcd, 0x4a, 0x35, + 0xc3, 0x8e, 0x3f, 
0x72, 0x01, 0xba, 0xaa, 0x98, 0x19, 0x97, 0x4b, 0xa7, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x1d, 0x20, 0x04, 0x0a, 0x30, 0x08, 0x30, + 0x06, 0x06, 0x04, 0x55, 0x1d, 0x20, 0x00, 0x30, 0x19, 0x06, 0x09, 0x2b, + 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x14, 0x02, 0x04, 0x0c, 0x1e, 0x0a, + 0x00, 0x53, 0x00, 0x75, 0x00, 0x62, 0x00, 0x43, 0x00, 0x41, 0x30, 0x0f, + 0x06, 0x03, 0x55, 0x1d, 0x13, 0x01, 0x01, 0xff, 0x04, 0x05, 0x30, 0x03, + 0x01, 0x01, 0xff, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, + 0x30, 0x16, 0x80, 0x14, 0x0b, 0xb3, 0x68, 0x3b, 0xaf, 0xda, 0xaf, 0xee, + 0x70, 0xa5, 0x76, 0xd9, 0x21, 0xf7, 0xcc, 0x44, 0x16, 0x07, 0xd0, 0xf8, + 0x30, 0x6c, 0x06, 0x03, 0x55, 0x1d, 0x1f, 0x04, 0x65, 0x30, 0x63, 0x30, + 0x61, 0xa0, 0x5f, 0xa0, 0x5d, 0x86, 0x5b, 0x68, 0x74, 0x74, 0x70, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x73, + 0x6f, 0x66, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x6b, 0x69, 0x6f, + 0x70, 0x73, 0x2f, 0x63, 0x72, 0x6c, 0x2f, 0x4d, 0x69, 0x63, 0x72, 0x6f, + 0x73, 0x6f, 0x66, 0x74, 0x25, 0x32, 0x30, 0x53, 0x75, 0x70, 0x70, 0x6c, + 0x79, 0x25, 0x32, 0x30, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x25, 0x32, 0x30, + 0x52, 0x53, 0x41, 0x25, 0x32, 0x30, 0x52, 0x6f, 0x6f, 0x74, 0x25, 0x32, + 0x30, 0x43, 0x41, 0x25, 0x32, 0x30, 0x32, 0x30, 0x32, 0x32, 0x2e, 0x63, + 0x72, 0x6c, 0x30, 0x79, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, + 0x01, 0x01, 0x04, 0x6d, 0x30, 0x6b, 0x30, 0x69, 0x06, 0x08, 0x2b, 0x06, + 0x01, 0x05, 0x05, 0x07, 0x30, 0x02, 0x86, 0x5d, 0x68, 0x74, 0x74, 0x70, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x6d, 0x69, 0x63, 0x72, 0x6f, + 0x73, 0x6f, 0x66, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x6b, 0x69, + 0x6f, 0x70, 0x73, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x2f, 0x4d, 0x69, + 0x63, 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x25, 0x32, 0x30, 0x53, 0x75, + 0x70, 0x70, 0x6c, 0x79, 0x25, 0x32, 0x30, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x25, 0x32, 0x30, 0x52, 0x53, 0x41, 0x25, 0x32, 0x30, 0x52, 0x6f, 0x6f, + 0x74, 0x25, 0x32, 0x30, 0x43, 0x41, 0x25, 0x32, 0x30, 0x32, 0x30, 0x32, + 0x32, 0x2e, 0x63, 0x72, 0x74, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, + 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0c, 0x05, 0x00, 0x03, 0x82, 0x02, 0x01, + 0x00, 0x6f, 0xde, 0x61, 0xd6, 0x6b, 0xfa, 0x41, 0xbf, 0x6d, 0x1c, 0x94, + 0xc8, 0xe1, 0x8a, 0xc3, 0xca, 0xa7, 0xf4, 0x33, 0x04, 0xe4, 0x29, 0x26, + 0xcf, 0x7b, 0xe6, 0x21, 0xd7, 0x26, 0x75, 0x4f, 0x8b, 0x13, 0x74, 0xe7, + 0x17, 0x31, 0x01, 0x46, 0x8d, 0x45, 0x44, 0x5d, 0x6d, 0x74, 0xe7, 0x6a, + 0x0a, 0xae, 0x7c, 0xbe, 0xd1, 0xf9, 0x96, 0xec, 0x5a, 0xf2, 0x19, 0x25, + 0xe3, 0x0c, 0xaf, 0xbc, 0x08, 0xef, 0xd1, 0xa8, 0x69, 0xa6, 0xbf, 0xb6, + 0x50, 0x8e, 0xfd, 0xbf, 0x2a, 0x33, 0x28, 0x62, 0x02, 0xe2, 0xe7, 0x76, + 0xcc, 0x1a, 0x56, 0x82, 0xd9, 0xb1, 0x89, 0xf1, 0x6f, 0xe4, 0xac, 0x97, + 0xcb, 0xb9, 0x19, 0xca, 0xbb, 0xee, 0x69, 0x50, 0xe6, 0x47, 0x78, 0x70, + 0x02, 0x1a, 0x59, 0xc9, 0x37, 0xd2, 0xe9, 0x72, 0xf1, 0x75, 0x19, 0xec, + 0x0e, 0x5b, 0x03, 0xf7, 0x9a, 0x9d, 0xc3, 0xcf, 0x61, 0x04, 0xa7, 0xfc, + 0x97, 0xf4, 0x1f, 0x16, 0x10, 0xa4, 0x3c, 0x98, 0xb7, 0x04, 0xf7, 0xed, + 0x6f, 0x41, 0x35, 0x90, 0x54, 0x39, 0xa9, 0x4c, 0xe5, 0xe2, 0x34, 0xa7, + 0x80, 0x22, 0xb2, 0x4f, 0xc7, 0xdd, 0x5d, 0x90, 0x51, 0x74, 0x79, 0x47, + 0x8a, 0x5d, 0x75, 0x04, 0x9a, 0x4d, 0x9b, 0xb8, 0x1c, 0x27, 0x12, 0x50, + 0x7d, 0x85, 0x81, 0x5f, 0xe1, 0x03, 0x46, 0x93, 0x46, 0x4b, 0x46, 0x08, + 0xe7, 0xf7, 0x10, 0x84, 0xc1, 0x12, 0xdf, 0x98, 0xd8, 0x25, 0xf1, 0x86, + 0xa2, 0xcc, 0x3d, 0x0c, 0x50, 0x9f, 0x39, 0x1c, 0xe3, 0x46, 0x67, 0x35, + 0xce, 0x91, 0x15, 
0x5d, 0x4a, 0xe7, 0x6e, 0x72, 0x43, 0xb5, 0xc8, 0xeb, + 0xa5, 0xe4, 0x33, 0xd0, 0x34, 0x20, 0xe8, 0xa5, 0x70, 0x3a, 0x34, 0xa4, + 0x12, 0x4b, 0xe3, 0xcc, 0xa9, 0x6d, 0x1f, 0x4f, 0x9b, 0x9d, 0x4d, 0x48, + 0x2a, 0xfa, 0xd2, 0xb9, 0x5c, 0xbc, 0x44, 0x55, 0x9c, 0x8b, 0x5b, 0xdd, + 0xac, 0x08, 0xf4, 0x23, 0xa6, 0x36, 0x25, 0xa0, 0x0b, 0x70, 0x4d, 0x34, + 0x2e, 0x1f, 0x3a, 0x04, 0x71, 0x98, 0x54, 0xaf, 0xcd, 0x64, 0x46, 0x50, + 0x00, 0x05, 0xe5, 0x08, 0xf4, 0x5a, 0x39, 0x09, 0x1c, 0x09, 0xac, 0x64, + 0xb9, 0x3d, 0x33, 0x35, 0x90, 0x74, 0x36, 0x9a, 0x54, 0xd7, 0x8f, 0x39, + 0x8c, 0x74, 0x7a, 0xee, 0x9e, 0xfc, 0x6d, 0xb0, 0x69, 0x5d, 0x27, 0xbd, + 0x2f, 0x27, 0xe9, 0x58, 0x5c, 0x01, 0xde, 0xae, 0xa3, 0xc9, 0xef, 0x4a, + 0x5b, 0x6b, 0x97, 0x8b, 0xfe, 0xf3, 0x4c, 0xf6, 0x01, 0xc9, 0x7d, 0x00, + 0xb5, 0xea, 0x15, 0xa3, 0xa2, 0x56, 0xe7, 0xa2, 0x57, 0x84, 0x82, 0xc2, + 0x5a, 0x6c, 0xc1, 0x8d, 0xb8, 0xfc, 0x59, 0x4c, 0xdc, 0xa3, 0xfb, 0x31, + 0x8f, 0x06, 0xed, 0x85, 0x3d, 0x16, 0xb4, 0xa0, 0xc0, 0x0c, 0xab, 0x8a, + 0x44, 0x46, 0xa1, 0x0b, 0x2d, 0x2d, 0x49, 0xeb, 0x2d, 0x0f, 0x70, 0xf9, + 0x5d, 0xc1, 0x88, 0x74, 0xcb, 0xd4, 0xf4, 0x10, 0x4b, 0x16, 0x09, 0x57, + 0xb5, 0x6d, 0x8b, 0x99, 0xd4, 0xc3, 0x7b, 0x89, 0x4b, 0x05, 0x2b, 0xae, + 0x4b, 0x64, 0xd0, 0xa0, 0x50, 0x70, 0xfc, 0x1a, 0x2a, 0x5d, 0xcb, 0x42, + 0x7b, 0xfb, 0x03, 0x7a, 0xbe, 0x53, 0x57, 0x17, 0x99, 0xe2, 0x1e, 0xf3, + 0x53, 0x9d, 0x2f, 0x72, 0xb0, 0x95, 0xef, 0x8c, 0x7e, 0xc4, 0x22, 0x38, + 0x5e, 0x95, 0x26, 0x5c, 0x8d, 0xee, 0xc8, 0xba, 0xe1, 0x11, 0x52, 0x61, + 0xd0, 0x2d, 0x37, 0x2f, 0x7a, 0x44, 0xf8, 0xd4, 0xe6, 0x20, 0x89, 0xbe, + 0xed, 0x99, 0x3c, 0xab, 0x93, 0x26, 0xae, 0x44, 0x3b, 0xa5, 0x5c, 0x24, + 0x25, 0xd4, 0xfb, 0x71, 0x6e, 0xd8, 0x82, 0x2a, 0xa4, 0xa0, 0x22, 0x0e, + 0x7b, 0x28, 0x1b, 0xfd, 0x45, 0x4f, 0x5f, 0x18, 0x56, 0x59, 0x05, 0xb3, + 0x30, 0x82, 0x05, 0xaf, 0x30, 0x82, 0x03, 0x97, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x10, 0x68, 0x28, 0xd5, 0x4c, 0x7e, 0x5c, 0xda, 0xbd, 0x43, + 0x39, 0xae, 0x0c, 0xc1, 0x5a, 0x2a, 0x35, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0c, 0x05, 0x00, 0x30, 0x5f, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, + 0x53, 0x31, 0x1e, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x15, + 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x20, 0x43, 0x6f, + 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x31, 0x30, 0x30, + 0x2e, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x27, 0x4d, 0x69, 0x63, 0x72, + 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x20, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, + 0x20, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x20, 0x52, 0x53, 0x41, 0x20, 0x52, + 0x6f, 0x6f, 0x74, 0x20, 0x43, 0x41, 0x20, 0x32, 0x30, 0x32, 0x32, 0x30, + 0x1e, 0x17, 0x0d, 0x32, 0x32, 0x30, 0x32, 0x31, 0x37, 0x30, 0x30, 0x31, + 0x32, 0x33, 0x36, 0x5a, 0x17, 0x0d, 0x34, 0x37, 0x30, 0x32, 0x31, 0x37, + 0x30, 0x30, 0x32, 0x31, 0x30, 0x39, 0x5a, 0x30, 0x5f, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x1e, + 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x15, 0x4d, 0x69, 0x63, + 0x72, 0x6f, 0x73, 0x6f, 0x66, 0x74, 0x20, 0x43, 0x6f, 0x72, 0x70, 0x6f, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x31, 0x30, 0x30, 0x2e, 0x06, 0x03, + 0x55, 0x04, 0x03, 0x13, 0x27, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x6f, + 0x66, 0x74, 0x20, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x20, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x20, 0x52, 0x53, 0x41, 0x20, 0x52, 0x6f, 0x6f, 0x74, + 0x20, 0x43, 0x41, 0x20, 0x32, 0x30, 0x32, 0x32, 0x30, 0x82, 0x02, 0x22, + 0x30, 0x0d, 0x06, 
0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x82, 0x02, 0x0f, 0x00, 0x30, 0x82, 0x02, 0x0a, + 0x02, 0x82, 0x02, 0x01, 0x00, 0x9e, 0x25, 0x01, 0x66, 0x19, 0x1f, 0x64, + 0x34, 0xc1, 0x9d, 0x39, 0x23, 0x62, 0x1d, 0x71, 0x8b, 0x56, 0xea, 0x25, + 0xd2, 0x9b, 0x1f, 0xef, 0x27, 0x01, 0x82, 0xbf, 0x77, 0xd8, 0x94, 0x33, + 0x83, 0x18, 0x48, 0x9b, 0x50, 0x9e, 0x7b, 0x96, 0x64, 0xc2, 0xd0, 0xc3, + 0x5f, 0x45, 0xff, 0x32, 0x9c, 0xe8, 0x17, 0x17, 0xbd, 0x78, 0xed, 0x75, + 0x98, 0x5f, 0x3a, 0x06, 0x08, 0x2b, 0x1c, 0x37, 0x9b, 0x46, 0x4a, 0x90, + 0x0a, 0xb0, 0xaf, 0x46, 0x92, 0x3e, 0x33, 0x89, 0x2a, 0xfa, 0xb8, 0xe7, + 0x32, 0x63, 0xf3, 0x23, 0xc7, 0x6e, 0xd2, 0x14, 0xfb, 0x26, 0x58, 0xee, + 0xfe, 0x06, 0x84, 0x54, 0xfa, 0xc1, 0x1f, 0x37, 0xaa, 0xdb, 0xd4, 0xec, + 0x56, 0x2a, 0xbf, 0x49, 0xbd, 0xcc, 0xeb, 0x02, 0xed, 0xc6, 0x4e, 0xfc, + 0xac, 0x19, 0xb5, 0x12, 0x35, 0x69, 0x15, 0x89, 0x17, 0x4d, 0xa3, 0x68, + 0xea, 0x6c, 0x1e, 0x29, 0x9a, 0x09, 0xf3, 0xce, 0x7a, 0x21, 0xc6, 0x09, + 0xd1, 0x19, 0xea, 0x8f, 0x30, 0x46, 0x69, 0x3b, 0x68, 0x04, 0x2b, 0x7c, + 0x8a, 0x2d, 0xd6, 0x63, 0x5d, 0xea, 0x6d, 0xd6, 0x39, 0x9e, 0xbd, 0x06, + 0x3e, 0x5b, 0xee, 0x2f, 0x11, 0x5b, 0x28, 0x6b, 0xa7, 0x52, 0xa4, 0x68, + 0x5e, 0x4c, 0xa4, 0xea, 0xae, 0xce, 0x23, 0xbf, 0x4c, 0x36, 0x71, 0xda, + 0x81, 0x45, 0x50, 0x8e, 0xca, 0x86, 0xce, 0xff, 0x53, 0xc3, 0xb8, 0x43, + 0xb3, 0x24, 0xee, 0x07, 0x7a, 0xa2, 0xb4, 0xfa, 0xc7, 0x0a, 0x1d, 0x7b, + 0xc6, 0x52, 0x35, 0x31, 0xec, 0x08, 0x1f, 0x84, 0x80, 0x92, 0x5b, 0xf8, + 0xb1, 0xda, 0x39, 0xd6, 0xc9, 0xe7, 0xe5, 0x89, 0x04, 0x7e, 0x51, 0x7f, + 0xf4, 0xe6, 0x6a, 0x64, 0x47, 0x49, 0xea, 0xf8, 0xec, 0xa6, 0xf6, 0xa0, + 0x43, 0x53, 0xfe, 0xda, 0xc3, 0x23, 0x24, 0xd8, 0x25, 0xda, 0x13, 0x2c, + 0x2a, 0xb7, 0x3f, 0x94, 0xde, 0x77, 0x1c, 0x4c, 0x78, 0x1c, 0x6a, 0xf9, + 0x9a, 0x8f, 0xeb, 0x6a, 0x15, 0x77, 0x77, 0xad, 0x49, 0x84, 0xce, 0x10, + 0x40, 0xc7, 0x99, 0x48, 0x0f, 0xd5, 0x96, 0x1e, 0x80, 0x9c, 0x73, 0xa1, + 0x38, 0xa1, 0x03, 0x6f, 0xd3, 0x4d, 0x20, 0xd0, 0xb5, 0x43, 0xe4, 0xf7, + 0x2e, 0x78, 0x0f, 0x4e, 0xf7, 0xbc, 0xbf, 0x65, 0xda, 0x6d, 0x90, 0x0b, + 0x5b, 0xbf, 0xde, 0xea, 0x27, 0x27, 0x99, 0x64, 0xf8, 0x39, 0x7c, 0x73, + 0x3d, 0xd6, 0x21, 0xd2, 0xee, 0xd6, 0xf3, 0x53, 0x11, 0x2e, 0x55, 0xc3, + 0xdc, 0xea, 0xf1, 0x29, 0x57, 0xde, 0x51, 0xa1, 0x78, 0x73, 0x90, 0x0b, + 0x2f, 0xf5, 0xc9, 0x75, 0x36, 0xeb, 0x8d, 0xd2, 0x6d, 0x8e, 0x79, 0x5d, + 0xba, 0x1a, 0x38, 0xff, 0xdf, 0x19, 0x01, 0xa8, 0xd2, 0xc8, 0xd1, 0xd6, + 0xf2, 0xeb, 0x8a, 0xf5, 0x2e, 0xd1, 0xcc, 0x93, 0x13, 0x9b, 0x9c, 0x90, + 0x78, 0x65, 0x63, 0x79, 0x04, 0xc4, 0xf1, 0x9e, 0x9f, 0x8c, 0x3a, 0xf3, + 0x64, 0x0c, 0xfe, 0x98, 0x1d, 0x93, 0xe2, 0x8f, 0x56, 0xa5, 0x63, 0x53, + 0x23, 0xb8, 0x6e, 0x73, 0x16, 0x45, 0x1a, 0xb6, 0xf7, 0x7b, 0x0f, 0xcd, + 0xa4, 0x32, 0xff, 0x5a, 0xfe, 0x96, 0x8d, 0xe1, 0x87, 0x78, 0xdb, 0x70, + 0x83, 0xa8, 0x24, 0x85, 0x69, 0x20, 0xc2, 0x6d, 0x12, 0x0d, 0xe5, 0x79, + 0xf6, 0x2a, 0x59, 0xcf, 0xd6, 0xab, 0xe7, 0x81, 0xe6, 0xa0, 0xb1, 0x88, + 0x2d, 0x08, 0x8c, 0x0b, 0xb1, 0xcf, 0xd7, 0x6c, 0x36, 0xaf, 0x9e, 0xf9, + 0x03, 0x67, 0xd9, 0x41, 0x73, 0xa9, 0xab, 0x45, 0xb8, 0x71, 0x60, 0x58, + 0x18, 0xd4, 0x16, 0x2c, 0x65, 0xba, 0xd1, 0x05, 0xde, 0x92, 0xc5, 0x50, + 0x10, 0x11, 0x90, 0xce, 0x47, 0xcc, 0xfb, 0xaf, 0xbf, 0x23, 0xc0, 0x9f, + 0x05, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x67, 0x30, 0x65, 0x30, 0x0e, + 0x06, 0x03, 0x55, 0x1d, 0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, 0x03, 0x02, + 0x01, 0x86, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x01, 0x01, 0xff, + 0x04, 0x05, 0x30, 
0x03, 0x01, 0x01, 0xff, 0x30, 0x1d, 0x06, 0x03, 0x55, + 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x0b, 0xb3, 0x68, 0x3b, 0xaf, 0xda, + 0xaf, 0xee, 0x70, 0xa5, 0x76, 0xd9, 0x21, 0xf7, 0xcc, 0x44, 0x16, 0x07, + 0xd0, 0xf8, 0x30, 0x10, 0x06, 0x09, 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, + 0x37, 0x15, 0x01, 0x04, 0x03, 0x02, 0x01, 0x00, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x1d, 0x20, 0x04, 0x0a, 0x30, 0x08, 0x30, 0x06, 0x06, 0x04, 0x55, + 0x1d, 0x20, 0x00, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x0c, 0x05, 0x00, 0x03, 0x82, 0x02, 0x01, 0x00, 0x48, + 0xc7, 0x37, 0xff, 0xff, 0xc1, 0x68, 0x57, 0xd7, 0x8b, 0x43, 0x66, 0x46, + 0x3a, 0x26, 0x6b, 0x2f, 0xe8, 0xfa, 0xde, 0x68, 0xa1, 0x8f, 0x47, 0xf1, + 0x3d, 0x34, 0x95, 0x7a, 0xda, 0x55, 0x31, 0xf4, 0x95, 0xd2, 0x38, 0x5f, + 0x2c, 0xba, 0x8f, 0xa5, 0x8d, 0x51, 0x31, 0x6a, 0x89, 0x55, 0x68, 0x6c, + 0x2b, 0x42, 0x64, 0x6a, 0x85, 0x24, 0xa0, 0x51, 0x03, 0xc7, 0xdd, 0xd1, + 0x72, 0x58, 0xed, 0x6c, 0x1e, 0x8c, 0xd8, 0x91, 0xc5, 0xe7, 0x49, 0x11, + 0x9d, 0x19, 0x7a, 0x37, 0x58, 0x1e, 0x77, 0x44, 0xfb, 0xc2, 0x08, 0x98, + 0x42, 0xc4, 0x4d, 0xe3, 0x9b, 0x8a, 0x0e, 0xcf, 0x40, 0x45, 0x4f, 0x1b, + 0x80, 0x70, 0x59, 0x8c, 0x93, 0x81, 0xe8, 0x0f, 0xd5, 0xc8, 0x26, 0x95, + 0xa9, 0xf7, 0x1f, 0x77, 0x06, 0xb8, 0xca, 0xef, 0x9c, 0xfb, 0xe8, 0x66, + 0xda, 0xe5, 0x39, 0xe0, 0xd2, 0xd2, 0x62, 0xc3, 0xa7, 0xd4, 0xb6, 0x18, + 0x9a, 0x27, 0x9b, 0x26, 0x50, 0x4a, 0x72, 0x97, 0xd5, 0xb3, 0x5b, 0x2a, + 0xa4, 0xfd, 0x5f, 0x2f, 0x7e, 0xe6, 0x62, 0xa3, 0x27, 0x66, 0x0c, 0xfa, + 0xd9, 0x19, 0xcc, 0x11, 0x1d, 0x31, 0xa8, 0x01, 0x52, 0x08, 0xe6, 0x54, + 0x0c, 0x99, 0x63, 0x2b, 0xea, 0xd8, 0x84, 0xd4, 0xb4, 0x08, 0x16, 0xef, + 0xbe, 0x4a, 0x5b, 0x88, 0x58, 0xf4, 0x06, 0x16, 0xa0, 0xeb, 0x7a, 0x5d, + 0xe1, 0xc7, 0x44, 0xd6, 0xbb, 0x2f, 0x55, 0x56, 0x25, 0xf0, 0x9e, 0x0c, + 0xe4, 0x0f, 0x12, 0xdb, 0xc0, 0x7f, 0xaf, 0x56, 0x5d, 0xc6, 0x89, 0x0e, + 0x71, 0xa9, 0x56, 0x12, 0xe4, 0xb9, 0x9c, 0xa8, 0x64, 0x1e, 0xb5, 0x47, + 0x95, 0x92, 0xae, 0xd0, 0x70, 0xc8, 0x93, 0x7d, 0x7c, 0x5a, 0x58, 0xf1, + 0x05, 0xf1, 0x4a, 0xb8, 0x6c, 0x72, 0x18, 0xa9, 0xae, 0x1f, 0x57, 0x99, + 0x26, 0x74, 0x66, 0xf5, 0x1d, 0x0f, 0xdf, 0x5d, 0xf0, 0xe7, 0x37, 0x5b, + 0x5f, 0xba, 0xf0, 0xb4, 0xef, 0xe4, 0x63, 0x07, 0x7e, 0x1f, 0x32, 0x18, + 0x69, 0xa9, 0x70, 0x5a, 0x92, 0xf9, 0x79, 0x9c, 0x58, 0xd4, 0x7e, 0xbf, + 0x72, 0x5d, 0x53, 0x46, 0x2b, 0x6e, 0xa3, 0x99, 0x60, 0xd6, 0x85, 0x8c, + 0x66, 0x77, 0x16, 0x76, 0xaf, 0xe2, 0xc5, 0x18, 0x5b, 0xe2, 0x5d, 0x08, + 0x36, 0xd6, 0x66, 0x37, 0x17, 0x65, 0xf0, 0x2e, 0xcf, 0xa1, 0xe5, 0xbc, + 0xe6, 0x8d, 0x0d, 0x65, 0xb4, 0x56, 0x53, 0x5d, 0x9f, 0xc8, 0xaf, 0x4e, + 0x6e, 0x51, 0xcf, 0x88, 0xbe, 0x92, 0xea, 0x30, 0xfb, 0x2c, 0xe7, 0x75, + 0x3f, 0x42, 0x60, 0xc4, 0x71, 0xe7, 0x97, 0x9f, 0x73, 0xc7, 0x9f, 0xca, + 0xd1, 0xb8, 0x6c, 0x23, 0xea, 0x50, 0x28, 0x1d, 0x0e, 0x43, 0xcc, 0xf5, + 0xa9, 0x1b, 0x40, 0xeb, 0xa6, 0x98, 0xe5, 0xe5, 0x0f, 0xc5, 0x92, 0x2f, + 0xa5, 0x96, 0xc7, 0xd7, 0xfa, 0x3c, 0x18, 0xee, 0x1d, 0x1b, 0x61, 0x03, + 0xfd, 0x86, 0xe7, 0x24, 0x41, 0x33, 0xbd, 0xd8, 0xf3, 0xb6, 0x60, 0x7c, + 0xf3, 0x1c, 0x82, 0x03, 0xd5, 0x60, 0xaf, 0xdf, 0xf4, 0x20, 0xa4, 0xe4, + 0x81, 0x06, 0x22, 0x5a, 0xcc, 0x85, 0x33, 0x7d, 0x64, 0xf8, 0xe4, 0xb8, + 0xbf, 0x80, 0x17, 0xd4, 0xfb, 0x21, 0x3f, 0x63, 0xae, 0xe7, 0x8f, 0xb7, + 0x17, 0x44, 0xec, 0x72, 0x2e, 0x35, 0xc9, 0x0b, 0xd0, 0x81, 0x1d, 0xe9, + 0x72, 0x03, 0x09, 0x41, 0xd9, 0xdf, 0x09, 0x48, 0xe6, 0xcd, 0xb7, 0xb2, + 0x1c, 0x60, 0x25, 0x19, 0x52, 0xf3, 0x3d, 0x12, 0x49, 0xed, 0x9d, 0x94, + 0x22, 0x8e, 0x71, 
0x28, 0xf8, 0xc1, 0x07, 0x54, 0x73, 0xdd, 0x38, 0x08, + 0xb4, 0x85, 0x8f, 0x14, 0x6c, 0xaa, 0x00, 0xaf, 0x40, 0xab, 0xb5, 0x87, + 0xce, 0xb6, 0x39, 0x5c, 0x73, 0xf9, 0x90, 0x18, 0x22, 0x82, 0x2f, 0x58, + 0x20, 0x42, 0x81, 0x07, 0x43, 0xe2, 0x72, 0x4c, 0x0e, 0x4b, 0xae, 0x91, + 0x2c, 0x8b, 0x65, 0xc6, 0xd5, 0x23, 0x36, 0xd7, 0x44, 0x76, 0x1d, 0xb1, + 0x85, 0x03, 0xc3, 0x54, 0xe6, 0xf3, 0xad, 0xe4, 0x57, 0x19, 0x01, 0x02, + 0x38, 0x2a, 0x19, 0x01, 0x03, 0x78, 0x1c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x6e, 0x64, 0x2e, 0x69, + 0x6e, 0x2d, 0x74, 0x6f, 0x74, 0x6f, 0x2b, 0x6a, 0x73, 0x6f, 0x6e, 0xa0, + 0x58, 0x30, 0x93, 0x52, 0xac, 0x02, 0x8d, 0x83, 0x03, 0xb1, 0xf1, 0xa3, + 0x57, 0xdb, 0xda, 0x7b, 0x8b, 0x0f, 0x1a, 0xd8, 0x8f, 0xa8, 0xbb, 0x5b, + 0x31, 0xe6, 0xa4, 0x02, 0xcc, 0x27, 0x82, 0x79, 0x48, 0x94, 0x04, 0x87, + 0x9a, 0xc2, 0x0e, 0x12, 0xaf, 0xb2, 0x30, 0x0e, 0xd6, 0xb8, 0xd1, 0xb9, + 0x24, 0x30, 0x59, 0x01, 0x80, 0x4a, 0x62, 0x13, 0x03, 0xff, 0x92, 0xb3, + 0x2f, 0x03, 0xe3, 0x66, 0x88, 0xaa, 0x1b, 0x55, 0xe8, 0x2a, 0x42, 0x08, + 0x2d, 0x75, 0xe4, 0xcc, 0x8f, 0x22, 0xa9, 0xea, 0x32, 0x80, 0x73, 0x89, + 0xce, 0x0c, 0x3d, 0xc5, 0xe8, 0xad, 0x0b, 0xe2, 0x12, 0x9a, 0xee, 0x02, + 0x37, 0xa4, 0x5d, 0xfc, 0x63, 0x57, 0x6b, 0x38, 0x0e, 0xbb, 0xd7, 0x22, + 0x14, 0x00, 0x86, 0x1d, 0x59, 0x41, 0xa5, 0xe5, 0x41, 0xde, 0x7e, 0xb8, + 0x6d, 0x92, 0x62, 0x42, 0x7a, 0xc3, 0x0d, 0xe4, 0xcc, 0x20, 0x65, 0xcb, + 0x65, 0xa8, 0x76, 0x28, 0x62, 0xe2, 0xf6, 0xce, 0x48, 0x0e, 0x22, 0x9b, + 0x3f, 0xc7, 0x02, 0xcd, 0x3c, 0x31, 0xba, 0x09, 0xe3, 0xdb, 0xe9, 0x21, + 0xc9, 0x7f, 0x33, 0xb1, 0xa0, 0x25, 0x73, 0x78, 0x31, 0xd8, 0x00, 0x8d, + 0x7a, 0x67, 0x1c, 0x7a, 0x03, 0xf8, 0x26, 0x4d, 0xbe, 0x03, 0x8e, 0x1d, + 0x01, 0x80, 0x8e, 0x1e, 0x5a, 0x54, 0x53, 0x17, 0xdb, 0x5c, 0xc8, 0x60, + 0xb3, 0x3a, 0xe7, 0x85, 0xa0, 0xa4, 0x16, 0xb6, 0x6c, 0xfd, 0x75, 0xcc, + 0x15, 0x25, 0x38, 0xa6, 0x70, 0x62, 0xd0, 0x70, 0x00, 0xfe, 0x4e, 0x74, + 0x70, 0xc6, 0x6a, 0x52, 0xe0, 0x0d, 0x5c, 0x28, 0x3f, 0x82, 0x0e, 0x2b, + 0x53, 0x61, 0x26, 0x2c, 0x2f, 0x93, 0xbb, 0x9c, 0x22, 0x63, 0x69, 0xc8, + 0x6d, 0xd2, 0x79, 0xe7, 0x4b, 0x63, 0x97, 0xe6, 0x59, 0x7b, 0x71, 0x6d, + 0x21, 0xa8, 0xa9, 0x4d, 0x25, 0x84, 0x70, 0x3d, 0x03, 0x1e, 0x54, 0xac, + 0x8e, 0xdb, 0x96, 0xa1, 0x34, 0x4b, 0x80, 0xda, 0xa3, 0x11, 0x13, 0x69, + 0x98, 0x23, 0x34, 0xbe, 0x93, 0x89, 0x50, 0xa8, 0x79, 0x39, 0x5f, 0xf2, + 0x50, 0x21, 0xa7, 0x9a, 0x01, 0x8e, 0x43, 0x31, 0xe0, 0x26, 0x09, 0xe2, + 0x07, 0x97, 0x3c, 0xc7, 0x31, 0x04, 0x2b, 0x2c, 0x60, 0xa3, 0xed, 0x91, + 0xfc, 0xd7, 0xd0, 0x30, 0xa6, 0x56, 0xf6, 0x67, 0xb3, 0x6d, 0x7c, 0xa5, + 0x86, 0xd0, 0x09, 0x91, 0x1b, 0xfc, 0xf9, 0xcb, 0x15, 0x5a, 0x13, 0x6e, + 0xd3, 0x6b, 0x53, 0xaf, 0x91, 0x0f, 0xaa, 0xf4, 0x0b, 0xb1, 0x56, 0x0f, + 0xbc, 0x76, 0x5e, 0xe3, 0xe2, 0xd8, 0x19, 0xd7, 0xe5, 0x4d, 0xcd, 0xbb, + 0x0a, 0x02, 0x0a, 0x25, 0x3e, 0xf4, 0x6c, 0xd6, 0xc9, 0x6c, 0x31, 0x4f, + 0x5a, 0xd4, 0xb6, 0xa2, 0x3e, 0x15, 0x10, 0x93, 0x00, 0xb1, 0xa7, 0x32, + 0xfc, 0x1d, 0x79, 0x5a, 0x16, 0xf6, 0x8d, 0x87, 0xce, 0x06, 0xa0, 0xf4, + 0xe9, 0x18, 0x8c, 0xec, 0xef, 0xe0, 0x10, 0x1c, 0xaa, 0x1f, 0xe7, 0xa1, + 0x85, 0x16, 0x32, 0xf3, 0xae, 0x4b, 0x3c, 0xf9, 0xf4, 0xdb, 0x38, 0xd9, + 0xb3, 0xef, 0xca, 0x8d, 0x76, 0x6c, 0x94, 0x62, 0x45, 0x9f, 0x35, 0x5f, + 0xf9, 0xb5, 0xae, 0x50, 0x4f, +} diff --git a/app.go b/app.go index db5599c..d24938e 100644 --- a/app.go +++ b/app.go @@ -1,3 +1,4 @@ +// Package veracity provides the main application for the Veracity CLI tool. 
package veracity import ( @@ -7,7 +8,6 @@ import ( ) func NewApp(version string, ikwid bool) *cli.App { - cli.VersionPrinter = func(cCtx *cli.Context) { fmt.Println(cCtx.App.Version) } @@ -27,8 +27,17 @@ func NewApp(version string, ikwid bool) *cli.App { }, &cli.StringFlag{ Name: "data-local", Aliases: []string{"l"}, - Usage: "filesystem location to load merkle log data from. can be a directory of massifs or a single file. mutually exclusive with data-url; if neither option is supplied, DataTrails' live log data will be used", + Usage: "filesystem location to load merkle log data from. can be a directory of massifs or a single file. mutually exclusive with data-url", + }, + &cli.StringFlag{ + Name: "massif-file", + Usage: "load a single massif from this file, added to any massifs loaded from data-local", }, + &cli.StringFlag{ + Name: "checkpoint-file", + Usage: "load a single checkpoint from this file, added to any checkpoints loaded from data-local", + }, + &cli.StringFlag{ Name: "tenant", Aliases: []string{"t"}, Usage: "tenant or list of tenants as a `,` separated list. commands which operate on a single tenant take the first tenant in the list", @@ -69,6 +78,7 @@ func AddCommands(app *cli.App, ikwid bool) *cli.App { app.Commands = append(app.Commands, NewNodeScanCmd()) app.Commands = append(app.Commands, NewFindTrieEntriesCmd()) app.Commands = append(app.Commands, NewFindMMREntriesCmd()) + app.Commands = append(app.Commands, NewAppendCmd()) } return app } diff --git a/app/app.go b/app/app.go deleted file mode 100644 index 3040c8d..0000000 --- a/app/app.go +++ /dev/null @@ -1,198 +0,0 @@ -package app - -import ( - "bytes" - "encoding/json" - "errors" - "io" - "strings" - - "github.com/datatrails/go-datatrails-logverification/logverification/app" -) - -/** - * Merklelog related APP contents - * - * An APP in the context of merklelog is an interface that commits log entries. - * - * Apps include: - * - assetsv2 - * - eventsv1 - */ - -const ( - AssetsV2AppDomain = byte(0) - EventsV1AppDomain = byte(1) -) - -var ( - ErrNoJsonGiven = errors.New("no json given") -) - -// AppDataToVerifiableLogEntries converts the app data (one or more app entries) to verifiable log entries -func AppDataToVerifiableLogEntries(appData []byte, logTenant string) ([]app.AppEntry, error) { - - // first attempt to convert the appdata to a list of events - eventList, err := eventListFromJson(appData) - if err != nil { - return nil, err - } - - // now we have an event list we can decipher if the app is - // assetsv2 or eventsv1 - appDomain := appDomain(appData) - - verifiableLogEntries := []app.AppEntry{} - - switch appDomain { - case AssetsV2AppDomain: - // assetsv2 - verfiableAssetsV2Events, err := NewAssetsV2AppEntries(eventList) - if err != nil { - return nil, err - } - - verifiableLogEntries = append(verifiableLogEntries, verfiableAssetsV2Events...) - - case EventsV1AppDomain: - verfiableEventsV1Events, err := NewEventsV1AppEntries(eventList, logTenant) - if err != nil { - return nil, err - } - - verifiableLogEntries = append(verifiableLogEntries, verfiableEventsV1Events...) 
- - default: - return nil, errors.New("unknown app domain for given app data") - } - - return verifiableLogEntries, nil -} - -// appDomain returns the app domain of the given app data -func appDomain(appData []byte) byte { - - // first attempt to convert the appdata to a list of events - eventList, err := eventListFromJson(appData) - if err != nil { - // if we can't return default of assetsv2 - return AssetsV2AppDomain - } - - // decode into events - events := struct { - Events []json.RawMessage `json:"events,omitempty"` - NextPageToken json.RawMessage `json:"next_page_token,omitempty"` - }{} - - decoder := json.NewDecoder(bytes.NewReader(eventList)) - decoder.DisallowUnknownFields() - for { - err = decoder.Decode(&events) - - if errors.Is(err, io.EOF) { - break - } - - if err != nil { - // return default of assetsv2 - return AssetsV2AppDomain - } - } - - // decode the first event and find the identity - event := struct { - Identity string `json:"identity,omitempty"` - }{} - - decoder = json.NewDecoder(bytes.NewReader(events.Events[0])) - - for { - err = decoder.Decode(&event) - - if errors.Is(err, io.EOF) { - break - } - - if err != nil { - // if we can't return default of assetsv2 - return AssetsV2AppDomain - } - } - - // find if the event identity is assetsv2 or eventsv1 identity - if strings.HasPrefix(event.Identity, "assets/") || strings.HasPrefix(event.Identity, "publicassets/") { - return AssetsV2AppDomain - } else { - return EventsV1AppDomain - } - -} - -// eventListFromJson normalises a json encoded event or *list* of events, by -// always returning a list of json encoded events. -// -// This converts events from the following apps: -// - assetsv2 -// - eventsv1 -// -// NOTE: there is no json validation done on the event or list of events given -// any valid json will be accepted, use validation logic after this function. -func eventListFromJson(data []byte) ([]byte, error) { - var err error - - doc := struct { - Events []json.RawMessage `json:"events,omitempty"` - NextPageToken json.RawMessage `json:"next_page_token,omitempty"` - }{} - - // check for empty json - // NOTE: also len(nil) == 0, so does the nil check also - if len(data) == 0 { - return nil, ErrNoJsonGiven - } - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - - for { - - err = decoder.Decode(&doc) - - // if we can decode the events json - // we know its in the form of a list events json response from - // the list events api, so just return data - if errors.Is(err, io.EOF) { - return data, nil - } - - if err != nil { - break - } - - } - - // if we get here we know that the given data doesn't represent - // a list events json response - // so we can assume its a single event response from the events api. 
- - var event json.RawMessage - err = json.Unmarshal(data, &event) - if err != nil { - return nil, err - } - - // purposefully omit the next page token for response - listEvents := struct { - Events []json.RawMessage `json:"events,omitempty"` - }{} - - listEvents.Events = []json.RawMessage{event} - - events, err := json.Marshal(&listEvents) - if err != nil { - return nil, err - } - - return events, nil -} diff --git a/app/app_test.go b/app/app_test.go deleted file mode 100644 index 7e3311f..0000000 --- a/app/app_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package app - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEventListFromJson(t *testing.T) { - type args struct { - data []byte - } - tests := []struct { - name string - args args - expected []byte - wantErr bool - }{ - { - name: "nil", - args: args{ - data: nil, - }, - expected: nil, - wantErr: true, - }, - { - name: "empty", - args: args{ - data: []byte{}, - }, - expected: nil, - wantErr: true, - }, - // We do need this, since we expect input from other processes via pipes (i.e. an events query) - { - name: "empty list", - args: args{ - data: []byte(`{"events":[]}`), - }, - expected: []byte(`{"events":[]}`), - wantErr: false, - }, - { - name: "single event", - args: args{ - data: []byte(`{"identity":"assets/1/events/2"}`), - }, - expected: []byte(`{"events":[{"identity":"assets/1/events/2"}]}`), - wantErr: false, - }, - { - name: "single list", - args: args{ - data: []byte(`{"events":[{"identity":"assets/1/events/2"}]}`), - }, - expected: []byte(`{"events":[{"identity":"assets/1/events/2"}]}`), - wantErr: false, - }, - { - name: "multiple list", - args: args{ - data: []byte(`{"events":[{"identity":"assets/1/events/2"},{"identity":"assets/1/events/3"}]}`), - }, - expected: []byte(`{"events":[{"identity":"assets/1/events/2"},{"identity":"assets/1/events/3"}]}`), - wantErr: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actual, err := eventListFromJson(test.args.data) - - assert.Equal(t, test.wantErr, err != nil) - assert.Equal(t, test.expected, actual) - }) - } -} - -// Test_appDomain tests: -// -// 1. a list of assetsv2 events return assetsv2 app domain -// 2. 
a list of eventsv1 events reutrn eventsv1 app domain -func Test_appDomain(t *testing.T) { - type args struct { - appData []byte - } - tests := []struct { - name string - args args - expected byte - }{ - { - name: "positive assetsv2", - args: args{ - appData: singleAssetsv2EventJsonList, - }, - expected: 0, - }, - { - name: "positive eventsv1", - args: args{ - appData: singleEventsv1EventJsonList, - }, - expected: 1, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actual := appDomain(test.args.appData) - - assert.Equal(t, test.expected, actual) - }) - } -} diff --git a/app/assetsv2.go b/app/assetsv2.go deleted file mode 100644 index e14bb46..0000000 --- a/app/assetsv2.go +++ /dev/null @@ -1,129 +0,0 @@ -package app - -import ( - "encoding/json" - "errors" - "sort" - "strings" - - "github.com/datatrails/go-datatrails-common-api-gen/assets/v2/assets" - "github.com/datatrails/go-datatrails-logverification/logverification" - "github.com/datatrails/go-datatrails-logverification/logverification/app" - "github.com/google/uuid" - "google.golang.org/protobuf/encoding/protojson" -) - -var ( - ErrInvalidAssetsV2EventJson = errors.New(`invalid assetsv2 event json`) - ErrNoEvents = errors.New(`no events found in events json`) -) - -func VerifiableAssetsV2EventsFromData(data []byte) ([]app.AppEntry, error) { - - // Accept either the list events response format or a single event. Peak - // into the json data to pick which. - eventsJson, err := eventListFromJson(data) - if err != nil { - return nil, err - } - - verifiableEvents, err := NewAssetsV2AppEntries(eventsJson) - if err != nil { - return nil, err - } - - for _, event := range verifiableEvents { - validationErr := logverification.Validate(event) - if validationErr != nil { - return nil, validationErr - } - } - - return verifiableEvents, nil -} - -// NewAssetsV2AppEntries takes a list of events JSON (e.g. from the assetsv2 events list API), converts them -// into AssetsV2AppEntries and then returns them sorted by ascending MMR index. -func NewAssetsV2AppEntries(eventsJson []byte) ([]app.AppEntry, error) { - // get the event list out of events - eventListJson := struct { - Events []json.RawMessage `json:"events"` - }{} - - err := json.Unmarshal(eventsJson, &eventListJson) - if err != nil { - return nil, err - } - - events := []app.AppEntry{} - for _, eventJson := range eventListJson.Events { - verifiableEvent, err := NewAssetsV2AppEntry(eventJson) - if err != nil { - return nil, err - } - - events = append(events, *verifiableEvent) - } - - // check if we haven't got any events - if len(events) == 0 { - return nil, ErrNoEvents - } - - // Sorting the events by MMR index guarantees that they're sorted in log append order. - sort.Slice(events, func(i, j int) bool { - return events[i].MMRIndex() < events[j].MMRIndex() - }) - - return events, nil -} - -// NewAssetsV2AppEntry takes a single assetsv2 event JSON and returns an AssetsV2AppEntry, -// providing just enough information to verify the incluson of and identify the event. -func NewAssetsV2AppEntry(eventJson []byte) (*app.AppEntry, error) { - - // special care is needed here to deal with uint64 types. json marshal / - // un marshal treats them as strings because they don't fit in a - // javascript Number - - // Unmarshal into a generic type to get just the bits we need. Use - // defered decoding to get the raw merklelog entry as it must be - // unmarshaled using protojson and the specific generated target type. 
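// For reference, the deferred-decoding pattern in isolation: capture the
// nested entry as json.RawMessage, then parse the string-encoded 64-bit
// value explicitly so it never round-trips through a float64. A minimal
// standard-library sketch; the real code hands the raw message to protojson.
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	raw := []byte(`{"identity":"assets/1/events/2","merklelog_entry":{"commit":{"index":"18446744073709551614"}}}`)

	outer := struct {
		Identity       string          `json:"identity"`
		MerklelogEntry json.RawMessage `json:"merklelog_entry"`
	}{}
	if err := json.Unmarshal(raw, &outer); err != nil {
		panic(err)
	}

	entry := struct {
		Commit struct {
			Index string `json:"index"`
		} `json:"commit"`
	}{}
	if err := json.Unmarshal(outer.MerklelogEntry, &entry); err != nil {
		panic(err)
	}

	// 18446744073709551614 does not fit in a float64, so the string form is
	// parsed directly into a uint64.
	index, err := strconv.ParseUint(entry.Commit.Index, 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(outer.Identity, index)
}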
- entry := struct { - Identity string `json:"identity,omitempty"` - TenantIdentity string `json:"tenant_identity,omitempty"` - // Note: the proof_details top level field can be ignored here because it is a 'oneof' - MerklelogEntry json.RawMessage `json:"merklelog_entry,omitempty"` - }{} - err := json.Unmarshal(eventJson, &entry) - if err != nil { - return nil, err - } - - merkleLog := &assets.MerkleLogEntry{} - err = protojson.Unmarshal(entry.MerklelogEntry, merkleLog) - if err != nil { - return nil, err - } - - if entry.TenantIdentity == "" { - return nil, ErrInvalidAssetsV2EventJson - } - - // get the logID from the event log tenant - logUuid := strings.TrimPrefix(entry.TenantIdentity, "tenant/") - logId, err := uuid.Parse(logUuid) - if err != nil { - return nil, err - } - - return app.NewAppEntry( - entry.Identity, - logId[:], - app.NewMMREntryFields( - byte(0), - eventJson, // we cheat a bit here, because the eventJson isn't really serialized, but its a log version 0 log entry - ), - merkleLog.Commit.Index, - ), nil -} diff --git a/app/assetsv2_test.go b/app/assetsv2_test.go deleted file mode 100644 index bb7f765..0000000 --- a/app/assetsv2_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package app - -import ( - "encoding/json" - "testing" - - "github.com/datatrails/go-datatrails-logverification/logverification/app" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestVerifiableAssetsV2EventsFromData(t *testing.T) { - type args struct { - data []byte - } - tests := []struct { - name string - args args - expected []app.AppEntry - err error - }{ - { - name: "empty event list", - args: args{ - data: []byte(`{"events":[]}`), - }, - expected: []app.AppEntry{}, - err: ErrNoEvents, - }, - { - name: "list with invalid v3 event returns a validation error", - args: args{ - data: []byte(`{ - "events":[ - { - "merklelog_entry": { - "commit": { - "index": "0", - "idtimestamp": "018e3f48610b089800" - } - } - } - ] -}`), - }, - expected: nil, - err: ErrInvalidAssetsV2EventJson, - }, - { - name: "single event list", - args: args{ - data: singleAssetsv2EventJsonList, - }, - expected: []app.AppEntry{ - *app.NewAppEntry( - "assets/31de2eb6-de4f-4e5a-9635-38f7cd5a0fc8/events/21d55b73-b4bc-4098-baf7-336ddee4f2f2", // app id - []byte{0x73, 0xb0, 0x6b, 0x4e, 0x50, 0x4e, 0x4d, 0x31, 0x9f, 0xd9, 0x5e, 0x60, 0x6f, 0x32, 0x9b, 0x51}, // log id - app.NewMMREntryFields( - byte(0), // domain - assetsv2EventJson, // serialized bytes - ), - 0, // mmr index - ), - }, - err: nil, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actual, err := VerifiableAssetsV2EventsFromData(test.args.data) - - assert.Equal(t, test.err, err) - assert.Equal(t, len(test.expected), len(actual)) - - for index, expectedEvent := range test.expected { - actualEvent := actual[index] - - assert.Equal(t, expectedEvent.AppID(), actualEvent.AppID()) - assert.Equal(t, expectedEvent.LogID(), actualEvent.LogID()) - assert.Equal(t, expectedEvent.MMRIndex(), actualEvent.MMRIndex()) - - // serialized bytes needs to be marshalled to show the json is equal for assetsv2 - var expectedJson map[string]any - err := json.Unmarshal(expectedEvent.SerializedBytes(), &expectedJson) - require.NoError(t, err) - - var actualJson map[string]any - err = json.Unmarshal(actualEvent.SerializedBytes(), &actualJson) - require.NoError(t, err) - - assert.Equal(t, expectedJson, actualJson) - } - }) - } -} diff --git a/app/consts_test.go b/app/consts_test.go deleted file mode 100644 index 966f434..0000000 --- 
a/app/consts_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package app - -/** - * file for all the test constants - */ - -var ( - assetsv2EventJson = []byte(`{ - "identity": "assets/31de2eb6-de4f-4e5a-9635-38f7cd5a0fc8/events/21d55b73-b4bc-4098-baf7-336ddee4f2f2", - "asset_identity": "assets/31de2eb6-de4f-4e5a-9635-38f7cd5a0fc8", - "event_attributes": {}, - "asset_attributes": { - "document_hash_value": "3f3cbc0b6b3b20883b8fb1bf0203b5a1233809b2ab8edc8dd00b5cf1afaae3ee" - }, - "operation": "NewAsset", - "behaviour": "AssetCreator", - "timestamp_declared": "2024-03-14T23:24:50Z", - "timestamp_accepted": "2024-03-14T23:24:50Z", - "timestamp_committed": "2024-03-22T11:13:55.557Z", - "principal_declared": { - "issuer": "https://app.soak.stage.datatrails.ai/appidpv1", - "subject": "e96dfa33-b645-4b83-a041-e87ac426c089", - "display_name": "Root", - "email": "" - }, - "principal_accepted": { - "issuer": "https://app.soak.stage.datatrails.ai/appidpv1", - "subject": "e96dfa33-b645-4b83-a041-e87ac426c089", - "display_name": "Root", - "email": "" - }, - "confirmation_status": "CONFIRMED", - "transaction_id": "", - "block_number": 0, - "transaction_index": 0, - "from": "0xF17B3B9a3691846CA0533Ce01Fa3E35d6d6f714C", - "tenant_identity": "tenant/73b06b4e-504e-4d31-9fd9-5e606f329b51", - "merklelog_entry": { - "commit": { - "index": "0", - "idtimestamp": "018e3f48610b089800" - }, - "confirm": { - "mmr_size": "7", - "root": "XdcejozGdFYn7JTa/5PUodWtmomUuGuTTouMvxyDevo=", - "timestamp": "1711106035557", - "idtimestamp": "", - "signed_tree_head": "" - }, - "unequivocal": null - } -}`) - - eventsv1EventJson = []byte(` -{ - "identity": "events/01947000-3456-780f-bfa9-29881e3bac88", - "attributes": { - "foo": "bar" - }, - "trails": [], - "origin_tenant": "tenant/112758ce-a8cb-4924-8df8-fcba1e31f8b0", - "created_by": "2ef471c2-f997-4503-94c8-60b5c929a3c3", - "created_at": 1737045849174, - "confirmation_status": "CONFIRMED", - "merklelog_commit": { - "index": "1", - "idtimestamp": "019470003611017900" - } -} -`) - - singleEventsv1EventJsonList = []byte(` -{ - "events":[ - { - "identity": "events/01947000-3456-780f-bfa9-29881e3bac88", - "attributes": { - "foo": "bar" - }, - "trails": [], - "origin_tenant": "tenant/112758ce-a8cb-4924-8df8-fcba1e31f8b0", - "created_by": "2ef471c2-f997-4503-94c8-60b5c929a3c3", - "created_at": 1737045849174, - "confirmation_status": "CONFIRMED", - "event_type": "", - "ledger_entry": { - "index": "18446744073709551614", - "idtimestamp": "019470003611017900", - "log_id": "112758ce-a8cb-4924-8df8-fcba1e31f8b0" - } - } - ] -}`) - - singleAssetsv2EventJsonList = []byte(` -{ - "events":[ - { - "identity": "assets/31de2eb6-de4f-4e5a-9635-38f7cd5a0fc8/events/21d55b73-b4bc-4098-baf7-336ddee4f2f2", - "asset_identity": "assets/31de2eb6-de4f-4e5a-9635-38f7cd5a0fc8", - "event_attributes": {}, - "asset_attributes": { - "document_hash_value": "3f3cbc0b6b3b20883b8fb1bf0203b5a1233809b2ab8edc8dd00b5cf1afaae3ee" - }, - "operation": "NewAsset", - "behaviour": "AssetCreator", - "timestamp_declared": "2024-03-14T23:24:50Z", - "timestamp_accepted": "2024-03-14T23:24:50Z", - "timestamp_committed": "2024-03-22T11:13:55.557Z", - "principal_declared": { - "issuer": "https://app.soak.stage.datatrails.ai/appidpv1", - "subject": "e96dfa33-b645-4b83-a041-e87ac426c089", - "display_name": "Root", - "email": "" - }, - "principal_accepted": { - "issuer": "https://app.soak.stage.datatrails.ai/appidpv1", - "subject": "e96dfa33-b645-4b83-a041-e87ac426c089", - "display_name": "Root", - "email": "" - }, - "confirmation_status": 
"CONFIRMED", - "transaction_id": "", - "block_number": 0, - "transaction_index": 0, - "from": "0xF17B3B9a3691846CA0533Ce01Fa3E35d6d6f714C", - "tenant_identity": "tenant/73b06b4e-504e-4d31-9fd9-5e606f329b51", - "merklelog_entry": { - "commit": { - "index": "0", - "idtimestamp": "018e3f48610b089800" - }, - "confirm": { - "mmr_size": "7", - "root": "XdcejozGdFYn7JTa/5PUodWtmomUuGuTTouMvxyDevo=", - "timestamp": "1711106035557", - "idtimestamp": "", - "signed_tree_head": "" - }, - "unequivocal": null - } - } - ] -}`) -) diff --git a/app/eventsv1.go b/app/eventsv1.go deleted file mode 100644 index 5c891d6..0000000 --- a/app/eventsv1.go +++ /dev/null @@ -1,159 +0,0 @@ -package app - -import ( - "encoding/json" - "errors" - "sort" - "strconv" - "strings" - - "github.com/datatrails/go-datatrails-logverification/logverification/app" - "github.com/datatrails/go-datatrails-serialization/eventsv1" - "github.com/google/uuid" -) - -var ( - ErrInvalidEventsV1EventJson = errors.New(`invalid eventsv1 event json`) -) - -func VerifiableEventsV1EventsFromData(data []byte, logTenant string) ([]app.AppEntry, error) { - - // Accept either the list events response format or a single event. Peak - // into the json data to pick which. - eventsJson, err := eventListFromJson(data) - if err != nil { - return nil, err - } - - verifiableEvents, err := NewEventsV1AppEntries(eventsJson, logTenant) - if err != nil { - return nil, err - } - - return verifiableEvents, nil -} - -// NewEventsV1AppEntries takes a list of events JSON (e.g. from the events list API), converts them -// into EventsV1AppEntries and then returns them sorted by ascending MMR index. -func NewEventsV1AppEntries(eventsJson []byte, logTenant string) ([]app.AppEntry, error) { - // get the event list out of events - eventListJson := struct { - Events []json.RawMessage `json:"events"` - }{} - - err := json.Unmarshal(eventsJson, &eventListJson) - if err != nil { - return nil, err - } - - // check if we haven't got any events - if len(eventListJson.Events) == 0 { - return nil, ErrNoEvents - } - - events := []app.AppEntry{} - for _, eventJson := range eventListJson.Events { - verifiableEvent, err := NewEventsV1AppEntry(eventJson, logTenant) - if err != nil { - return nil, err - } - - events = append(events, *verifiableEvent) - } - - // Sorting the events by MMR index guarantees that they're sorted in log append order. - sort.Slice(events, func(i, j int) bool { - return events[i].MMRIndex() < events[j].MMRIndex() - }) - - return events, nil -} - -type ledgerEntry struct { - Index string `json:"index,omitempty"` - Idtimestamp string `json:"idtimestamp,omitempty"` - LogId string `json:"log_id,omitempty"` -} - -type eventData struct { - Identity string `json:"identity,omitempty"` - OriginTenant string `json:"origin_tenant,omitempty"` - - Attributes map[string]any `json:"attributes,omitempty"` - Trails []string `json:"trails,omitempty"` - EventType string `json:"event_type,omitempty"` - - // Note: the proof_details top level field can be ignored here because it is a 'oneof' - LedgerEntry json.RawMessage `json:"ledger_entry,omitempty"` -} - -// NewEventsV1AppEntry takes a single eventsv1 event JSON and returns a VerifiableEventsV1Event, -// providing just enough information to verify and identify the event. -func NewEventsV1AppEntry(eventJson []byte, logTenant string) (*app.AppEntry, error) { - - // special care is needed here to deal with uint64 types. 
json marshal / - // un marshal treats them as strings because they don't fit in a - // javascript Number - - // Unmarshal into a generic type to get just the bits we need. Use - // defered decoding to get the raw merklelog entry as it must be - // unmarshaled using protojson and the specific generated target type. - entry := eventData{} - - err := json.Unmarshal(eventJson, &entry) - if err != nil { - return nil, err - } - - // check we have at least the origin tenant - if entry.OriginTenant == "" { - return nil, ErrInvalidEventsV1EventJson - } - - //TODO: don't need to pass in logTenant any more - // if logTenant isn't given, default to the origin tenant - // for log tenant. - if logTenant == "" { - logTenant = entry.OriginTenant - } - - // get the merklelog commit info - eventLedgerEntry := &ledgerEntry{} - err = json.Unmarshal(entry.LedgerEntry, eventLedgerEntry) - if err != nil { - return nil, err - } - - // get the logID from the event log tenant - logUuid := strings.TrimPrefix(logTenant, "tenant/") - logId, err := uuid.Parse(logUuid) - if err != nil { - return nil, err - } - - // get the serialized bytes - serializableEvent := eventsv1.SerializableEvent{ - Attributes: entry.Attributes, - Trails: entry.Trails, - EventType: entry.EventType, - } - serializedBytes, err := serializableEvent.Serialize() - if err != nil { - return nil, err - } - - entryIndex, err := strconv.ParseUint(eventLedgerEntry.Index, 10, 64) - if err != nil { - return nil, err - } - - return app.NewAppEntry( - entry.Identity, - logId[:], - app.NewMMREntryFields( - byte(0), - serializedBytes, - ), - entryIndex, - ), nil -} diff --git a/app/eventsv1_test.go b/app/eventsv1_test.go deleted file mode 100644 index 4ad039b..0000000 --- a/app/eventsv1_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package app - -import ( - "testing" - - "github.com/datatrails/go-datatrails-logverification/logverification/app" - "github.com/stretchr/testify/assert" -) - -func TestVerifiableEventsV1EventsFromData(t *testing.T) { - type args struct { - data []byte - logTenant string - } - tests := []struct { - name string - args args - expected []app.AppEntry - err error - }{ - { - name: "empty event list", - args: args{ - data: []byte(`{"events":[]}`), - logTenant: "tenant/112758ce-a8cb-4924-8df8-fcba1e31f8b0", - }, - expected: []app.AppEntry{}, - err: ErrNoEvents, - }, - { - name: "list with invalid v1 event returns a validation error", - args: args{ - data: []byte(`{ - "events":[ - { - "merklelog_entry": { - "commit": { - "index": "0", - "idtimestamp": "018e3f48610b089800" - } - } - } - ] -}`), - logTenant: "", - }, - expected: nil, - err: ErrInvalidEventsV1EventJson, - }, - { - name: "single event list", - args: args{ - data: singleEventsv1EventJsonList, - logTenant: "tenant/112758ce-a8cb-4924-8df8-fcba1e31f8b0", - }, - expected: []app.AppEntry{ - *app.NewAppEntry( - "events/01947000-3456-780f-bfa9-29881e3bac88", // app id - []byte{0x11, 0x27, 0x58, 0xce, 0xa8, 0xcb, 0x49, 0x24, 0x8d, 0xf8, 0xfc, 0xba, 0x1e, 0x31, 0xf8, 0xb0}, // log id - app.NewMMREntryFields( - byte(0), // domain - []byte{ - 0x34, 0x30, 0x3a, 0x7b, 0x22, 0x61, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, - 0x3a, 0x7b, 0x22, 0x66, 0x6f, 0x6f, 0x22, 0x3a, - 0x22, 0x62, 0x61, 0x72, 0x22, 0x7d, 0x2c, 0x22, - 0x74, 0x72, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x3a, - 0x5b, 0x5d, 0x7d, - }, // serialized bytes - ), - 18446744073709551614, // mmr index - ), - }, - err: nil, - }, - { - name: "single event list empty log tenant", - args: args{ - data: singleEventsv1EventJsonList, 
- logTenant: "", - }, - expected: []app.AppEntry{ - *app.NewAppEntry( - "events/01947000-3456-780f-bfa9-29881e3bac88", // app id - []byte{0x11, 0x27, 0x58, 0xce, 0xa8, 0xcb, 0x49, 0x24, 0x8d, 0xf8, 0xfc, 0xba, 0x1e, 0x31, 0xf8, 0xb0}, // log id - app.NewMMREntryFields( - byte(0), // domain - []byte{ - 0x34, 0x30, 0x3a, 0x7b, 0x22, 0x61, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, - 0x3a, 0x7b, 0x22, 0x66, 0x6f, 0x6f, 0x22, 0x3a, - 0x22, 0x62, 0x61, 0x72, 0x22, 0x7d, 0x2c, 0x22, - 0x74, 0x72, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x3a, - 0x5b, 0x5d, 0x7d, - }, // serialized bytes - ), - 18446744073709551614, // mmr index - ), - }, - err: nil, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actual, err := VerifiableEventsV1EventsFromData(test.args.data, test.args.logTenant) - - assert.Equal(t, test.err, err) - assert.Equal(t, len(test.expected), len(actual)) - - for index, expectedEvent := range test.expected { - actualEvent := actual[index] - - assert.Equal(t, expectedEvent.AppID(), actualEvent.AppID()) - assert.Equal(t, expectedEvent.LogID(), actualEvent.LogID()) - assert.Equal(t, expectedEvent.MMRIndex(), actualEvent.MMRIndex()) - - assert.Equal(t, expectedEvent.SerializedBytes(), actualEvent.SerializedBytes()) - } - }) - } -} diff --git a/app/readappdata.go b/app/readappdata.go deleted file mode 100644 index 4fe96fc..0000000 --- a/app/readappdata.go +++ /dev/null @@ -1,45 +0,0 @@ -package app - -import ( - "bufio" - "os" - "path/filepath" -) - -func stdinToAppData() ([]byte, error) { - return scannerToAppData(bufio.NewScanner(os.Stdin)) -} - -func filePathToAppData(filePath string) ([]byte, error) { - filePath, err := filepath.Abs(filePath) - if err != nil { - return nil, err - } - f, err := os.Open(filePath) - if err != nil { - return nil, err - } - return scannerToAppData(bufio.NewScanner(f)) -} - -func scannerToAppData(scanner *bufio.Scanner) ([]byte, error) { - var data []byte - for scanner.Scan() { - data = append(data, scanner.Bytes()...) 
- } - if err := scanner.Err(); err != nil { - return nil, err - } - return data, nil -} - -// ReadAppData reads the app data from stdin or from a given file path -func ReadAppData(fromStdIn bool, filePath string) ([]byte, error) { - - if fromStdIn { - return stdinToAppData() - } - - return filePathToAppData(filePath) - -} diff --git a/append.go b/append.go new file mode 100644 index 0000000..60fdc26 --- /dev/null +++ b/append.go @@ -0,0 +1,546 @@ +package veracity + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/fxamacker/cbor/v2" + "github.com/veraison/go-cose" + + commoncose "github.com/datatrails/go-datatrails-common/cose" + "github.com/datatrails/go-datatrails-merklelog/massifs" + "github.com/datatrails/go-datatrails-merklelog/massifs/storage" + "github.com/datatrails/go-datatrails-merklelog/mmr" + "github.com/datatrails/veracity/keyio" + "github.com/datatrails/veracity/scitt" + "github.com/urfave/cli/v2" +) + +// coseSigner implements IdentifiableCoseSigner +type identifiableCoseSigner struct { + innerSigner cose.Signer + publicKey ecdsa.PublicKey +} + +func (s *identifiableCoseSigner) Algorithm() cose.Algorithm { + return s.innerSigner.Algorithm() +} + +func (s *identifiableCoseSigner) Sign(rand io.Reader, content []byte) ([]byte, error) { + return s.innerSigner.Sign(rand, content) +} + +func (s *identifiableCoseSigner) LatestPublicKey() (*ecdsa.PublicKey, error) { + return &s.publicKey, nil +} + +func (s *identifiableCoseSigner) PublicKey(ctx context.Context, kid string) (*ecdsa.PublicKey, error) { + return &s.publicKey, nil +} + +func (s *identifiableCoseSigner) KeyLocation() string { + return "robinbryce.me" +} + +func (s *identifiableCoseSigner) KeyIdentifier() string { + // the returned kid needs to match the kid format of the keyvault key + return "location:robinbryce/version1" +} + +// NewAppendCmd appends an entry to a local ledger, optionally sealing it with a provided private key. +func NewAppendCmd() *cli.Command { + return &cli.Command{ + Name: "append", + Usage: "add an entry to a local ledger, optionally sealing it with a provided private key", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "mmrindex", Aliases: []string{"i"}, + }, + &cli.Int64Flag{ + Name: "massif", Aliases: []string{"m"}, + Usage: "allow inspection of an arbitrary mmr index by explicitly specifying a massif index", + Value: -1, + }, + &cli.StringFlag{ + Name: "sealer-key", + Usage: "the sealer key to use for signing the entry, in cose .cbor. Only P-256, ES256 is supported. If --generate-sealer-key is set, this generated key will be written to this file.", + }, + &cli.StringFlag{ + Name: "sealer-key-pem", + Usage: "the sealer key to use for signing the entry, in PEM format. Only P-256, ES256 is supported. If --generate-sealer-key is set, this generated key will be written to this file.", + }, + &cli.StringFlag{ + Name: "sealer-public-key-pem", + Usage: "If set, and if the sealer key is generated, the public key in PEM format is saved to this file.", + }, + + &cli.StringFlag{ + Name: "trusted-sealer-key-pem", + Usage: "verify the current seal using this pem file based public key", + }, + + &cli.StringFlag{ + Name: "receipt-file", + Usage: "file name to write the receipt to, defaults to 'receipt-{mmrIndex}.cbor'", + }, + + &cli.StringFlag{ + Name: "signed-statement", + Usage: "read statement to register from this file. 
If statements-dir is also set, this statement is registered first, then all statements in the directory are registered.",
+			},
+			&cli.StringFlag{
+				Name:  "statements-dir",
+				Usage: "read statements to register from this directory. The statements are added in lexical filename order",
+			},
+
+			&cli.BoolFlag{
+				Name:  "generate-sealer-key",
+				Usage: "generate an ephemeral sealer key and write it to the sealer-key file. If the sealer-key file already exists, it will be overwritten. The default file name is 'ecdsa-key-private.cbor'.",
+			},
+			&cli.StringFlag{
+				Name:  "massifs-dir",
+				Usage: "the directory to read the massifs from.",
+			},
+			&cli.StringFlag{
+				Name:  "seals-dir",
+				Usage: "the directory to read the massif seals from.",
+			},
+		},
+		Action: func(cCtx *cli.Context) error {
+			var err error
+			var reader readerSelector
+			cmd := &CmdCtx{}
+
+			if !cCtx.IsSet("data-local") {
+				return errors.New("this command supports local replicas only, and requires --data-local")
+			}
+			err = cfgLogging(cmd, cCtx)
+			if err != nil {
+				return fmt.Errorf("failed to configure logging: %w", err)
+			}
+
+			if reader, err = cfgMassifReader(cmd, cCtx); err != nil {
+				return err
+			}
+
+			//
+			// Read or generate a key to seal the forked log
+			//
+			var sealingKey *ecdsa.PrivateKey
+			var decodedKey keyio.DecodedPrivate
+			if cCtx.IsSet("sealer-key") && !cCtx.Bool("generate-sealer-key") {
+				sealerKeyFile := cCtx.String("sealer-key")
+				if sealerKeyFile == "" {
+					return errors.New("sealer-key file is required")
+				}
+				decodedKey, err = keyio.ReadECDSAPrivateCOSE(sealerKeyFile)
+				if err != nil {
+					return fmt.Errorf("failed to load sealer key from file %s: %w", sealerKeyFile, err)
+				}
+				sealingKey = decodedKey.Private
+			}
+			if cCtx.IsSet("sealer-key-pem") && !cCtx.Bool("generate-sealer-key") {
+				if cCtx.IsSet("sealer-key") {
+					fmt.Printf("using sealer-key-pem %s (in preference to sealer-key)\n", cCtx.String("sealer-key-pem"))
+				}
+				sealerKeyFile := cCtx.String("sealer-key-pem")
+				if sealerKeyFile == "" {
+					return errors.New("sealer-key-pem file is required")
+				}
+				decodedKey, err = keyio.ReadECDSAPrivatePEM(sealerKeyFile)
+				if err != nil {
+					return fmt.Errorf("failed to load sealer key from file %s: %w", sealerKeyFile, err)
+				}
+				sealingKey = decodedKey.Private
+			}
+
+			if cCtx.Bool("generate-sealer-key") {
+				sealingKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+				if err != nil {
+					return err
+				}
+			}
+			var verifier cose.Verifier
+
+			if cmd.CheckpointPublic.Public == nil {
+				return errors.New("checkpoint public key is required")
+			}
+			verifier, err = cose.NewVerifier(cmd.CheckpointPublic.Alg, cmd.CheckpointPublic.Public)
+			if err != nil {
+				return err
+			}
+
+			headIndex, err := reader.HeadIndex(context.Background(), storage.ObjectCheckpoint)
+			if err != nil {
+				return fmt.Errorf("failed to get head index: %w", err)
+			}
+
+			verified, err := massifs.GetContextVerified(
+				context.Background(), reader, &cmd.CBORCodec, verifier, headIndex)
+			if err != nil {
+				return fmt.Errorf("failed to read verified head massif: %w", err)
+			}
+
+			mmrSizeOrig := verified.RangeCount()
+			fmt.Printf("%8d verified-size\n", mmrSizeOrig)
+			// verified.Tags = map[string]string{}
+
+			//
+			// Add a batch of statements
+			//
+			statements, err := addStatements(cmd, cCtx, &verified.MassifContext)
+			if err != nil {
+				return err
+			}
+			fmt.Printf("%d statements registered\n", len(statements))
+			mmrSizeNew := verified.RangeCount()
+			peakHashesNew, err := mmr.PeakHashes(&verified.MassifContext, mmrSizeNew-1)
+			if err != nil {
+				return err
+			}
+			for i,
peak := range peakHashesNew {
+				fmt.Printf("peak[%d]: %x\n", i, peak)
+			}
+
+			alg, err := commoncose.CoseAlgForEC(sealingKey.PublicKey)
+			if err != nil {
+				return err
+			}
+
+			coseSigner, err := cose.NewSigner(alg, sealingKey)
+			if err != nil {
+				return err
+			}
+			identifiableSigner := &identifiableCoseSigner{
+				innerSigner: coseSigner,
+				publicKey:   sealingKey.PublicKey,
+			}
+
+			//
+			// Seal a checkpoint for the locally forked ledger with a made-up sealing key.
+			// Receipts are rooted at a checkpoint accumulator state.
+			//
+			rootSigner := massifs.NewRootSigner("https://github.com/robinbryce/veracity", cmd.CBORCodec)
+
+			// TODO: account for filling a massif
+			mmrSizeCurrent := verified.RangeCount()
+			cp, err := mmr.IndexConsistencyProof(&verified.MassifContext, verified.MMRState.MMRSize-1, mmrSizeCurrent-1)
+			if err != nil {
+				return err
+			}
+
+			// To create the checkpoint, we first check that the current state
+			// contains the previously verified state. This necessarily produces
+			// and verifies the new accumulator, which we can then include with
+			// the new checkpoint.
+			ok, peaksB, err := mmr.CheckConsistency(
+				verified, sha256.New(),
+				cp.MMRSizeA, cp.MMRSizeB, verified.MMRState.Peaks)
+			if !ok {
+				return fmt.Errorf("consistency check failed: verify failed")
+			}
+			if err != nil {
+				return err
+			}
+			lastIDTimestamp := verified.GetLastIDTimestamp()
+
+			state := massifs.MMRState{
+				Version:         int(massifs.MMRStateVersionCurrent),
+				MMRSize:         mmrSizeCurrent,
+				Peaks:           peaksB,
+				Timestamp:       time.Now().UnixMilli(),
+				CommitmentEpoch: verified.MMRState.CommitmentEpoch,
+				IDTimestamp:     lastIDTimestamp,
+			}
+
+			//
+			// Read and decode the checkpoint
+			//
+			// Given a signed checkpoint, receipts can be self-served for any
+			// element included in the MMR before that checkpoint. Leaves from
+			// the massif corresponding to the checkpoint need no other data.
+			// Leaves from earlier massifs *may* need the earlier massif, but
+			// often don't (when, and which, earlier massifs are needed for an
+			// arbitrary mmrIndex is deterministic and computable).
+			//
+			// It is never necessary to have more than two massifs in order to
+			// produce a receipt against the latest checkpoint.
+			//
+			// There is no particular reason to refresh, or even save, receipts
+			// if you have a trustworthy store of checkpoints.
+			//
+			mmrStatement := statements[0]
+			// A more appropriate subject would be the identity of the log ...
+			subject := fmt.Sprintf("fork-%d-%d.bin", verified.MMRState.MMRSize-1, mmrSizeCurrent)
+			publicKey, err := identifiableSigner.LatestPublicKey()
+			if err != nil {
+				return fmt.Errorf("unable to get public key for signing key: %w", err)
+			}
+
+			keyIdentifier := identifiableSigner.KeyIdentifier()
+			data, err := rootSigner.Sign1(coseSigner, keyIdentifier, publicKey, subject, state, nil)
+			if err != nil {
+				return err
+			}
+
+			// note that state is not verified here, but we just signed it so it is our droid
+			msg, state, err := massifs.DecodeSignedRoot(cmd.CBORCodec, data)
+			if err != nil {
+				return err
+			}
+
+			//
+			// Generate the inclusion proof. Note that we don't actually need
+			// the leaf hash to do this. So *anyone* can obtain a receipt for
+			// *any* leaf at any time, given only the specific massif (tile)
+			// that leaf was registered in, and its associated checkpoint.
+			//
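// For intuition only: the shape of an inclusion-proof verification as a
// generic audit-path fold. The real implementation is mmr.InclusionProof /
// mmr.IncludedRoot, whose hashing binds in MMR position information; this
// sketch shows the mechanism, not DataTrails' exact hash scheme.
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

type step struct {
	sibling []byte
	left    bool // true when the sibling hash is the left operand
}

// fold recomputes the root implied by a leaf hash and its audit path.
func fold(leaf []byte, path []step) []byte {
	h := leaf
	for _, s := range path {
		hasher := sha256.New()
		if s.left {
			hasher.Write(s.sibling)
			hasher.Write(h)
		} else {
			hasher.Write(h)
			hasher.Write(s.sibling)
		}
		h = hasher.Sum(nil)
	}
	return h
}

func main() {
	a := sha256.Sum256([]byte("leaf-a"))
	b := sha256.Sum256([]byte("leaf-b"))
	root := sha256.Sum256(append(a[:], b[:]...))

	// Proving a requires only b's hash; the verifier needs neither b's
	// pre-image nor any other tree data.
	ok := bytes.Equal(fold(a[:], []step{{sibling: b[:], left: false}}), root[:])
	fmt.Println("included:", ok) // true
}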
+			proof, err := mmr.InclusionProof(&verified.MassifContext, state.MMRSize-1, mmrStatement.MMRIndexLeaf)
+			if err != nil {
+				return fmt.Errorf(
+					"failed to generate inclusion proof: %d in MMR(%d), %v",
+					mmrStatement.MMRIndexLeaf, verified.MMRState.MMRSize, err)
+			}
+
+			//
+			// Locate the pre-signed receipt for the accumulator peak containing the leaf.
+			//
+			peakIndex := mmr.PeakIndex(mmr.LeafCount(state.MMRSize), len(proof))
+			// NOTE: The old-accumulator compatibility property, from
+			// https://eprint.iacr.org/2015/718.pdf, along with the COSE protected &
+			// unprotected buckets, is why we can just pre-sign the receipts.
+			// As long as the receipt consumer is convinced of the log's consistency (not split view),
+			// it does not matter which accumulator state the receipt is signed against.
+
+			var peaksHeader massifs.MMRStateReceipts
+			err = cbor.Unmarshal(msg.Headers.RawUnprotected, &peaksHeader)
+			if err != nil {
+				return fmt.Errorf(
+					"%w: failed decoding peaks header", err)
+			}
+			if peakIndex >= len(peaksHeader.PeakReceipts) {
+				return fmt.Errorf(
+					"peaks header contains too few peak receipts (%d) for peak index %d",
+					len(peaksHeader.PeakReceipts), peakIndex)
+			}
+
+			// This is an array of marshaled COSE_Sign1's
+			receiptMsg := peaksHeader.PeakReceipts[peakIndex]
+			signed, err := commoncose.NewCoseSign1MessageFromCBOR(
+				receiptMsg, commoncose.WithDecOptions(massifs.CheckpointDecOptions()))
+			if err != nil {
+				return fmt.Errorf(
+					"%w: failed to decode pre-signed receipt for MMR(%d)",
+					err, state.MMRSize)
+			}
+
+			// To avoid creating invalid receipts due to bugs in this demo code, check the root matches the appropriate peak.
+			root := mmr.IncludedRoot(sha256.New(), mmrStatement.MMRIndexLeaf, mmrStatement.LeafHash, proof)
+
+			if !bytes.Equal(root, peaksB[peakIndex]) {
+				return fmt.Errorf(
+					"%w: root %x of leaf %d in MMR(%d) does not match peak %d %x",
+					ErrVerifyInclusionFailed, root, mmrStatement.MMRIndexLeaf, state.MMRSize, peakIndex, state.Peaks[peakIndex])
+			}
+
+			//
+			// Make the MMR draft receipt by attaching the inclusion proof to the Unprotected header
+			//
+			signed.Headers.RawUnprotected = nil
+
+			verifiableProofs := massifs.MMRiverVerifiableProofs{
+				InclusionProofs: []massifs.MMRiverInclusionProof{{
+					Index:         mmrStatement.MMRIndexLeaf,
+					InclusionPath: proof,
+				}},
+			}
+
+			tagOriginSubject := int64(-257)
+			tagOriginIssuer := tagOriginSubject - 1
+			tagLeafHash := tagOriginSubject - 2
+			tagIDTimestamp := tagOriginSubject - 3
+			tagExtraBytes := tagOriginSubject - 4
+
+			signed.Headers.Unprotected[massifs.VDSCoseReceiptProofsTag] = verifiableProofs
+			// these values would usually be provided by the application, or obtained directly from any replica.
+			// the unprotected headers are not signed, and are intended for this sort of convenience.
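// For reference, how private convenience labels like those below behave on
// the wire: COSE header labels are CBOR map keys and may be negative
// integers, and unprotected headers are carried outside the signature. A
// minimal sketch using fxamacker/cbor; the label values and payloads here
// are illustrative only.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	tagOriginSubject := int64(-257)
	unprotected := map[int64]any{
		tagOriginSubject:     "events/01947000-3456-780f-bfa9-29881e3bac88",
		tagOriginSubject - 1: "https://issuer.example", // hypothetical issuer
	}

	data, err := cbor.Marshal(unprotected)
	if err != nil {
		panic(err)
	}
	// Because these values ride in the unprotected bucket, editing them
	// cannot invalidate the signature over the protected content.
	fmt.Printf("%x\n", data)
}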
+			signed.Headers.Unprotected[tagOriginIssuer] = mmrStatement.Claims.Issuer
+			signed.Headers.Unprotected[tagOriginSubject] = mmrStatement.Claims.Subject
+			signed.Headers.Unprotected[tagIDTimestamp] = mmrStatement.IDTimestamp
+			signed.Headers.Unprotected[tagExtraBytes] = mmrStatement.ExtraBytes
+			signed.Headers.Unprotected[tagLeafHash] = mmrStatement.LeafHash
+			//
+			// Save the receipt to a file
+			//
+			receiptCbor, err := signed.MarshalCBOR()
+			if err != nil {
+				return fmt.Errorf("failed to marshal receipt: %w", err)
+			}
+
+			receiptFileName := cCtx.String("receipt-file")
+			if receiptFileName == "" {
+				receiptFileName = fmt.Sprintf("receipt-%d.cbor", mmrStatement.MMRIndexLeaf)
+			}
+			if err := os.WriteFile(receiptFileName, receiptCbor, os.FileMode(0644)); err != nil {
+				return fmt.Errorf("failed to write receipt file %s: %w", receiptFileName, err)
+			}
+			fmt.Printf("wrote receipt file %s\n", receiptFileName)
+
+			//
+			// A bunch of persistence conveniences for the sake of the demo
+			//
+
+			forkFileName := filepath.Join(".", fmt.Sprintf("fork-%d-%d.bin", verified.MMRState.MMRSize-1, mmrSizeCurrent))
+			if err := os.WriteFile(forkFileName, data, os.FileMode(0644)); err != nil {
+				return fmt.Errorf("failed to write log fork file %s: %w", forkFileName, err)
+			}
+			fmt.Printf("wrote forked log massif file %s\n", forkFileName)
+
+			checkpointFileName := filepath.Join(".", fmt.Sprintf("checkpoint-%d.cbor", mmrSizeCurrent))
+			if err := os.WriteFile(checkpointFileName, data, os.FileMode(0644)); err != nil {
+				return fmt.Errorf("failed to write checkpoint file %s: %w", checkpointFileName, err)
+			}
+			fmt.Printf("wrote checkpoint file %s\n", checkpointFileName)
+			if cCtx.Bool("generate-sealer-key") {
+				// write the sealer key to the sealer-key file
+				sealerKeyFile := cCtx.String("sealer-key")
+				if sealerKeyFile == "" {
+					sealerKeyFile = keyio.ECDSAPrivateDefaultFileName
+				}
+				if _, err := keyio.WriteECDSAPrivateCOSE(sealerKeyFile, sealingKey); err != nil {
+					return fmt.Errorf("failed to write sealer key to file %s: %w", sealerKeyFile, err)
+				}
+				fmt.Printf("wrote sealer key to file %s\n", sealerKeyFile)
+				sealerKeyFile = cCtx.String("sealer-key-pem")
+				if sealerKeyFile == "" {
+					sealerKeyFile = keyio.ECDSAPrivateDefaultPEMFileName
+				}
+				if err := keyio.WriteECDSAPrivatePEM(sealerKeyFile, sealingKey); err != nil {
+					return fmt.Errorf("failed to write sealer key to file %s: %w", sealerKeyFile, err)
+				}
+				fmt.Printf("wrote sealer key to file %s\n", sealerKeyFile)
+
+				sealerKeyFile = cCtx.String("sealer-public-key-pem")
+				if sealerKeyFile == "" {
+					sealerKeyFile = keyio.ECDSAPublicDefaultPEMFileName
+				}
+				if _, err := keyio.WriteCoseECDSAPublicKey(sealerKeyFile, &sealingKey.PublicKey); err != nil {
+					return fmt.Errorf("failed to write sealer public key to file %s: %w", sealerKeyFile, err)
+				}
+				fmt.Printf("wrote sealer public key to file %s\n", sealerKeyFile)
+			}
+			return nil
+		},
+	}
+}
+
+// addStatements adds the signed statements to the massif and returns the MMR
+// statements annotated with their assigned leaf indices.
+// If a specific statement is specified via --signed-statement, it is added
+// first. Those discovered from --statements-dir are added in lexical
+// filename order.
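// Why lexical filename order is reproducible: os.ReadDir already returns
// entries sorted by filename, so zero-padded statement names register in the
// same order on every run and machine. A standard-library sketch; the
// directory name is illustrative.
package main

import (
	"fmt"
	"os"
)

func main() {
	entries, err := os.ReadDir("statements")
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		fmt.Println(entry.Name()) // e.g. 0001.cbor, 0002.cbor, ...
	}
}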
+func addStatements(cmd *CmdCtx, cCtx *cli.Context, massif *massifs.MassifContext) ([]scitt.MMRStatement, error) {
+	var fileNames []string
+	var statements []scitt.MMRStatement
+
+	if cCtx.String("signed-statement") != "" {
+		fileNames = append(fileNames, cCtx.String("signed-statement"))
+	}
+
+	if cCtx.String("statements-dir") != "" {
+		files, err := listFilesWithSuffix(cCtx.String("statements-dir"), ".cbor")
+		if err != nil {
+			return nil, err
+		}
+		fileNames = append(fileNames, files...)
+	}
+	if len(fileNames) == 0 {
+		return nil, fmt.Errorf("no signed statements found, please specify --signed-statement or --statements-dir or both")
+	}
+
+	for _, fileName := range fileNames {
+		mmrStatement, err := readStatementFromFile(fileName, cmd)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read signed statement from file %s: %w", fileName, err)
+		}
+
+		// the *next* index to be added is the current *count*
+		mmrStatement.MMRIndexLeaf = massif.RangeCount()
+
+		_, err = massif.AddHashedLeaf(
+			sha256.New(),
+			mmrStatement.IDTimestamp,
+			mmrStatement.ExtraBytes,
+			// use the issuer as the origin log id, which isn't quite right, but is close enough for this demo
+			[]byte(mmrStatement.Claims.Issuer),
+			[]byte("scitt"),
+			mmrStatement.LeafHash,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("failed to add hashed leaf: %w", err)
+		}
+
+		statements = append(statements, *mmrStatement)
+
+		fmt.Printf("index              : %d\n", mmrStatement.MMRIndexLeaf)
+		fmt.Printf("    issuer         : %s\n", mmrStatement.Claims.Issuer)
+		fmt.Printf("    idtimestamp    : %x\n", mmrStatement.IDTimestamp)
+		fmt.Printf("    extraBytes     : %x\n", mmrStatement.ExtraBytes)
+		fmt.Printf("    leaf-hash      : %x\n", mmrStatement.LeafHash)
+		fmt.Printf("    statement-hash : %x\n", mmrStatement.Hash)
+		fmt.Printf("    node count     : %d\n", (len(massif.Data)-int(massif.LogStart()))/32)
+
+		value, err := massif.Get(mmrStatement.MMRIndexLeaf)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get leaf value for index %d: %w", mmrStatement.MMRIndexLeaf, err)
+		}
+		if !bytes.Equal(value, mmrStatement.LeafHash) {
+			// if this triggers, it indicates a bug in this hacked up demo code
+			return nil, fmt.Errorf("leaf hash %x does not match expected value %x for index %d",
+				value, mmrStatement.LeafHash, mmrStatement.MMRIndexLeaf)
+		}
+	}
+	return statements, nil
+}
+
+func readStatementFromFile(fileName string, cmd *CmdCtx) (*scitt.MMRStatement, error) {
+	mmrStatement, cpd, err := scitt.NewMMRStatementFromFile(fileName, cmd, scitt.RegistrationPolicyVerified())
+	if err != nil {
+		if cpd != nil && cpd.Instance != scitt.ProblemInstanceConfirmationMissing {
+			return nil, fmt.Errorf("%w: failed reading and checking signed statement: %s", err, cpd.Detail)
+		}
+		// for demo purposes, because we do not support x509
+		mmrStatement, cpd, err = scitt.NewMMRStatementFromFile(fileName, cmd, scitt.RegistrationPolicyUnverified())
+		if err != nil {
+			return nil, fmt.Errorf("%w: failed reading and checking signed statement: %s", err, cpd.Detail)
+		}
+	}
+	return mmrStatement, nil
+}
+
+func listFilesWithSuffix(dir, suffix string) ([]string, error) {
+	entries, err := os.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	var matches []string
+	for _, entry := range entries {
+		if entry.Type().IsRegular() && strings.HasSuffix(entry.Name(), suffix) {
+			matches = append(matches, filepath.Join(dir, entry.Name()))
+		}
+	}
+	return matches, nil
+}
diff --git a/azstorage.go b/azstorage.go
new file mode 100644
index 0000000..6348e51
--- /dev/null
+++
b/azstorage.go @@ -0,0 +1,50 @@ +package veracity + +import ( + "context" + "fmt" + + "github.com/datatrails/go-datatrails-common/azblob" + azstorage "github.com/robinbryce/go-merklelog-azure/storage" + "github.com/urfave/cli/v2" +) + +func IsStorageEmulatorEnabled(cCtx *cli.Context) bool { + return cCtx.String("account") == AzuriteStorageAccount +} + +func NewCmdStorageProviderAzure( + ctx context.Context, + cCtx *cli.Context, cmd *CmdCtx, + dataUrl string, + reader azblob.Reader, +) (*azstorage.CachingStore, error) { + + var err error + + if reader == nil { + + // If we had no url and no local data supplied we default to the production data source. + reader, err = cfgReader(cmd, cCtx, dataUrl) + if err != nil { + return nil, err + } + } + + /* + logID := datatrails.TenantID2LogID(dataUrl) + if logID == nil { + logID = datatrails.TenantID2LogID(cCtx.String("tenant")) + // if logID == nil { + // return nil, fmt.Errorf("tenant must be provided for this command (todo: remove this requirement)") + // } + }*/ + opts := azstorage.Options{} + opts.Store = reader + + store, err := azstorage.NewStore(ctx, opts) + if err != nil { + return nil, fmt.Errorf("could not create Azure object store: %w", err) + } + return store, nil +} diff --git a/cfgbugs.go b/cfgbugs.go deleted file mode 100644 index b811f39..0000000 --- a/cfgbugs.go +++ /dev/null @@ -1,45 +0,0 @@ -package veracity - -import ( - "fmt" - "slices" - - "github.com/urfave/cli/v2" -) - -var ( - // recovers timestamp_committed from merklelog_entry.commit.idtimestamp prior to hashing - Bug9308 = "9308" - - Bugs = []string{ - Bug9308, - } -) - -func IsSupportedBug(id string) bool { - return slices.Contains(Bugs, id) -} - -func Bug(cmd *CmdCtx, id string) bool { - if cmd.bugs == nil { - return false - } - return cmd.bugs[id] -} - -// cfgBugs checks the requested bug workarounds are valid and populates the map -// of enabled workarounds. 
-func cfgBugs(cmd *CmdCtx, cCtx *cli.Context) error { - cmd.bugs = map[string]bool{} - - // just one supported atm - id := cCtx.String("bug") - if id != "" { - if !IsSupportedBug(id) { - return fmt.Errorf("bug: %s no supported work around or accommodation", id) - } - cmd.bugs[id] = true - } - - return nil -} diff --git a/cfgkeys.go b/cfgkeys.go new file mode 100644 index 0000000..aa4d381 --- /dev/null +++ b/cfgkeys.go @@ -0,0 +1,51 @@ +package veracity + +import ( + "fmt" + + "github.com/datatrails/go-datatrails-merklelog/massifs" + "github.com/datatrails/veracity/keyio" + "github.com/urfave/cli/v2" +) + +func CfgKeys(cmd *CmdCtx, cCtx *cli.Context) error { + + var err error + + cmd.CBORCodec, err = massifs.NewCBORCodec() + if err != nil { + return fmt.Errorf("failed to create CBOR codec: %w", err) + } + + if cCtx.IsSet("checkpoint-public") && cCtx.IsSet("checkpoint-public-pem") { + return fmt.Errorf("cannot set both checkpoint-public and checkpoint-public-pem, use only one") + } + + if cCtx.IsSet("checkpoint-public") { + pub, err := keyio.ReadECDSAPublicCOSE(cCtx.String("checkpoint-public")) + if err != nil { + return fmt.Errorf("failed to read checkpoint public key: %w", err) + } + cmd.CheckpointPublic = pub + return nil + } + if cCtx.IsSet("checkpoint-public-pem") { + pub, err := keyio.ReadECDSAPublicPEM(cCtx.String("checkpoint-public-pem")) + if err != nil { + return fmt.Errorf("failed to read checkpoint public key: %w", err) + } + cmd.CheckpointPublic = pub + return nil + } + + if cCtx.IsSet("checkpoint-jwks") { + pub, err := keyio.ReadECDSAPublicJOSE(cCtx.String("checkpoint-jwks")) + if err != nil { + return fmt.Errorf("failed to read checkpoint public key: %w", err) + } + cmd.CheckpointPublic = pub + return nil + } + + return nil +} diff --git a/cfglogging.go b/cfglogging.go index cec3b0b..c1663bf 100644 --- a/cfglogging.go +++ b/cfglogging.go @@ -23,12 +23,12 @@ func cfgLogging(cmd *CmdCtx, cCtx *cli.Context) error { // package. For that reason we override the TEST logger instead. It is only // integration tests that need to make use of this. 
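// For reference, the no-op logger in isolation: zap.NewNop discards every
// write, so TEST runs stay quiet without changing any log call sites. A
// minimal sketch using go.uber.org/zap directly.
package main

import "go.uber.org/zap"

func main() {
	log := zap.NewNop().Sugar()
	log.Infof("silently discarded: %d", 42) // produces no output
}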
if logLevel == "TEST" {
-		cmd.log = &logger.WrappedLogger{
+		cmd.Log = &logger.WrappedLogger{
 			SugaredLogger: zap.NewNop().Sugar(),
 		}
 	} else {
 		logger.New(logLevel, logger.WithConsole())
-		cmd.log = logger.Sugar
+		cmd.Log = logger.Sugar
 	}
 
 	return nil
diff --git a/cfglogids.go b/cfglogids.go
new file mode 100644
index 0000000..bb844e2
--- /dev/null
+++ b/cfglogids.go
@@ -0,0 +1,55 @@
+package veracity
+
+import (
+	"strings"
+
+	"github.com/datatrails/go-datatrails-merklelog/massifs/storage"
+	"github.com/google/uuid"
+)
+
+const (
+	LogIDOptionName  = "logid"
+	TenantOptionName = "tenant"
+)
+
+func ParseTenantOrLogID(logid string) storage.LogID {
+	logID := storage.ParsePrefixedLogID("tenant/", logid)
+	if logID != nil {
+		return logID
+	}
+	uid, err := uuid.Parse(logid)
+	if err != nil {
+		return nil
+	}
+	return storage.LogID(uid[:])
+}
+
+func CtxGetLogOptions(cCtx cliContextString) []storage.LogID {
+
+	// transitional support for --tenant
+	optionString := cCtx.String(LogIDOptionName)
+	if optionString == "" {
+		optionString = cCtx.String(TenantOptionName)
+	}
+	if optionString == "" {
+		return nil
+	}
+	values := strings.Split(optionString, ",")
+	var logIDs []storage.LogID
+	for _, v := range values {
+		logID := ParseTenantOrLogID(v)
+		if logID == nil {
+			continue
+		}
+		logIDs = append(logIDs, logID)
+	}
+	return logIDs
+}
+
+func CtxGetOneLogOption(cCtx cliContextString) storage.LogID {
+	logIDs := CtxGetLogOptions(cCtx)
+	if len(logIDs) == 0 {
+		return nil
+	}
+	return logIDs[0]
+}
diff --git a/cfgmassif.go b/cfgmassif.go
index 07bb63b..28aafeb 100644
--- a/cfgmassif.go
+++ b/cfgmassif.go
@@ -4,128 +4,65 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/datatrails/go-datatrails-common/logger"
 	"github.com/datatrails/go-datatrails-merklelog/massifs"
+	"github.com/forestrie/go-merklelog-datatrails/datatrails"
 	"github.com/urfave/cli/v2"
 )
 
-const (
-	defaultMassifHeight = uint8(14)
-)
-
-// cfgMassifReader establishes the blob read only data accessor
-// only azure blob storage is supported. Both emulated and produciton.
-func cfgMassifReader(cmd *CmdCtx, cCtx *cli.Context) error {
-
-	var err error
-	if cmd.log == nil {
-		if err = cfgLogging(cmd, cCtx); err != nil {
-			return err
-		}
-	}
-
-	cmd.massifHeight = uint8(cCtx.Uint("height"))
-	if cmd.massifHeight == 0 {
-		cmd.massifHeight = defaultMassifHeight
-	}
-
-	localLog := cCtx.String("data-local")
-	remoteLog := cCtx.String("data-url")
-
-	if localLog != "" && remoteLog != "" {
-		return fmt.Errorf("can't use data-local and data-url at the same time")
-	}
-
-	if localLog == "" && remoteLog == "" {
-		// If we had no url and no local data supplied we default to the production data source.
-		reader, err := cfgReader(cmd, cCtx, true)
-		if err != nil {
-			return err
-		}
-		mr := massifs.NewMassifReader(logger.Sugar, reader)
-		cmd.massifReader = &mr
-
-	} else if localLog != "" {
-
-		codec, err := massifs.NewRootSignerCodec()
-		if err != nil {
-			return err
-		}
-
-		// This configures the dir cache and local reader for single tenant use,
-		// InReplicaMode is false, meaning tenant specific filesystem paths are
-		// not automatically derived.
- cache, err := massifs.NewLogDirCache( - logger.Sugar, - NewFileOpener(), - massifs.WithDirCacheTenant(cCtx.String("tenant")), // may be empty string - massifs.WithDirCacheMassifLister(NewDirLister()), - massifs.WithDirCacheSealLister(NewDirLister()), - massifs.WithReaderOption(massifs.WithMassifHeight(cmd.massifHeight)), - massifs.WithReaderOption(massifs.WithCBORCodec(codec)), - ) - if err != nil { - return err - } - - mr, err := massifs.NewLocalReader(logger.Sugar, cache) - if err != nil { - return err - } - cmd.massifReader = &mr - - } else { - // otherwise configure for reading from remote blobs - reader, err := cfgReader(cmd, cCtx, false) - if err != nil { - return err - } - mr := massifs.NewMassifReader(logger.Sugar, reader) - cmd.massifReader = &mr - } - - return nil -} - // cfgMassif configures a massif reader and reads a massif -func cfgMassif(cmd *CmdCtx, cCtx *cli.Context) error { +func cfgMassif(ctx context.Context, cmd *CmdCtx, cCtx *cli.Context) (*massifs.MassifContext, error) { + + var massif massifs.MassifContext + var reader readerSelector var err error - if err = cfgMassifReader(cmd, cCtx); err != nil { - return err + if reader, err = cfgMassifReader(cmd, cCtx); err != nil { + return nil, err } tenant := CtxGetOneTenantOption(cCtx) if tenant == "" { - return fmt.Errorf("tenant must be provided for this command") + return nil, fmt.Errorf("tenant must be provided for this command") } - ctx := context.Background() + logID := datatrails.TenantID2LogID(tenant) + if logID == nil { + return nil, fmt.Errorf("invalid tenant id '%s'", tenant) + } + + if err = reader.SelectLog(ctx, logID); err != nil { + return nil, err + } mmrIndex := cCtx.Uint64("mmrindex") - massifIndex := cCtx.Uint64("massif") + massifIndex := uint32(cCtx.Uint64("massif")) // mmrIndex zero is always going to be massifIndex 0 so we treat this the // same as though the massif option had been supplied as 0 - if massifIndex == uint64(0) && mmrIndex == uint64(0) { - cmd.massif, err = cmd.massifReader.GetMassif(context.Background(), tenant, massifIndex) - return err + if massifIndex == uint32(0) && mmrIndex == uint64(0) { + massif, err = massifs.GetMassifContext(ctx, reader, massifIndex) + if err != nil { + return nil, err + } + return &massif, nil } // now, if we have a non zero mmrIndex, use it to (re)compute the massifIndex if mmrIndex > uint64(0) { - massifIndex = massifs.MassifIndexFromMMRIndex(cmd.massifHeight, mmrIndex) + massifIndex = uint32(massifs.MassifIndexFromMMRIndex(cmd.MassifFmt.MassifHeight, mmrIndex)) - cmd.massif, err = cmd.massifReader.GetMassif(context.Background(), tenant, massifIndex) - return err + massif, err = massifs.GetMassifContext(ctx, reader, massifIndex) + if err != nil { + return nil, err + } + return &massif, nil } // If massifIndex is not provided it will be zero here, and that is a good // default. 
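// For reference, the mmrindex-to-massif resolution used above, in isolation.
// A sketch assuming the go-datatrails-merklelog module; the sample indices
// are arbitrary.
package main

import (
	"fmt"

	"github.com/datatrails/go-datatrails-merklelog/massifs"
)

func main() {
	const massifHeight = uint8(14) // the CLI default height
	for _, mmrIndex := range []uint64{0, 1, 1 << 20} {
		massifIndex := massifs.MassifIndexFromMMRIndex(massifHeight, mmrIndex)
		fmt.Printf("mmrindex %d lives in massif %d\n", mmrIndex, massifIndex)
	}
}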
-	massif, err := cmd.massifReader.GetMassif(ctx, tenant, massifIndex)
+	massif, err = massifs.GetMassifContext(ctx, reader, massifIndex)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	cmd.massif = massif
-	return nil
+	return &massif, nil
 }
diff --git a/cfgmassiffmt.go b/cfgmassiffmt.go
new file mode 100644
index 0000000..6d3cfdd
--- /dev/null
+++ b/cfgmassiffmt.go
@@ -0,0 +1,49 @@
+package veracity
+
+import (
+	"github.com/datatrails/go-datatrails-merklelog/massifs"
+	"github.com/datatrails/go-datatrails-merklelog/massifs/snowflakeid"
+	"github.com/urfave/cli/v2"
+)
+
+type MassifFormatOptions struct {
+	MassifHeight    uint8
+	CommitmentEpoch uint8
+	WorkerCIDR      string
+	PodIP           string
+}
+
+// cfgMassifFmt initialises the massif format options and the
+// idTimestamp generator state.
+func cfgMassifFmt(cmd *CmdCtx, cCtx *cli.Context) error {
+	var err error
+	cmd.MassifFmt.CommitmentEpoch = 1 // the default, and correct until the unix epoch changes
+	if cCtx.IsSet("commitment-epoch") {
+		cmd.MassifFmt.CommitmentEpoch = uint8(cCtx.Uint64("commitment-epoch"))
+		if cmd.MassifFmt.CommitmentEpoch == 0 {
+			cmd.MassifFmt.CommitmentEpoch = uint8(massifs.Epoch2038)
+		}
+	}
+	cmd.MassifFmt.MassifHeight = uint8(cCtx.Uint("height"))
+	if cmd.MassifFmt.MassifHeight == 0 {
+		cmd.MassifFmt.MassifHeight = DefaultMassifHeight
+	}
+
+	cmd.MassifFmt.WorkerCIDR = "0.0.0.0/16"
+	if cCtx.IsSet("worker-cidr") {
+		cmd.MassifFmt.WorkerCIDR = cCtx.String("worker-cidr")
+	}
+	cmd.MassifFmt.PodIP = "10.0.0.127"
+	if cCtx.IsSet("pod-ip") {
+		cmd.MassifFmt.PodIP = cCtx.String("pod-ip")
+	}
+
+	cmd.IDState, err = snowflakeid.NewIDState(snowflakeid.Config{
+		CommitmentEpoch: cmd.MassifFmt.CommitmentEpoch,
+		// There is no reason to override these for local use.
+		WorkerCIDR: cmd.MassifFmt.WorkerCIDR,
+		PodIP:      cmd.MassifFmt.PodIP,
+	})
+
+	return err
+}
diff --git a/cfgmassifreader.go b/cfgmassifreader.go
new file mode 100644
index 0000000..046444a
--- /dev/null
+++ b/cfgmassifreader.go
@@ -0,0 +1,26 @@
+package veracity
+
+import (
+	"github.com/urfave/cli/v2"
+)
+
+// cfgMassifReader establishes the blob read-only data accessor;
+// only azure blob storage is supported, both emulated and production.
+func cfgMassifReader(cmd *CmdCtx, cCtx *cli.Context) (readerSelector, error) {
+	var err error
+	if cmd.Log == nil {
+		if err = cfgLogging(cmd, cCtx); err != nil {
+			return nil, err
+		}
+	}
+	if err = cfgMassifFmt(cmd, cCtx); err != nil {
+		return nil, err
+	}
+
+	reader, err := newMassifReader(cmd, cCtx)
+	if err != nil {
+		return nil, err
+	}
+
+	return reader, nil
+}
diff --git a/cfgreader.go b/cfgreader.go
index f62bf27..e2a8de1 100644
--- a/cfgreader.go
+++ b/cfgreader.go
@@ -16,23 +16,16 @@ const (
 
 // cfgReader establishes the blob read only data accessor
 // only azure blob storage is supported. Both emulated and production.
-func cfgReader(cmd *CmdCtx, cCtx *cli.Context, forceProdUrl bool) (azblob.Reader, error) {
+func cfgReader(cmd *CmdCtx, cCtx *cli.Context, url string) (azblob.Reader, error) {
 	var err error
 	var reader azblob.Reader
 
-	if cmd.log == nil {
+	if cmd.Log == nil {
 		if err = cfgLogging(cmd, cCtx); err != nil {
 			return nil, err
 		}
 	}
 
-	// We prefer loading this from the command line argument, but if upstream code requests we default
-	// to the production URL we inject that here.
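// For reference, the idtimestamp generator configured by cfgMassifFmt above,
// in isolation. A sketch assuming the go-datatrails-merklelog module; the
// CIDR and pod IP are the placeholder values the command uses for local
// operation.
package main

import (
	"fmt"

	"github.com/datatrails/go-datatrails-merklelog/massifs/snowflakeid"
)

func main() {
	idState, err := snowflakeid.NewIDState(snowflakeid.Config{
		CommitmentEpoch: 1, // correct until the unix epoch changes
		WorkerCIDR:      "0.0.0.0/16",
		PodIP:           "10.0.0.127",
	})
	if err != nil {
		panic(err)
	}
	id, err := idState.NextID()
	if err != nil {
		panic(err)
	}
	fmt.Printf("idtimestamp: %x\n", id)
}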
- url := cCtx.String("data-url") - if forceProdUrl { - url = DefaultRemoteMassifURL - } - // These values are relevant for direct connection to Azure blob store (or emulator), but are // harmlessly irrelevant for standard remote connections that connect via public proxy. Potential // to simplify this function in future. @@ -42,19 +35,22 @@ func cfgReader(cmd *CmdCtx, cCtx *cli.Context, forceProdUrl bool) (azblob.Reader if account == "" && url == "" { account = AzuriteStorageAccount - cmd.log.Infof("defaulting to the emulator account %s", account) + cmd.Log.Infof("defaulting to the emulator account %s", account) } if container == "" { container = DefaultContainer - cmd.log.Infof("defaulting to the standard container %s", container) + cmd.Log.Infof("defaulting to the standard container %s", container) } if account == AzuriteStorageAccount { - cmd.log.Infof("using the emulator and authorizing with the well known private key (for production no authorization is required)") + if url != "" { + return nil, fmt.Errorf("the url for the emulator account is fixed, overriding it is not supported or useful") + } + cmd.Log.Infof("using the emulator and authorizing with the well known private key (for production no authorization is required)") // reader, err := azblob.NewAzurite(url, container) devCfg := azblob.NewDevConfigFromEnv() - cmd.readerURL = devCfg.URL + cmd.RemoteURL = devCfg.URL reader, err = azblob.NewDev(devCfg, container) if err != nil { return nil, err @@ -71,7 +67,7 @@ func cfgReader(cmd *CmdCtx, cCtx *cli.Context, forceProdUrl bool) (azblob.Reader if envAuth { devCfg := azblob.NewDevConfigFromEnv() - cmd.readerURL = devCfg.URL + cmd.RemoteURL = devCfg.URL reader, err = azblob.NewDev(devCfg, container) if err != nil { return nil, err @@ -79,8 +75,8 @@ func cfgReader(cmd *CmdCtx, cCtx *cli.Context, forceProdUrl bool) (azblob.Reader return reader, nil } - cmd.readerURL = url - reader, err = azblob.NewReaderNoAuth(url, azblob.WithContainer(container), azblob.WithAccountName(account)) + cmd.RemoteURL = url + reader, err = azblob.NewReaderNoAuth(cmd.Log, url, azblob.WithContainer(container), azblob.WithAccountName(account)) if err != nil { return nil, fmt.Errorf("failed to connect to blob store: %v", err) } diff --git a/cfgrootreader.go b/cfgrootreader.go deleted file mode 100644 index c1a6f5f..0000000 --- a/cfgrootreader.go +++ /dev/null @@ -1,28 +0,0 @@ -package veracity - -import ( - "github.com/datatrails/go-datatrails-merklelog/massifs" - "github.com/urfave/cli/v2" -) - -func cfgRootReader(cmd *CmdCtx, cCtx *cli.Context) error { - var err error - if cmd.log == nil { - if err = cfgLogging(cmd, cCtx); err != nil { - return err - } - } - - if cmd.cborCodec, err = massifs.NewRootSignerCodec(); err != nil { - return err - } - - forceProdUrl := cCtx.String("data-local") == "" && cCtx.String("data-url") == "" - reader, err := cfgReader(cmd, cCtx, forceProdUrl) - if err != nil { - return err - } - - cmd.rootReader = massifs.NewSignedRootReader(cmd.log, reader, cmd.cborCodec) - return nil -} diff --git a/cmd/veracity/main.go b/cmd/veracity/main.go index c628338..9c7ba8c 100644 --- a/cmd/veracity/main.go +++ b/cmd/veracity/main.go @@ -1,10 +1,12 @@ +// Package main provides the entry point for the Veracity CLI application. 
package main import ( "fmt" "log" "os" - "strings" + + // "strings" "github.com/datatrails/veracity" ) @@ -20,18 +22,17 @@ var ( ) func main() { - versionString := "unknown" if version != "" { // versionString = fmt.Sprintf("%s %s %s", version, commit, buildDate) versionString = fmt.Sprintf("%s %s", version, commit) } - ikwid := false - envikwid := os.Getenv("VERACITY_IKWID") - if envikwid == "1" || strings.ToLower(envikwid) == "true" { - ikwid = true - } + ikwid := true + // envikwid := os.Getenv("VERACITY_IKWID") + // if envikwid == "1" || strings.ToLower(envikwid) == "true" { + // ikwid = true + // } app := veracity.NewApp(versionString, ikwid) veracity.AddCommands(app, ikwid) if err := app.Run(os.Args); err != nil { diff --git a/cmdctx.go b/cmdctx.go index 0fd7b10..8fd0de7 100644 --- a/cmdctx.go +++ b/cmdctx.go @@ -1,60 +1,44 @@ package veracity import ( - "context" + "fmt" "github.com/datatrails/go-datatrails-common/cbor" "github.com/datatrails/go-datatrails-common/logger" - "github.com/datatrails/go-datatrails-merklelog/massifs" + "github.com/datatrails/go-datatrails-merklelog/massifs/snowflakeid" + "github.com/datatrails/veracity/keyio" ) -// MassifGetter gets a specific massif based on the massifIndex given for a tenant log -type MassifGetter interface { - GetMassif( - ctx context.Context, tenantIdentity string, massifIndex uint64, opts ...massifs.ReaderOption, - ) (massifs.MassifContext, error) -} +// CmdCtx holds shared config and config derived state for all commands +type CmdCtx struct { + Log logger.Logger + + CheckpointPublic keyio.DecodedPublic -type MassifReader interface { - GetVerifiedContext( - ctx context.Context, tenantIdentity string, massifIndex uint64, - opts ...massifs.ReaderOption, - ) (*massifs.VerifiedContext, error) - - GetFirstMassif( - ctx context.Context, tenantIdentity string, opts ...massifs.ReaderOption, - ) (massifs.MassifContext, error) - GetHeadMassif( - ctx context.Context, tenantIdentity string, opts ...massifs.ReaderOption, - ) (massifs.MassifContext, error) - GetLazyContext( - ctx context.Context, tenantIdentity string, which massifs.LogicalBlob, opts ...massifs.ReaderOption, - ) (massifs.LogBlobContext, uint64, error) - MassifGetter + RemoteURL string + CBORCodec cbor.CBORCodec + + // cfgMassifFmt sets the massif format options and the IDState + MassifFmt MassifFormatOptions + IDState *snowflakeid.IDState + + Bugs map[string]bool } -// CmdCtx holds shared config and config derived state for all commands -type CmdCtx struct { - log logger.Logger - // storer *azblob.Storer - //reader azblob.Reader - massifReader MassifReader - readerURL string - cborCodec cbor.CBORCodec - rootReader massifs.SignedRootReader - massif massifs.MassifContext - - massifHeight uint8 - - bugs map[string]bool +func (cmd *CmdCtx) NextID() (uint64, error) { + if cmd.IDState == nil { + return 0, fmt.Errorf("idState not initialized, cannot generate next ID") + } + return cmd.IDState.NextID() } -// Clone returns a copy of the CmdCtx with only those members that are safe to share copied. -// Those are: -// - log - the result of cfgLogging -// -// All other members need to be initialzed by the caller if they are required in -// a specific go routine context. +// Clone returns a safe copy of the CmdCtx. 
func (c *CmdCtx) Clone() *CmdCtx { - return &CmdCtx{log: c.log} + return &CmdCtx{ + RemoteURL: c.RemoteURL, + CBORCodec: c.CBORCodec, + MassifFmt: c.MassifFmt, + IDState: c.IDState, + Log: c.Log, + } } diff --git a/const.go b/const.go index 0e549ac..1e770fe 100644 --- a/const.go +++ b/const.go @@ -1,6 +1,7 @@ package veracity const ( + DefaultMassifHeight = uint8(14) // LeafTypePlain is used for committing to plain values. LeafTypePlain = uint8(0) PublicAssetsPrefix = "publicassets/" diff --git a/consts_test.go b/consts_test.go index 1af3871..1612afa 100644 --- a/consts_test.go +++ b/consts_test.go @@ -1,148 +1 @@ package veracity - -/** - * file for all the test constants - */ - -var ( - eventsV1Event = []byte(` -{ - "identity": "events/01947000-3456-780f-bfa9-29881e3bac88", - "attributes": { - "foo": "bar" - }, - "trails": [], - "origin_tenant": "tenant/112758ce-a8cb-4924-8df8-fcba1e31f8b0", - "created_by": "2ef471c2-f997-4503-94c8-60b5c929a3c3", - "created_at": 1737045849174, - "confirmation_status": "CONFIRMED", - "merklelog_commit": { - "index": "1", - "idtimestamp": "019470003611017900" - } -}`) - - eventsV1SingleEventList = []byte(` -{ - "events": [ - { - "identity": "events/01947000-3456-780f-bfa9-29881e3bac88", - "attributes": { - "foo": "bar" - }, - "trails": [], - "origin_tenant": "tenant/112758ce-a8cb-4924-8df8-fcba1e31f8b0", - "created_by": "2ef471c2-f997-4503-94c8-60b5c929a3c3", - "created_at": 1737045849174, - "confirmation_status": "CONFIRMED", - "merklelog_commit": { - "index": "1", - "idtimestamp": "019470003611017900" - } - } - ] -} - `) - - assetsV2Event = []byte(` -{ - "identity": "assets/899e00a2-29bc-4316-bf70-121ce2044472/events/450dce94-065e-4f6a-bf69-7b59f28716b6", - "asset_identity": "assets/899e00a2-29bc-4316-bf70-121ce2044472", - "event_attributes": {}, - "asset_attributes": { - "arc_display_name": "Default asset", - "default": "true", - "arc_description": "Collection for Events not specifically associated with any specific Asset" - }, - "operation": "NewAsset", - "behaviour": "AssetCreator", - "timestamp_declared": "2025-01-16T16:12:38Z", - "timestamp_accepted": "2025-01-16T16:12:38Z", - "timestamp_committed": "2025-01-16T16:12:38.576970217Z", - "principal_declared": { - "issuer": "https://accounts.google.com", - "subject": "105632894023856861149", - "display_name": "Henry SocialTest", - "email": "henry.socialtest@gmail.com" - }, - "principal_accepted": { - "issuer": "https://accounts.google.com", - "subject": "105632894023856861149", - "display_name": "Henry SocialTest", - "email": "henry.socialtest@gmail.com" - }, - "confirmation_status": "CONFIRMED", - "transaction_id": "", - "block_number": 0, - "transaction_index": 0, - "from": "0x412bB2Ecd6f2bDf26D64de834Fa17167192F4c0d", - "tenant_identity": "tenant/112758ce-a8cb-4924-8df8-fcba1e31f8b0", - "merklelog_entry": { - "commit": { - "index": "0", - "idtimestamp": "01946fe35fc6017900" - }, - "confirm": { - "mmr_size": "1", - "root": "YecBKn8UtUZ6hlTnrnXIlKvNOZKuMCIemNdNA8wOyjk=", - "timestamp": "1737043961154", - "idtimestamp": "", - "signed_tree_head": "" - }, - "unequivocal": null - } -}`) - - assetsV2SingleEventList = []byte(` -{ - "events": [ - { - "identity": "assets/899e00a2-29bc-4316-bf70-121ce2044472/events/450dce94-065e-4f6a-bf69-7b59f28716b6", - "asset_identity": "assets/899e00a2-29bc-4316-bf70-121ce2044472", - "event_attributes": {}, - "asset_attributes": { - "arc_display_name": "Default asset", - "default": "true", - "arc_description": "Collection for Events not specifically associated with any 
specific Asset" - }, - "operation": "NewAsset", - "behaviour": "AssetCreator", - "timestamp_declared": "2025-01-16T16:12:38Z", - "timestamp_accepted": "2025-01-16T16:12:38Z", - "timestamp_committed": "2025-01-16T16:12:38.576970217Z", - "principal_declared": { - "issuer": "https://accounts.google.com", - "subject": "105632894023856861149", - "display_name": "Henry SocialTest", - "email": "henry.socialtest@gmail.com" - }, - "principal_accepted": { - "issuer": "https://accounts.google.com", - "subject": "105632894023856861149", - "display_name": "Henry SocialTest", - "email": "henry.socialtest@gmail.com" - }, - "confirmation_status": "CONFIRMED", - "transaction_id": "", - "block_number": 0, - "transaction_index": 0, - "from": "0x412bB2Ecd6f2bDf26D64de834Fa17167192F4c0d", - "tenant_identity": "tenant/112758ce-a8cb-4924-8df8-fcba1e31f8b0", - "merklelog_entry": { - "commit": { - "index": "0", - "idtimestamp": "01946fe35fc6017900" - }, - "confirm": { - "mmr_size": "1", - "root": "YecBKn8UtUZ6hlTnrnXIlKvNOZKuMCIemNdNA8wOyjk=", - "timestamp": "1737043961154", - "idtimestamp": "", - "signed_tree_head": "" - }, - "unequivocal": null - } - } - ] -}`) -) diff --git a/datatrails.public.keys.json b/datatrails.public.keys.json new file mode 100644 index 0000000..89e3093 --- /dev/null +++ b/datatrails.public.keys.json @@ -0,0 +1,24 @@ +{ + "keys": [ + { + "kid": "merkle-log-signing/14346aafe4f04fa3b3c9388102f402cb", + "kty": "EC", + "key_ops": [ + "verify" + ], + "crv": "P-384", + "x": "A861WiJFuwOruvgCHmoGCEoNy4rxQU-TMV0TIIFE84sA5106vKerlKVHiYEE04wh", + "y": "nDwgJoczIAMusJAym7l0_4WMetVqldGsZ-WDlwOgTBrz4CFAjQABe5P6dzawS2By" + }, + { + "kid": "merkle-log-signing/16e61f876efd4c0cbab950422c445c07", + "kty": "EC", + "key_ops": [ + "verify" + ], + "crv": "P-384", + "x": "oXb_EZGhA7yQcXL40MowMkSUka_DB9oL14A0MAsEOxqcM2SPnPuAl1UGmHoDWJhw", + "y": "of7_fp_tNmxiEQPFuYNAAbYIO42LtJ07uXsXHZDSs2MhfnpesAiBR13GOWve4mZR" + } + ] +} diff --git a/diag.go b/diag.go index abd60ad..1433b1d 100644 --- a/diag.go +++ b/diag.go @@ -29,30 +29,34 @@ func NewDiagCmd() *cli.Command { }, Action: func(cCtx *cli.Context) error { var err error + var massif massifs.MassifContext + var reader massifs.ObjectReader + + ctx := context.Background() cmd := &CmdCtx{} - if err = cfgMassifReader(cmd, cCtx); err != nil { + if reader, err = cfgMassifReader(cmd, cCtx); err != nil { return err } - if cmd.massifHeight == 0 { + if cmd.MassifFmt.MassifHeight == 0 { return fmt.Errorf("massif height can't be zero") } fmt.Printf("%8d trie-header-start\n", massifs.TrieHeaderStart()) fmt.Printf("%8d trie-data-start\n", massifs.TrieDataStart()) - fmt.Printf("%8d peak-stack-start\n", massifs.PeakStackStart(cmd.massifHeight)) + fmt.Printf("%8d peak-stack-start\n", massifs.PeakStackStart(cmd.MassifFmt.MassifHeight)) // support identifying the massif implicitly via the index of a log // entry. 
note that mmrIndex 0 is just handled as though the caller // asked for massifIndex 0 mmrIndex := cCtx.Uint64("mmrindex") signedMassifIndex := cCtx.Int64("massif") - massifIndex := uint64(signedMassifIndex) + massifIndex := uint32(signedMassifIndex) if mmrIndex > uint64(0) && signedMassifIndex == -1 { - massifIndex = massifs.MassifIndexFromMMRIndex(cmd.massifHeight, mmrIndex) + massifIndex = uint32(massifs.MassifIndexFromMMRIndex(cmd.MassifFmt.MassifHeight, mmrIndex)) } - fmt.Printf("%8d peak-stack-len\n", massifs.PeakStackLen(massifIndex)) - logStart := massifs.PeakStackEnd(massifIndex, cmd.massifHeight) + fmt.Printf("%8d peak-stack-len\n", massifs.PeakStackLen(uint64(massifIndex))) + logStart := massifs.PeakStackEnd(uint64(massifIndex), cmd.MassifFmt.MassifHeight) fmt.Printf("%8d tree-start\n", logStart) fmt.Printf("%8d massif\n", massifIndex) if mmrIndex > 0 { @@ -61,27 +65,24 @@ func NewDiagCmd() *cli.Command { if cCtx.Bool("noread") { return nil } - if err = cfgMassifReader(cmd, cCtx); err != nil { - return err - } tenant := cCtx.String("tenant") - if tenant == "" { + if tenant == "" && !cCtx.IsSet("data-local") { fmt.Println("a tenant is required to get diagnostics that require reading a blob") return nil } - cmd.massif, err = cmd.massifReader.GetMassif(context.Background(), tenant, massifIndex) + massif, err = massifs.GetMassifContext(ctx, reader, massifIndex) if err != nil { return err } - fmt.Printf("%8d start:massif-height\n", cmd.massif.Start.MassifHeight) - fmt.Printf("%8d start:data-epoch\n", cmd.massif.Start.DataEpoch) - fmt.Printf("%8d start:commitment-epoch\n", cmd.massif.Start.CommitmentEpoch) - fmt.Printf("%8d start:first-index\n", cmd.massif.Start.FirstIndex) - fmt.Printf("%8d start:peak-stack-len\n", cmd.massif.Start.PeakStackLen) + fmt.Printf("%8d start:massif-height\n", massif.Start.MassifHeight) + fmt.Printf("%8d start:data-epoch\n", massif.Start.DataEpoch) + fmt.Printf("%8d start:commitment-epoch\n", massif.Start.CommitmentEpoch) + fmt.Printf("%8d start:first-index\n", massif.Start.FirstIndex) + fmt.Printf("%8d start:peak-stack-len\n", massif.Start.PeakStackLen) - fmt.Printf("%8d count\n", cmd.massif.Count()) - fmt.Printf("%8d leaf-count\n", cmd.massif.MassifLeafCount()) - fmt.Printf("%8d last-leaf-mmrindex\n", cmd.massif.LastLeafMMRIndex()) + fmt.Printf("%8d count\n", massif.Count()) + fmt.Printf("%8d leaf-count\n", massif.MassifLeafCount()) + fmt.Printf("%8d last-leaf-mmrindex\n", massif.LastLeafMMRIndex()) // trieIndex is equivilent to leafIndex, but we use the term trieIndex // when dealing with trie data. 
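+			// (equivalent in the sense that trieIndex == mmr.LeafIndex(mmrIndex);
+			// for example mmrIndex 4 is the fifth mmr node but only the third
+			// leaf, so its trieIndex is 2)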
@@ -89,30 +90,30 @@ func NewDiagCmd() *cli.Command { fmt.Printf("%8d trie-index\n", trieIndex) // FirstIndex is the *size* of the mmr preceding the current massif - expectTrieIndexMassif := trieIndex - mmr.LeafCount(cmd.massif.Start.FirstIndex) + expectTrieIndexMassif := trieIndex - mmr.LeafCount(massif.Start.FirstIndex) fmt.Printf("%8d trie-index - massif-first-index\n", expectTrieIndexMassif) - logTrieKey, err := cmd.massif.GetTrieKey(mmrIndex) + logTrieKey, err := massif.GetTrieKey(mmrIndex) if err != nil { return fmt.Errorf("when expecting %d for %d: %v", expectTrieIndexMassif, mmrIndex, err) } - logTrieEntry, err := cmd.massif.GetTrieEntry(mmrIndex) + logTrieEntry, err := massif.GetTrieEntry(mmrIndex) if err != nil { entryIndex := mmr.LeafIndex(mmrIndex) // FirstIndex is the *size* of the mmr preceding the current massif - expectTrieIndexMassif := entryIndex - mmr.LeafCount(cmd.massif.Start.FirstIndex) + expectTrieIndexMassif := entryIndex - mmr.LeafCount(massif.Start.FirstIndex) return fmt.Errorf("when expecting %d for %d: %v", expectTrieIndexMassif, mmrIndex, err) } - logNodeValue, err := cmd.massif.Get(mmrIndex) + logNodeValue, err := massif.Get(mmrIndex) if err != nil { return err } fmt.Printf("%x log-value\n", logNodeValue) - idBytes := logTrieKey[massifs.TrieEntryIdTimestampStart:massifs.TrieEntryIdTimestampEnd] + idBytes := logTrieKey[massifs.TrieEntryIDTimestampStart:massifs.TrieEntryIDTimestampEnd] id := binary.BigEndian.Uint64(idBytes) - unixMS, err := snowflakeid.IDUnixMilli(id, uint8(cmd.massif.Start.CommitmentEpoch)) + unixMS, err := snowflakeid.IDUnixMilli(id, uint8(massif.Start.CommitmentEpoch)) if err != nil { return err } diff --git a/ediag.go b/ediag.go index 3c7fb19..d50f167 100644 --- a/ediag.go +++ b/ediag.go @@ -11,9 +11,11 @@ import ( "github.com/datatrails/go-datatrails-merklelog/massifs" "github.com/datatrails/go-datatrails-merklelog/massifs/snowflakeid" + "github.com/datatrails/go-datatrails-merklelog/massifs/storage" "github.com/datatrails/go-datatrails-merklelog/mmr" "github.com/datatrails/go-datatrails-simplehash/simplehash" - veracityapp "github.com/datatrails/veracity/app" + appdata "github.com/forestrie/go-merklelog-datatrails/appdata" + "github.com/forestrie/go-merklelog-datatrails/datatrails" "github.com/urfave/cli/v2" ) @@ -32,20 +34,29 @@ func NewEventDiagCmd() *cli.Command { }, Action: func(cCtx *cli.Context) error { + var reader readerSelector + var massif massifs.MassifContext + tenantIdentity := cCtx.String("tenant") - appData, err := veracityapp.ReadAppData(cCtx.Args().Len() == 0, cCtx.Args().Get(0)) + appData, err := appdata.ReadAppData(cCtx.Args().Len() == 0, cCtx.Args().Get(0)) if err != nil { return err } - appEntries, err := veracityapp.AppDataToVerifiableLogEntries(appData, tenantIdentity) + appEntries, err := appdata.AppDataToVerifiableLogEntries(appData, tenantIdentity) if err != nil { return err } cmd := &CmdCtx{} - if err = cfgMassifReader(cmd, cCtx); err != nil { + + if err = cfgMassifFmt(cmd, cCtx); err != nil { + return err + } + + reader, err = newReaderSelector(cmd, cCtx) + if err != nil { return err } cmpPrint := func(fmtEq, fmtNe string, a, b any) bool { @@ -66,27 +77,30 @@ func NewEventDiagCmd() *cli.Command { // Get the mmrIndex from the request and then compute the massif // it implies based on the massifHeight command line option. 
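+			// (a massif of height h holds 2^(h-1) leaves; with the default
+			// height of 14 that is 8192 leaves, so this is effectively
+			// leafIndex>>13)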
mmrIndex := appEntry.MMRIndex() - massifIndex := massifs.MassifIndexFromMMRIndex(cmd.massifHeight, mmrIndex) - tenantIdentity := cCtx.String("tenant") - if tenantIdentity == "" { + massifIndex := uint32(massifs.MassifIndexFromMMRIndex(cmd.MassifFmt.MassifHeight, mmrIndex)) + var logID storage.LogID + if cCtx.String("tenant") != "" { + tenantIdentity := cCtx.String("tenant") + logID = datatrails.TenantID2LogID(tenantIdentity) + } + if logID == nil { + // The tenant identity on the event is the original tenant // that created the event. For public assets and shared // assets, this is true regardless of which tenancy the // record is fetched from. Those same events will appear in // the logs of all tenants they were shared with. - tenantIdentity, err = appEntry.LogTenant() - if err != nil { - return err - } + logID = appEntry.LogID() } + reader.SelectLog(cCtx.Context, logID) // read the massif blob - cmd.massif, err = cmd.massifReader.GetMassif(context.Background(), tenantIdentity, massifIndex) + massif, err = massifs.GetMassifContext(context.Background(), reader, massifIndex) if err != nil { return err } // Get the human time from the idtimestamp committed on the event. - idTimestamp, err := appEntry.IDTimestamp(&cmd.massif) + idTimestamp, err := appEntry.IDTimestamp(&massif) if err != nil { return err } @@ -99,7 +113,7 @@ func NewEventDiagCmd() *cli.Command { if err != nil { return err } - eventIDTimestampMS, err := snowflakeid.IDUnixMilli(eventIDTimestamp, uint8(cmd.massif.Start.CommitmentEpoch)) + eventIDTimestampMS, err := snowflakeid.IDUnixMilli(eventIDTimestamp, uint8(massif.Start.CommitmentEpoch)) if err != nil { return err } @@ -108,24 +122,24 @@ func NewEventDiagCmd() *cli.Command { // Note that the banner info is all from the event response fmt.Printf("%d %s %s\n", leafIndex, time.UnixMilli(eventIDTimestampMS).Format(time.RFC3339Nano), appEntry.AppID()) - leafIndexMassif, err := cmd.massif.GetMassifLeafIndex(leafIndex) + leafIndexMassif, err := massif.GetMassifLeafIndex(leafIndex) if err != nil { return fmt.Errorf("when expecting %d for %d: %v", leafIndexMassif, mmrIndex, err) } fmt.Printf(" |%8d leaf-index-massif\n", leafIndexMassif) // Read the trie entry from the log - logTrieEntry := massifs.GetTrieEntry(cmd.massif.Data, cmd.massif.IndexStart(), leafIndexMassif) - logNodeValue, err := cmd.massif.Get(mmrIndex) + logTrieEntry := massifs.GetTrieEntry(massif.Data, massif.IndexStart(), leafIndexMassif) + logNodeValue, err := massif.Get(mmrIndex) if err != nil { return err } - logTrieKey := massifs.GetTrieKey(cmd.massif.Data, cmd.massif.IndexStart(), leafIndexMassif) + logTrieKey := massifs.GetTrieKey(massif.Data, massif.IndexStart(), leafIndexMassif) - logTrieIDTimestampBytes := logTrieEntry[massifs.TrieEntryIdTimestampStart:massifs.TrieEntryIdTimestampEnd] + logTrieIDTimestampBytes := logTrieEntry[massifs.TrieEntryIDTimestampStart:massifs.TrieEntryIDTimestampEnd] logTrieIDTimestamp := binary.BigEndian.Uint64(logTrieIDTimestampBytes) - unixMS, err := snowflakeid.IDUnixMilli(logTrieIDTimestamp, uint8(cmd.massif.Start.CommitmentEpoch)) + unixMS, err := snowflakeid.IDUnixMilli(logTrieIDTimestamp, uint8(massif.Start.CommitmentEpoch)) if err != nil { return err } @@ -189,13 +203,13 @@ func NewEventDiagCmd() *cli.Command { // Generate the proof for the mmrIndex and get the root. We use // the mmrSize from the end of the blob in which the leaf entry // was recorded. Any size > than the leaf index would work. 
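+			// (strictly, any mmr size that includes mmrIndex works, because
+			// verification recomputes the peaks from the proof path;
+			// RangeCount is simply the most convenient valid size)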
- mmrSize := cmd.massif.RangeCount() - proof, err := mmr.InclusionProof(&cmd.massif, mmrSize, mmrIndex) + mmrSize := massif.RangeCount() + proof, err := mmr.InclusionProof(&massif, mmrSize, mmrIndex) if err != nil { return err } - verified, err := mmr.VerifyInclusion(&cmd.massif, eventHasher, mmrSize, logNodeValue, mmrIndex, proof) + verified, err := mmr.VerifyInclusion(&massif, eventHasher, mmrSize, logNodeValue, mmrIndex, proof) if verified { fmt.Printf("OK|%d %d\n", mmrIndex, leafIndex) continue diff --git a/findmmrentries.go b/findmmrentries.go index 68f49e7..ec0f902 100644 --- a/findmmrentries.go +++ b/findmmrentries.go @@ -8,11 +8,13 @@ import ( "strings" "github.com/datatrails/go-datatrails-common/logger" - "github.com/datatrails/go-datatrails-logverification/logverification/app" "github.com/datatrails/go-datatrails-merklelog/massifs" + "github.com/datatrails/go-datatrails-merklelog/massifs/storage" "github.com/datatrails/go-datatrails-merklelog/mmr" "github.com/datatrails/go-datatrails-serialization/eventsv1" - veracityapp "github.com/datatrails/veracity/app" + appdata "github.com/forestrie/go-merklelog-datatrails/appdata" + "github.com/forestrie/go-merklelog-datatrails/appentry" + "github.com/forestrie/go-merklelog-datatrails/datatrails" "github.com/urfave/cli/v2" ) @@ -27,8 +29,9 @@ const ( // findMMREntries searchs the log of the given log tenant for matching mmrEntries given the app entries // and returns the leaf indexes of all the matches as well as the number of mmr entries considered func findMMREntries( + ctx context.Context, log logger.Logger, - massifReader MassifReader, + massifReader massifs.ObjectReader, tenantLogPath string, massifStartIndex int64, massifEndIndex int64, @@ -54,15 +57,10 @@ func findMMREntries( break } - massifContext, err := massifReader.GetMassif(context.Background(), tenantLogPath, uint64(massifIndex)) + massifContext, err := massifs.GetMassifContext(ctx, massifReader, uint32(massifIndex)) // check if we have reached the last massif for the log tenant - if errors.Is(err, massifs.ErrMassifNotFound) { - break - } - - // check if we have reached the last massif for local log - if errors.Is(err, massifs.ErrLogFileMassifNotFound) { + if errors.Is(err, storage.ErrDoesNotExist) { break } @@ -124,10 +122,10 @@ func findMMREntries( } } - entry := app.NewAppEntry( + entry := appentry.NewAppEntry( "", []byte{}, - app.NewMMREntryFields( + appentry.NewMMREntryFields( 0, serializedBytes, ), @@ -223,7 +221,7 @@ func NewFindMMREntriesCmd() *cli.Command { appEntryFileName := cCtx.String(appEntryFileFlagName) - appEntry, err := veracityapp.ReadAppData(appEntryFileName == "", appEntryFileName) + appEntry, err := appdata.ReadAppData(appEntryFileName == "", appEntryFileName) if err != nil { return err } @@ -248,26 +246,35 @@ func NewFindMMREntriesCmd() *cli.Command { return err } - if err = cfgMassifReader(cmd, cCtx); err != nil { + if err = cfgMassifFmt(cmd, cCtx); err != nil { return err } - cmd.log.Debugf("app entry: %x", appEntry) + var reader readerSelector + if reader, err = cfgMassifReader(cmd, cCtx); err != nil { + return err + } + logID := datatrails.TenantID2LogID(logTenant) + if err := reader.SelectLog(context.Background(), logID); err != nil { + return fmt.Errorf("could not select log for tenant %q: %w", logTenant, err) + } + cmd.Log.Debugf("app entry: %x", appEntry) leafIndexMatches, entriesConsidered, err := findMMREntries( - cmd.log, - cmd.massifReader, + context.Background(), + cmd.Log, + reader, tenantLogPath, massifStartIndex, massifEndIndex, - 
cmd.massifHeight, + cmd.MassifFmt.MassifHeight, appEntry, ) if err != nil { return err } - cmd.log.Debugf("entries considered: %v", entriesConsidered) + cmd.Log.Debugf("entries considered: %v", entriesConsidered) // if we want the leaf index matches log them and return if asLeafIndexes { diff --git a/findtrieentries.go b/findtrieentries.go index 70f6105..1b8c379 100644 --- a/findtrieentries.go +++ b/findtrieentries.go @@ -10,6 +10,7 @@ import ( "github.com/datatrails/go-datatrails-common/logger" "github.com/datatrails/go-datatrails-merklelog/massifs" + "github.com/datatrails/go-datatrails-merklelog/massifs/storage" "github.com/datatrails/go-datatrails-merklelog/mmr" "github.com/google/uuid" "github.com/urfave/cli/v2" @@ -69,8 +70,9 @@ func logIDToLogTenant(logID string) (string, error) { // findTrieKeys searchs the log of the given log tenant for matches to the given triekeys // and returns the leaf indexes (trie indexes) of all the matches as well as the number of trie entries considered func findTrieKeys( + ctx context.Context, log logger.Logger, - massifReader MassifReader, + massifReader readerSelector, tenantLogPath string, massifStartIndex int64, massifEndIndex int64, @@ -96,15 +98,10 @@ func findTrieKeys( break } - massifContext, err := massifReader.GetMassif(context.Background(), tenantLogPath, uint64(massifIndex)) + massifContext, err := massifs.GetMassifContext(ctx, massifReader, uint32(massifIndex)) // check if we have reached the last massif - if errors.Is(err, massifs.ErrMassifNotFound) { - break - } - - // check if we have reached the last massif for local log - if errors.Is(err, massifs.ErrLogFileMassifNotFound) { + if errors.Is(err, storage.ErrDoesNotExist) { break } @@ -210,6 +207,9 @@ func NewFindTrieEntriesCmd() *cli.Command { Action: func(cCtx *cli.Context) error { cmd := &CmdCtx{} + var err error + var reader readerSelector + // This command uses the structured logger for all optional output. 
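+			// (diagnostics such as the trieKey Debugf calls below are only
+			// emitted at the configured log level; nothing is printed unless
+			// explicitly requested)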
if err := cfgLogging(cmd, cCtx); err != nil { return err @@ -258,7 +258,7 @@ func NewFindTrieEntriesCmd() *cli.Command { return err } - if err := cfgMassifReader(cmd, cCtx); err != nil { + if reader, err = cfgMassifReader(cmd, cCtx); err != nil { return err } @@ -279,15 +279,18 @@ func NewFindTrieEntriesCmd() *cli.Command { []byte(appID), ) - cmd.log.Debugf("trieKey: %x", trieKey) + cmd.Log.Debugf("trieKey: %x", trieKey) + + reader.SelectLog(context.Background(), logIDBytes) leafIndexMatches, entriesConsidered, err = findTrieKeys( - cmd.log, - cmd.massifReader, + context.Background(), + cmd.Log, + reader, tenantLogPath, massifStartIndex, massifEndIndex, - cmd.massifHeight, + cmd.MassifFmt.MassifHeight, trieKey, ) if err != nil { @@ -308,7 +311,7 @@ func NewFindTrieEntriesCmd() *cli.Command { []byte(appID), ) - cmd.log.Debugf("trieKey version 0: %x", trieKeyVersion0) + cmd.Log.Debugf("trieKey version 0: %x", trieKeyVersion0) logTenantUUIDStr := strings.TrimPrefix(logTenant, "tenant/") logTenantUUID, err := uuid.Parse(logTenantUUIDStr) @@ -332,15 +335,18 @@ func NewFindTrieEntriesCmd() *cli.Command { []byte(appID), ) - cmd.log.Debugf("trieKey version 1: %x", trieKeyVersion1) + reader.SelectLog(context.Background(), logIDVersion1) + + cmd.Log.Debugf("trieKey version 1: %x", trieKeyVersion1) leafIndexMatches, entriesConsidered, err = findTrieKeys( - cmd.log, - cmd.massifReader, + context.Background(), + cmd.Log, + reader, tenantLogPath, massifStartIndex, massifEndIndex, - cmd.massifHeight, + cmd.MassifFmt.MassifHeight, trieKeyVersion0, trieKeyVersion1, ) @@ -350,7 +356,7 @@ func NewFindTrieEntriesCmd() *cli.Command { } - cmd.log.Debugf("entries considered: %v", entriesConsidered) + cmd.Log.Debugf("entries considered: %v", entriesConsidered) // if we want the leaf index matches log them and return if asLeafIndexes { diff --git a/fsstorage.go b/fsstorage.go new file mode 100644 index 0000000..b56a4b8 --- /dev/null +++ b/fsstorage.go @@ -0,0 +1,54 @@ +package veracity + +import ( + "context" + + "github.com/datatrails/go-datatrails-merklelog/massifs/storage" + fsstorage "github.com/robinbryce/go-merklelog-fs/storage" + "github.com/urfave/cli/v2" + "github.com/veraison/go-cose" +) + +const ( + defaultMassifHeight = uint8(14) +) + +func NewCmdStorageProviderFS( + ctx context.Context, + cCtx *cli.Context, cmd *CmdCtx, + dataLocal string, + createRootDir bool, +) (*fsstorage.CachingStore, error) { + var err error + massifExt := storage.V1MMRExtSep + storage.V1MMRMassifExt + if cCtx.IsSet("massif-ext") { + massifExt = cCtx.String("massif-ext") + } + + opts := fsstorage.Options{ + FSOptions: fsstorage.FSOptions{ + MassifFile: cCtx.String("massif-file"), + CheckpointFile: cCtx.String("checkpoint-file"), + RootDir: dataLocal, + CreateRootDir: createRootDir, + MassifExtension: massifExt, + }, + } + + opts.MassifHeight = cmd.MassifFmt.MassifHeight + + if cmd.CheckpointPublic.Public != nil { + verifier, err := cose.NewVerifier(cmd.CheckpointPublic.Alg, cmd.CheckpointPublic.Public) + if err != nil { + return nil, err + } + opts.COSEVerifier = verifier + } + + // Create Filesystem ObjectStore (replaces MassifFinder) + store, err := fsstorage.NewStore(ctx, opts) + if err != nil { + return nil, err + } + return store, nil +} diff --git a/go.mod b/go.mod index de9b167..065dbb9 100644 --- a/go.mod +++ b/go.mod @@ -1,45 +1,45 @@ module github.com/datatrails/veracity -go 1.23.0 +go 1.24.0 + +replace ( + github.com/datatrails/go-datatrails-common => ../go-datatrails-common + 
github.com/datatrails/go-datatrails-merklelog/massifs => ../go-datatrails-merklelog/massifs + github.com/datatrails/go-datatrails-merklelog/mmr => ../go-datatrails-merklelog/mmr + github.com/forestrie/go-merklelog-datatrails => ../go-merklelog-datatrails + github.com/robinbryce/go-merklelog-azure => ../go-merklelog-azure + github.com/robinbryce/go-merklelog-provider-testing => ../go-merklelog-provider-testing +) require ( - github.com/datatrails/go-datatrails-common v0.26.0 - github.com/datatrails/go-datatrails-common-api-gen v0.6.6 - github.com/datatrails/go-datatrails-logverification v0.4.3 - github.com/datatrails/go-datatrails-merklelog/massifs v0.4.0 - github.com/datatrails/go-datatrails-merklelog/mmr v0.2.0 - github.com/datatrails/go-datatrails-merklelog/mmrtesting v0.2.0 + github.com/datatrails/go-datatrails-common v0.30.0 + github.com/datatrails/go-datatrails-common-api-gen v0.8.0 + github.com/datatrails/go-datatrails-merklelog/massifs v0.6.0 + github.com/datatrails/go-datatrails-merklelog/mmr v0.4.0 github.com/datatrails/go-datatrails-serialization/eventsv1 v0.0.3 - github.com/datatrails/go-datatrails-simplehash v0.0.5 + github.com/datatrails/go-datatrails-simplehash v0.2.0 + github.com/forestrie/go-merklelog-datatrails v0.0.0 + github.com/fxamacker/cbor/v2 v2.9.0 + github.com/google/uuid v1.6.0 github.com/gosuri/uiprogress v0.0.1 - github.com/urfave/cli/v2 v2.27.6 + github.com/robinbryce/go-merklelog-azure v0.0.0-20250928182018-06ed158d48af + github.com/robinbryce/go-merklelog-fs v0.0.0-20250928180927-a4773e335b22 + github.com/robinbryce/go-merklelog-provider-testing v0.0.0-00010101000000-000000000000 + github.com/stretchr/testify v1.11.1 + github.com/urfave/cli/v2 v2.27.7 + github.com/veraison/go-cose v1.3.0 github.com/zeebo/bencode v1.0.0 - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 -) - -// replace ( -// github.com/datatrails/go-datatrails-merklelog/massifs => ../go-datatrails-merklelog/massifs -// ) - -require ( - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/gosuri/uilive v0.0.4 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/stretchr/objx v0.5.2 // indirect + go.uber.org/zap v1.27.0 + golang.org/x/exp v0.0.0-20250911091902-df9299821621 + google.golang.org/protobuf v1.36.9 ) require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 - github.com/Azure/go-amqp v1.0.5 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect @@ -50,36 +50,33 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - 
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/gabriel-vasile/mimetype v1.4.8 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect - github.com/google/uuid v1.6.0 + github.com/golang-jwt/jwt/v5 v5.2.2 // indirect + github.com/gosuri/uilive v0.0.4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/ldclabs/cose/go v0.0.0-20221214142927-d22c1cfc2154 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect - github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect - github.com/openzipkin/zipkin-go v0.4.3 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/stretchr/testify v1.10.0 - github.com/veraison/go-cose v1.1.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.32.0 // indirect - golang.org/x/net v0.34.0 // indirect - golang.org/x/sys v0.29.0 // indirect - golang.org/x/text v0.21.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/grpc v1.69.0-dev // indirect - google.golang.org/protobuf v1.36.6 + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.71.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 295dbf3..047c48a 100644 --- a/go.sum +++ b/go.sum @@ -2,16 +2,14 @@ github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod 
h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 h1:o/Ws6bEqMeKZUfj1RRm3mQ51O8JGU5w+Qdg2AhHib6A= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1/go.mod h1:6QAMYBAbQeeKX+REFJMZ1nFWu9XLw/PPcjYpuc9RDFs= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= -github.com/Azure/go-amqp v1.0.5 h1:po5+ljlcNSU8xtapHTe8gIc8yHxCzC03E8afH2g1ftU= -github.com/Azure/go-amqp v1.0.5/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= @@ -38,49 +36,45 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= -github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/datatrails/go-datatrails-common v0.26.0 h1:Ga8lWKnA57VDFStrrO9OR394jMvV7S9Ia92m/7tGehs= -github.com/datatrails/go-datatrails-common v0.26.0/go.mod h1:k/ub6fdHldXZ129gzxDZI0aifi/qyFyKlU2P5bAASBM= -github.com/datatrails/go-datatrails-common-api-gen v0.6.6 h1:Qbfnte1+ZQsi0XzbfmOuk/xziqbPyEC4nyl7SsQdGdg= -github.com/datatrails/go-datatrails-common-api-gen v0.6.6/go.mod h1:rTMGdMdu5M6mGpbXZy1D84cBTGE8JwsDH6BYh9LJlmA= -github.com/datatrails/go-datatrails-logverification v0.4.3 h1:E+VKGudFanjbdGFLjB6L+r/9hHEOVrutplB/uMS+hs0= -github.com/datatrails/go-datatrails-logverification v0.4.3/go.mod h1:pWS2YhTuQJH0F/OgufihhM49gbck2BeWDo5Fll4JYqk= -github.com/datatrails/go-datatrails-merklelog/massifs v0.4.0 h1:j0mPW+sJruxGD+L9x59zu4muCWcNQIHtGYFDw6ZWolw= -github.com/datatrails/go-datatrails-merklelog/massifs v0.4.0/go.mod h1:9PzDUZzIMSLWcf5iv0AbzYOz6IhIBlHMXPiU5S1mb00= -github.com/datatrails/go-datatrails-merklelog/mmr v0.2.0 h1:NUP0OUVixuyWf+Gmi/e3wS5JD4za7DU0gdWVAgqnI5c= -github.com/datatrails/go-datatrails-merklelog/mmr v0.2.0/go.mod h1:iLipg39Ce3U68NjXFxjxwxXR9U0T6Dm6pldJA47Lx8s= -github.com/datatrails/go-datatrails-merklelog/mmrtesting v0.2.0 h1:cv8JincUm3h/4hyVcuofPt5pAtOZ+KYLnsDdvQ3D6Lc= -github.com/datatrails/go-datatrails-merklelog/mmrtesting v0.2.0/go.mod 
h1:h8b1O0xAoMv2DsVQuo6vNyM4RLL2DlJCWJqgysX127w= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 h1:H5xDQaE3XowWfhZRUpnfC+rGZMEVoSiji+b+/HFAPU4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= +github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/datatrails/go-datatrails-common-api-gen v0.8.0 h1:vO+s0h1SZMQv89240Fxok/vsJU4Oo/jHO5rbOVnj/pA= +github.com/datatrails/go-datatrails-common-api-gen v0.8.0/go.mod h1:ekmas39HNTCa011DG54jaKTVFh9XpAYZqITE414bD9U= github.com/datatrails/go-datatrails-serialization/eventsv1 v0.0.3 h1:BLHfCXjzXUgr1knXE9XtZC+jNnf2orGEL+BTAWqSyp4= github.com/datatrails/go-datatrails-serialization/eventsv1 v0.0.3/go.mod h1:9i6Tip2lIXwSZ3SxP7XEhU2eQ9zkpxhEBmPmlOGqv/8= -github.com/datatrails/go-datatrails-simplehash v0.0.5 h1:igu4QRYO87RQXrJlqSm3fgMA2Q0F4jglWqBlfvKrXKQ= -github.com/datatrails/go-datatrails-simplehash v0.0.5/go.mod h1:XuOwViwdL+dyz7fGYIjaByS1ElMFsrVI0goKX0bNimA= +github.com/datatrails/go-datatrails-simplehash v0.2.0 h1:dxr3li6DSPrjok8RcavOGr6iTstFei/EIkzr6/IKcLQ= +github.com/datatrails/go-datatrails-simplehash v0.2.0/go.mod h1:2/2e9QtNqxBSt9mKJeBb6iR1PXE6evaHGxcdcXxUrog= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= -github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= -github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= 
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -93,8 +87,8 @@ github.com/gosuri/uiprogress v0.0.1 h1:0kpv/XY/qTmFWl/SkaJykZXrBBzwwadmW8fRb7RJS github.com/gosuri/uiprogress v0.0.1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= -github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= -github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -107,24 +101,17 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= -github.com/opentracing-contrib/go-stdlib 
v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 h1:uhcF5Jd7rP9DVEL10Siffyepr6SvlKbUsjH5JpNCRi8= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0/go.mod h1:+oCZ5GXXr7KPI/DNOQORPTq5AWHfALJj9c72b0+YsEY= -github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= -github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= +github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= +github.com/robinbryce/go-merklelog-fs v0.0.0-20250928180927-a4773e335b22 h1:tuzpQDcSrv2ZI8hwdvbJtlYHWyAJmoNdQeeIS26+Nfk= +github.com/robinbryce/go-merklelog-fs v0.0.0-20250928180927-a4773e335b22/go.mod h1:zAKFdVMl+0TPfGbKZQH4iGmuLV3/Q0rw9oPWhqrTiRk= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -136,12 +123,12 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g= -github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= -github.com/veraison/go-cose v1.1.0 h1:AalPS4VGiKavpAzIlBjrn7bhqXiXi4jbMYY/2+UC+4o= -github.com/veraison/go-cose v1.1.0/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= 
+github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= +github.com/veraison/go-cose v1.3.0 h1:2/H5w8kdSpQJyVtIhx8gmwPJ2uSz1PkyWFx0idbd7rk= +github.com/veraison/go-cose v1.3.0/go.mod h1:df09OV91aHoQWLmy1KsDdYiagtXgyAwAl8vFeFn1gMc= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= @@ -149,6 +136,18 @@ github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBi github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zeebo/bencode v1.0.0 h1:zgop0Wu1nu4IexAZeCZ5qbsjU4O1vMrfCrVgUjbHVuA= github.com/zeebo/bencode v1.0.0/go.mod h1:Ct7CkrWIQuLWAy9M3atFHYq4kG9Ao/SsY5cdtCXmp9Y= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -160,10 +159,10 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= +golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -172,8 +171,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -188,8 +187,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -202,26 +201,24 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
-google.golang.org/grpc v1.69.0-dev h1:apWegzBczine6VjRA1FpkZ9LVAvNINTqDPbiRDD4D/g=
-google.golang.org/grpc v1.69.0-dev/go.mod h1:2RINgKHklVDGHlkF/BfDsmIw0xdarBnd0YM+g7Fc0Fk=
-google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
-google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24=
+google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
+google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI=
+google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
+google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
+google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0=
-nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c=
diff --git a/jsonprincipals.go b/jsonprincipals.go
deleted file mode 100644
index bc84cd2..0000000
--- a/jsonprincipals.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package veracity
-
-import (
-    "fmt"
-
-    v2assets "github.com/datatrails/go-datatrails-common-api-gen/assets/v2/assets"
-)
-
-func newPrincipalFromJson(m map[string]any) (*v2assets.Principal, error) {
-    iss, ok := m["issuer"].(string)
-    if !ok {
-        return nil, fmt.Errorf("missing issuer")
-    }
-    sub, ok := m["subject"].(string)
-    if !ok {
-        return nil, fmt.Errorf("missing subject")
-    }
-    dn, ok := m["display_name"].(string)
-    if !ok {
-        return nil, fmt.Errorf("missing display_name")
-    }
-    email, ok := m["email"].(string)
-    if !ok {
-        return nil, fmt.Errorf("missing email")
-    }
-
-    p := &v2assets.Principal{
-        Issuer:      iss,
-        Subject:     sub,
-        DisplayName: dn,
-        Email:       email,
-    }
-    return p, nil
-}
diff --git a/keyio/decodecosekey.go b/keyio/decodecosekey.go
new file mode 100644
index 0000000..b098d8c
--- /dev/null
+++ b/keyio/decodecosekey.go
@@ -0,0 +1,233 @@
+package keyio
+
+import (
+    "crypto/ecdsa"
+    "crypto/elliptic"
+    "errors"
+    "fmt"
+    "math/big"
+
+    "github.com/veraison/go-cose"
+)
+
+// COSE Key as defined in: https://www.rfc-editor.org/rfc/rfc8152.html#page-33
+//
+// COSE_Key = {
+//     1 => tstr / int,          ; kty
+//     ? 2 => bstr,              ; kid
+//     ? 3 => tstr / int,        ; alg
+//     ? 4 => [+ (tstr / int) ], ; key_ops
+//     ? 5 => bstr,              ; Base IV
+//     * label => values
+// }
+const (
+
+    // TODO: these positive labels are private in go-cose
+    KeyTypeLabel       = 1
+    KeyIDLabel         = 2
+    AlgorithmLabel     = 3
+    KeyOperationsLabel = 4
+
+    // The negative EC2 labels are exported by go-cose, so they are not
+    // redefined here:
+    //  ECCurveLabel = -1 (cose.KeyLabelEC2Curve)
+    //  ECXLabel     = -2 (cose.KeyLabelEC2X)
+    //  ECYLabel     = -3 (cose.KeyLabelEC2Y)
+    //  ECDLabel     = -4 (cose.KeyLabelEC2D)
+)
+
+var (
+    ErrKeyFormatError = errors.New("key format error")
+)
+
+type DecodedPublic struct {
+    Alg    cose.Algorithm
+    Public *ecdsa.PublicKey
+}
+type DecodedPrivate struct {
+    Alg     cose.Algorithm
+    Private *ecdsa.PrivateKey
+}
+
+func COSEDecodeEC2Private(
+    m map[int64]any,
+) (DecodedPrivate, error) {
+    decoded, err := COSEDecodeEC2Public(m)
+    if err != nil {
+        return DecodedPrivate{}, fmt.Errorf("failed to decode public component of private key: %w", err)
+    }
+
+    d, err := DecodeLabeledBytes(m, cose.KeyLabelEC2D)
+    if err != nil {
+        return DecodedPrivate{}, fmt.Errorf("failed to decode d parameter from map: %w", err)
+    }
+
+    privateKey := &ecdsa.PrivateKey{
+        PublicKey: *decoded.Public,
+        D:         big.NewInt(0),
+    }
+    privateKey.D.SetBytes(d)
+
+    return DecodedPrivate{Alg: decoded.Alg, Private: privateKey}, nil
+}
+
+func COSEDecodeEC2Public(
+    m map[int64]any,
+) (DecodedPublic, error) {
+
+    // The key type must be EC2 or "EC"; the string form is accepted as an
+    // accommodation for JOSE.
+    if err := KTYRequireEC2(m[KeyTypeLabel]); err != nil {
+        return DecodedPublic{}, fmt.Errorf("failed to decode public key from map: %w", err)
+    }
+
+    alg, err := AlgRequireECDSA(m[AlgorithmLabel])
+    if err != nil {
+        return DecodedPublic{}, fmt.Errorf("failed to decode public key from map: %w", err)
+    }
+
+    x, err := DecodeLabeledBytes(m, cose.KeyLabelEC2X)
+    if err != nil {
+        return DecodedPublic{}, fmt.Errorf("failed to decode x coordinate from map: %w", err)
+    }
+
+    y, err := DecodeLabeledBytes(m, cose.KeyLabelEC2Y)
+    if err != nil {
+        return DecodedPublic{}, fmt.Errorf("failed to decode y coordinate from map: %w", err)
+    }
+
+    curve, err := COSEDecodeEC2Curve(m)
+    if err != nil {
+        return DecodedPublic{}, err
+    }
+
+    // TODO: also decode KeyIDLabel and KeyOperationsLabel as extras
+
+    publicKey := ecdsa.PublicKey{
+        Curve: curve,
+        X:     big.NewInt(0),
+        Y:     big.NewInt(0),
+    }
+    publicKey.X.SetBytes(x)
+    publicKey.Y.SetBytes(y)
+
+    return DecodedPublic{Alg: alg, Public: &publicKey}, nil
+}
+
+// COSEDecodeEC2Curve requires the curve to be appropriate for EC2 type keys
+// and returns a representation suitable for building a Go ECDSA public key.
+func COSEDecodeEC2Curve(m map[int64]any) (elliptic.Curve, error) {
+
+    curveAny, ok := m[cose.KeyLabelEC2Curve]
+    if !ok {
+        return nil, fmt.Errorf("missing curve label in COSE key map")
+    }
+
+    curve, ok := curveAny.(int64)
+    if !ok {
+        ucurve, ok := curveAny.(uint64)
+        if !ok {
+            return nil, fmt.Errorf("wrong type for curve label in COSE key map, needed int64, got %T", curveAny)
+        }
+        curve = int64(ucurve)
+    }
+    switch cose.Curve(curve) {
+    case cose.CurveP256:
+        return elliptic.P256(), nil
+    case cose.CurveP384:
+        return elliptic.P384(), nil
+    case cose.CurveP521:
+        return elliptic.P521(), nil
+    default:
+        return nil, fmt.Errorf("unsupported curve label in COSE key map: %d", curve)
+    }
+}
+
+// KTYRequireEC2 returns an error if the label is not EC2.
+// The strings "EC" and "EC2" are accepted as an accommodation for JOSE.
+// Both uint64 and int64 are accepted as accommodations for sloppy encoders.
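+// For example, int64(2), uint64(2), "EC" and "EC2" are all accepted, since
+// cose.KeyTypeEC2 has the value 2.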
+// Per https://www.rfc-editor.org/rfc/rfc8152.html#section-13 +func KTYRequireEC2(label any) error { + s, ok := label.(string) + + if ok { + if s != "EC" && s != "EC2" { + return fmt.Errorf("%w: expected EC or EC2 or %d, got %s", ErrKeyFormatError, cose.KeyTypeEC2, s) + } + return nil + } + + i64, ok := label.(int64) + if !ok { + u64, ok := label.(uint64) + if !ok { + return fmt.Errorf("%w: expected [uint64|int64|string] not %T", ErrKeyFormatError, label) + } + i64 = int64(u64) + } + if cose.KeyType(i64) != cose.KeyTypeEC2 { + return fmt.Errorf("%w: expected EC or EC2 or %d, got %d", ErrKeyFormatError, cose.KeyTypeEC2, i64) + } + return nil +} + +func AlgRequireECDSA(label any) (cose.Algorithm, error) { + s, ok := label.(string) + if ok { + switch s { + case "ES256": + return cose.Algorithm(cose.AlgorithmES256), nil + case "ES384": + return cose.Algorithm(cose.AlgorithmES384), nil + case "ES512": + return cose.Algorithm(cose.AlgorithmES512), nil + default: + return 0, fmt.Errorf("%w: decoding string label, expected ES256, ES384 or ES512, got %s", ErrKeyFormatError, s) + } + } + + i64, ok := label.(int64) + if !ok { + u64, ok := label.(uint64) + if !ok { + return 0, fmt.Errorf("%w: decoding integer label expected [uint64|int64] not %T", ErrKeyFormatError, label) + } + i64 = int64(u64) + } + + switch cose.Algorithm(i64) { + case cose.AlgorithmES256: + return cose.Algorithm(i64), nil + case cose.AlgorithmES384: + return cose.Algorithm(i64), nil + case cose.AlgorithmES512: + return cose.Algorithm(i64), nil + default: + return 0, fmt.Errorf( + "%w: decoding integer label expected %d, %d or %d, got %d", + ErrKeyFormatError, cose.AlgorithmES256, cose.AlgorithmES384, cose.AlgorithmES512, i64) + } +} + +// +// label decode helper +// + +func DecodeLabeledBytes(m map[int64]interface{}, label int64) ([]byte, error) { + v, ok := m[label] + if !ok { + return nil, fmt.Errorf("missing label %d in map", label) + } + return DecodeBytes(v) +} + +func DecodeBytes(label any) ([]byte, error) { + b, ok := label.([]byte) + if ok { + return b, nil + } + + s, ok := label.(string) + if ok { + return []byte(s), nil + } + return nil, fmt.Errorf("%w: expected []byte or string, got %T", ErrKeyFormatError, label) +} diff --git a/keyio/decodejosekey.go b/keyio/decodejosekey.go new file mode 100644 index 0000000..41d0581 --- /dev/null +++ b/keyio/decodejosekey.go @@ -0,0 +1,105 @@ +package keyio + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "math/big" + "os" + + "github.com/veraison/go-cose" +) + +// JWK represents a single JOSE key (simplified for EC public keys) +type JWK struct { + Kty string `json:"kty"` + Crv string `json:"crv"` + X string `json:"x"` + Y string `json:"y"` + Alg string `json:"alg,omitempty"` + Use string `json:"use,omitempty"` + Kid string `json:"kid,omitempty"` +} + +// JWKS represents a JOSE key set +type JWKS struct { + Keys []JWK `json:"keys"` +} + +// ReadECDSAPublicJOSE decodes a JSON-encoded JOSE EC public key set and returns the first *last* key as DecodedPublic +func ReadECDSAPublicJOSE(fileName string) (DecodedPublic, error) { + + joseKey, err := os.ReadFile(fileName) + if err != nil { + return DecodedPublic{}, fmt.Errorf("failed to read public keyset file: %w", err) + } + + var jwks JWKS + if err := json.Unmarshal(joseKey, &jwks); err != nil { + return DecodedPublic{}, err + } + if len(jwks.Keys) == 0 { + return DecodedPublic{}, errors.New("no keys found in JWKS") + } + jwk := jwks.Keys[len(jwks.Keys)-1] + if jwk.Kty != "EC" { + 
return DecodedPublic{}, errors.New("only EC keys are supported")
+    }
+    // Decode base64url-encoded X and Y
+    // Use base64.RawURLEncoding to decode JOSE base64url values (no padding)
+    x, err := base64.RawURLEncoding.DecodeString(jwk.X)
+    if err != nil {
+        return DecodedPublic{}, err
+    }
+    y, err := base64.RawURLEncoding.DecodeString(jwk.Y)
+    if err != nil {
+        return DecodedPublic{}, err
+    }
+
+    var curv elliptic.Curve
+    switch jwk.Crv {
+    case "P-256":
+        curv = elliptic.P256()
+    case "P-384":
+        curv = elliptic.P384()
+    case "P-521":
+        curv = elliptic.P521()
+    default:
+        return DecodedPublic{}, fmt.Errorf("%w: curve %s invalid for EC keys", ErrKeyFormatError, jwk.Crv)
+    }
+
+    var alg cose.Algorithm
+    switch jwk.Alg {
+    case "PS256":
+        alg = cose.AlgorithmPS256
+    case "PS384":
+        alg = cose.AlgorithmPS384
+    case "PS512":
+        alg = cose.AlgorithmPS512
+    case "ES256":
+        alg = cose.AlgorithmES256
+    case "ES384":
+        alg = cose.AlgorithmES384
+    case "ES512":
+        alg = cose.AlgorithmES512
+    default:
+        return DecodedPublic{}, fmt.Errorf("%w: alg %s invalid for EC keys", ErrKeyFormatError, jwk.Alg)
+    }
+
+    publicKey := ecdsa.PublicKey{
+        Curve: curv,
+        X:     big.NewInt(0),
+        Y:     big.NewInt(0),
+    }
+    publicKey.X.SetBytes(x)
+    publicKey.Y.SetBytes(y)
+
+    decoded := DecodedPublic{
+        Public: &publicKey,
+        Alg:    alg,
+    }
+    return decoded, nil
+}
diff --git a/keyio/ecdsareadwrite.go b/keyio/ecdsareadwrite.go
new file mode 100644
index 0000000..276054f
--- /dev/null
+++ b/keyio/ecdsareadwrite.go
@@ -0,0 +1,233 @@
+package keyio
+
+import (
+    "crypto/ecdsa"
+    "crypto/x509"
+    "encoding/pem"
+    "errors"
+    "fmt"
+    "os"
+
+    "github.com/fxamacker/cbor/v2"
+    "github.com/veraison/go-cose"
+)
+
+const (
+    ECDSAPublicDefaultPEMFileName  = "ecdsa-key-public.pem"
+    ECDSAPrivateDefaultPEMFileName = "ecdsa-key-private.pem"
+    ECDSAPublicDefaultFileName     = "ecdsa-key-public.cbor"
+    ECDSAPrivateDefaultFileName    = "ecdsa-key-private.cbor"
+    ECDSAPrivateDefaultPerm        = 0600 // Default permission for private key file
+    ECDSAPublicDefaultPerm         = 0644 // Default permission for public key file
+)
+
+func ReadECDSAPublicCOSE(
+    fileName string,
+) (DecodedPublic, error) {
+    // Read the public key from the named file
+    data, err := os.ReadFile(fileName)
+    if err != nil {
+        return DecodedPublic{}, fmt.Errorf("failed to read public key file: %w", err)
+    }
+
+    var m map[int64]any
+    if err := cbor.Unmarshal(data, &m); err != nil {
+        return DecodedPublic{}, err
+    }
+
+    return COSEDecodeEC2Public(m)
+}
+
+func ReadECDSAPrivateCOSE(
+    fileName string,
+    expectedStandardCurve ...string,
+) (DecodedPrivate, error) {
+    // Read the private key from the named file
+    data, err := os.ReadFile(fileName)
+    if err != nil {
+        return DecodedPrivate{}, fmt.Errorf("failed to read private key file: %w", err)
+    }
+    var m map[int64]any
+    if err := cbor.Unmarshal(data, &m); err != nil {
+        return DecodedPrivate{}, err
+    }
+
+    return COSEDecodeEC2Private(m)
+}
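+
+// Round-trip sketch (illustrative only; assumes an *ecdsa.PrivateKey named key):
+//
+//  if _, err := WriteCoseECDSAPrivateKey("key.cbor", key); err != nil { ... }
+//  decoded, err := ReadECDSAPrivateCOSE("key.cbor")
+//  // decoded.Private holds the same curve point and scalar as key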
+
+func ReadECDSAPrivatePEM(filePath string) (DecodedPrivate, error) {
+    pemData, err := os.ReadFile(filePath)
+    if err != nil {
+        return DecodedPrivate{}, err
+    }
+
+    block, _ := pem.Decode(pemData)
+    if block == nil || block.Type != "EC PRIVATE KEY" {
+        return DecodedPrivate{}, errors.New("invalid PEM block or type")
+    }
+
+    key, err := x509.ParseECPrivateKey(block.Bytes)
+    if err != nil {
+        return DecodedPrivate{}, err
+    }
+
+    coseKey, err := cose.NewKeyFromPrivate(key)
+    if err != nil {
+        return DecodedPrivate{}, err
+    }
+    decoded := DecodedPrivate{
+        Private: key,
+        Alg:     coseKey.Algorithm,
+    }
+
+    return decoded, nil
+}
+
+func ReadECDSAPublicPEM(filePath string) (DecodedPublic, error) {
+    pemData, err := os.ReadFile(filePath)
+    if err != nil {
+        return DecodedPublic{}, err
+    }
+
+    block, _ := pem.Decode(pemData)
+    if block == nil || block.Type != "PUBLIC KEY" {
+        return DecodedPublic{}, errors.New("invalid PEM block or type")
+    }
+
+    key, err := x509.ParsePKIXPublicKey(block.Bytes)
+    if err != nil {
+        return DecodedPublic{}, err
+    }
+
+    ecdsaKey, ok := key.(*ecdsa.PublicKey)
+    if !ok {
+        return DecodedPublic{}, errors.New("not an ECDSA public key")
+    }
+    coseKey, err := cose.NewKeyFromPublic(ecdsaKey)
+    if err != nil {
+        return DecodedPublic{}, err
+    }
+    decoded := DecodedPublic{
+        Public: ecdsaKey,
+        Alg:    coseKey.Algorithm,
+    }
+
+    return decoded, nil
+}
+
+// encodeECDSAPrivateKeyToPEM serializes the key to PEM format
+func encodeECDSAPrivateKeyToPEM(key *ecdsa.PrivateKey) ([]byte, error) {
+    der, err := x509.MarshalECPrivateKey(key)
+    if err != nil {
+        return nil, err
+    }
+    block := &pem.Block{
+        Type:  "EC PRIVATE KEY",
+        Bytes: der,
+    }
+    return pem.EncodeToMemory(block), nil
+}
+
+// WriteECDSAPrivatePEM writes PEM to a file with 0600 permissions
+func WriteECDSAPrivatePEM(pemFile string, key *ecdsa.PrivateKey) error {
+    pemBytes, err := encodeECDSAPrivateKeyToPEM(key)
+    if err != nil {
+        return fmt.Errorf("PEM encoding failed: %w", err)
+    }
+    return os.WriteFile(pemFile, pemBytes, 0600)
+}
+
+func WriteECDSAPublicCOSE(
+    pubFile string,
+    publicKey *ecdsa.PublicKey,
+) (string, error) {
+    var err error
+
+    if _, err = WriteCoseECDSAPublicKey(pubFile, publicKey); err != nil {
+        return "", err
+    }
+    return pubFile, nil
+}
+
+func WriteECDSAPrivateCOSE(
+    privFile string,
+    privateKey *ecdsa.PrivateKey,
+) (string, error) {
+    var err error
+
+    if _, err = WriteCoseECDSAPrivateKey(privFile, privateKey); err != nil {
+        return "", err
+    }
+    return privFile, nil
+}
+
+// encodePrivateKeyToCOSE encodes a private key to COSE_Key format (as CBOR bytes)
+func encodePrivateKeyToCOSE(key *ecdsa.PrivateKey) ([]byte, error) {
+    m := map[int64]any{
+        KeyTypeLabel:          int64(cose.KeyTypeEC2),
+        AlgorithmLabel:        cose.AlgorithmES256, // ES256 pairs with P-256; the PS* algorithms are RSA based
+        cose.KeyLabelEC2Curve: cose.CurveP256,      // P-256
+        cose.KeyLabelEC2X:     key.PublicKey.X.Bytes(),
+        cose.KeyLabelEC2Y:     key.PublicKey.Y.Bytes(),
+        cose.KeyLabelEC2D:     key.D.Bytes(),
+    }
+    return cbor.Marshal(m)
+}
+
+// encodePublicKeyToCOSE encodes a public key to COSE_Key format (as CBOR bytes)
+func encodePublicKeyToCOSE(key *ecdsa.PublicKey) ([]byte, error) {
+    m := map[int64]any{
+        KeyTypeLabel:          int64(cose.KeyTypeEC2),
+        AlgorithmLabel:        cose.AlgorithmES256, // ES256 pairs with P-256; the PS* algorithms are RSA based
+        cose.KeyLabelEC2Curve: cose.CurveP256,      // P-256
+        cose.KeyLabelEC2X:     key.X.Bytes(),
+        cose.KeyLabelEC2Y:     key.Y.Bytes(),
+    }
+    return cbor.Marshal(m)
+}
+
+func WriteCoseECDSAPrivateKey(
+    fileName string,
+    privateKey *ecdsa.PrivateKey,
+    perms ...os.FileMode,
+) ([]byte, error) {
+    var err error
+    var data []byte
+    if data, err = encodePrivateKeyToCOSE(privateKey); err != nil {
+        return nil, err
+    }
+
+    perm := os.FileMode(ECDSAPrivateDefaultPerm) // Default permission
+    if len(perms) > 0 {
+        perm = perms[0]
+    }
+
+    // Save to file
+    if err := os.WriteFile(fileName, data, perm); err != nil {
+        return nil, err
+    }
+    return data, nil
+}
+
+func WriteCoseECDSAPublicKey(
+    fileName string,
+    publicKey *ecdsa.PublicKey,
+    perms ...os.FileMode,
+) ([]byte, error) {
+    var err error
+    var data []byte
+    if data, err = encodePublicKeyToCOSE(publicKey); err != nil {
+        return nil, err
+    }
+
+    perm := os.FileMode(ECDSAPublicDefaultPerm) // Default permission
+    if len(perms) > 0 {
+        perm = perms[0]
+    }
+
+    // Save to file
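+    // (0644 by default: public keys are world readable, pass perms to override)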
+ if err := os.WriteFile(fileName, data, perm); err != nil { + return nil, err + } + return data, nil +} diff --git a/sealerpubkey.go b/keyio/sealerpubkey.go similarity index 99% rename from sealerpubkey.go rename to keyio/sealerpubkey.go index b60f2f3..bc33f30 100644 --- a/sealerpubkey.go +++ b/keyio/sealerpubkey.go @@ -1,4 +1,4 @@ -package veracity +package keyio import ( "crypto/ecdsa" @@ -49,7 +49,6 @@ func DecodeECDSAPublicPEM(data []byte) (*ecdsa.PublicKey, error) { // material presented as a single, base64 encoded, string. This is typically // more convenient for command line and environment vars func DecodeECDSAPublicString(data string) (*ecdsa.PublicKey, error) { - keyData, err := base64.StdEncoding.DecodeString(data) if err != nil { return nil, err diff --git a/localreader.go b/localreader.go deleted file mode 100644 index a5049cb..0000000 --- a/localreader.go +++ /dev/null @@ -1,74 +0,0 @@ -package veracity - -import ( - "bufio" - "bytes" - "io" - "os" - "path/filepath" - - "github.com/datatrails/go-datatrails-merklelog/massifs" -) - -type ReadOpener struct{} - -func (*ReadOpener) Open(name string) (io.ReadCloser, error) { - fpath, err := filepath.Abs(name) - if err != nil { - return nil, err - } - return os.Open(fpath) -} - -func NewFileOpener() massifs.Opener { - return &ReadOpener{} -} - -type StdinOpener struct { - data []byte -} - -func NewStdinOpener() massifs.Opener { - return &StdinOpener{} -} - -func (o *StdinOpener) Open(string) (io.ReadCloser, error) { - if len(o.data) > 0 { - return io.NopCloser(bytes.NewReader(o.data)), nil - } - - r := bufio.NewReader(os.Stdin) - data, err := io.ReadAll(r) - if err != nil { - return nil, err - } - - o.data = data - return io.NopCloser(bytes.NewReader(o.data)), nil -} - -// Utilities to remove the os dependencies from the MassifReader -type OsDirLister struct{} - -func NewDirLister() massifs.DirLister { - return &OsDirLister{} -} - -func (*OsDirLister) ListFiles(name string) ([]string, error) { - dpath, err := filepath.Abs(name) - if err != nil { - return nil, err - } - result := []string{} - entries, err := os.ReadDir(dpath) - if err != nil { - return result, err - } - for _, entry := range entries { - // if !entry.IsDir() && entry.Type().IsRegular() && strings.HasSuffix(entry.Name(), massifs.V1MMRMassifExt){ - if !entry.IsDir() { - result = append(result, filepath.Join(dpath, entry.Name())) - } - } - return result, nil -} diff --git a/localwriter.go b/localwriter.go deleted file mode 100644 index 9d63494..0000000 --- a/localwriter.go +++ /dev/null @@ -1,37 +0,0 @@ -package veracity - -import ( - "io" - "os" - "path/filepath" - - "github.com/datatrails/go-datatrails-merklelog/massifs" -) - -const ( - readWriteAllPermission = 0666 -) - -// FileWriteAppendOpener is an interface for opening a file for writing -// The Open implementation must open for *append*, and must create the file if it does not exist. -// The Create implementation must truncate the file if it exists, and create it if it does not. -type FileWriteAppendOpener struct{} - -// Open ensures the named file exists and is writable. Writes are appended to any existing content. 
-func (*FileWriteAppendOpener) Open(name string) (io.WriteCloser, error) { - name, err := filepath.Abs(name) - if err != nil { - return nil, err - } - return os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, readWriteAllPermission) -} - -// Create ensures the named file exists, is empty and is writable -// If the named file already exists it is truncated -func (*FileWriteAppendOpener) Create(name string) (io.WriteCloser, error) { - return os.Create(name) -} - -func NewFileWriteOpener() massifs.WriteAppendOpener { - return &FileWriteAppendOpener{} -} diff --git a/logactivity.go b/logactivity.go new file mode 100644 index 0000000..1f3e1ec --- /dev/null +++ b/logactivity.go @@ -0,0 +1,48 @@ +package veracity + +import ( + "bufio" + "bytes" + "encoding/json" + "os" + "path/filepath" + + "github.com/datatrails/go-datatrails-merklelog/massifs/watcher" +) + +func FilePathToLogMassifs(filePath string) ([]watcher.LogMassif, error) { + filePath, err := filepath.Abs(filePath) + if err != nil { + return nil, err + } + f, err := os.Open(filePath) + if err != nil { + return nil, err + } + return ScannerToLogMassifs(bufio.NewScanner(f)) +} + +func StdinToDecodedLogMassifs() ([]watcher.LogMassif, error) { + return ScannerToLogMassifs(bufio.NewScanner(os.Stdin)) +} + +func ScannerToLogMassifs(scanner *bufio.Scanner) ([]watcher.LogMassif, error) { + var data []byte + for scanner.Scan() { + data = append(data, scanner.Bytes()...) + } + if err := scanner.Err(); err != nil { + return nil, err + } + return LogMassifsFromData(data) +} + +func LogMassifsFromData(data []byte) ([]watcher.LogMassif, error) { + decoder := json.NewDecoder(bytes.NewReader(data)) + var doc []watcher.LogMassif + err := decoder.Decode(&doc) + if err == nil { + return doc, nil + } + return nil, err +} diff --git a/logtail.go b/logtail.go index 126c5f0..ce37681 100644 --- a/logtail.go +++ b/logtail.go @@ -3,14 +3,15 @@ package veracity import ( "context" "fmt" - "strconv" "time" - "github.com/datatrails/go-datatrails-common/azblob" + commoncbor "github.com/datatrails/go-datatrails-common/cbor" "github.com/datatrails/go-datatrails-common/cose" "github.com/datatrails/go-datatrails-merklelog/massifs" "github.com/datatrails/go-datatrails-merklelog/massifs/snowflakeid" + "github.com/datatrails/go-datatrails-merklelog/massifs/storage" "github.com/datatrails/go-datatrails-merklelog/massifs/watcher" + "github.com/google/uuid" "github.com/urfave/cli/v2" ) @@ -18,19 +19,16 @@ type TailConfig struct { // Interval defines the wait period between repeated tail checks if many // checks have been asked for. Interval time.Duration - // TenantIdentity identifies the log of interest - TenantIdentity string + LogID storage.LogID } // LogTailActivity can represent either the seal or the massif that has most recently // been updated for the log. type LogTailActivity struct { watcher.LogTail - LogSize uint64 LastIDEpoch uint8 LastIDTimestamp uint64 LogActivity time.Time - TagActivity time.Time } // MassifTail contains the massif specific tail information @@ -47,7 +45,7 @@ type SealTail struct { State massifs.MMRState } -// String returns a printable. 
loggable pretty rendering of the tail
+// String returns a printable pretty rendering of the tail
 func (st SealTail) String() string {
 
 	s := fmt.Sprintf(
@@ -56,81 +54,80 @@ func (st SealTail) String() string {
 		time.UnixMilli(st.State.Timestamp).UTC().Format(time.RFC3339),
 		st.LogActivity.UTC().Format(time.RFC3339),
 	)
-	if st.LastID != "" {
-		return fmt.Sprintf(
-			"%s, tag activity: %v",
-			s, st.TagActivity.UTC().Format(time.RFC3339),
-		)
-	}
-	return fmt.Sprintf("%s, tag activity: ** tag not set **", s)
+	return s
 }
 
-// NewTailConfig derives a configuration from the supplied comand line options context
+// NewTailConfig derives a configuration from the supplied command line options context
 func NewTailConfig(cCtx *cli.Context, cmd *CmdCtx) (TailConfig, error) {
+
+	if cCtx.String("logid") == "" {
+		return TailConfig{}, fmt.Errorf("a logid is required")
+	}
+
 	cfg := TailConfig{}
 	// note: the cli defaults to 1 second interval and count = 1. so by default
 	// the interval is ignored. If count is 0 or > 1, we get a single second
 	// sleep by default.
 	cfg.Interval = cCtx.Duration("interval")
-	cfg.TenantIdentity = cCtx.String("tenant")
-	if cfg.TenantIdentity == "" {
-		return TailConfig{}, fmt.Errorf("tenant identity is required")
+
+	// transitional: allow regular tenant identities from the datatrails era
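+	// Both of the following forms identify the same log (illustrative value):
+	//
+	//   tenant/01234567-89ab-cdef-0123-456789abcdef
+	//   01234567-89ab-cdef-0123-456789abcdef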
+	cfg.LogID = storage.ParsePrefixedLogID("tenant/", cCtx.String("logid"))
+	if cfg.LogID == nil {
+		uid, err := uuid.Parse(cCtx.String("logid"))
+		if err != nil {
+			return TailConfig{}, err
+		}
+		cfg.LogID = uid[:]
+	}
 	return cfg, nil
 }
 
-// String returns a printable. loggable pretty rendering of the tail
+// String returns a printable pretty rendering of the tail
 func (lt MassifTail) String() string {
 	s := fmt.Sprintf(
-		"massif: %d, mmrSize: %d, lastid: %s, log activity: %v",
-		lt.Number, lt.LogSize, lt.LastID,
+		"massif: %d, lastid: %s, log activity: %v",
+		lt.Number, lt.LastID,
 		lt.LogActivity.UTC().Format(time.RFC3339),
 	)
-	if lt.LastID != "" {
-		return fmt.Sprintf(
-			"%s, tag activity: %v",
-			s, lt.TagActivity.UTC().Format(time.RFC3339),
-		)
-
-	}
-	return fmt.Sprintf("%s, tag activity: ** tag not set **", s)
+	return s
 }
 
 // TailSeal returns the most recently added seal for the log
 func TailSeal(
 	ctx context.Context,
-	rootReader massifs.SignedRootReader,
-	tenantIdentity string,
+	reader massifs.ObjectReader,
+	codec commoncbor.CBORCodec,
+	logID storage.LogID,
 ) (SealTail, error) {
 	var err error
-	var tailSeal massifs.LogBlobContext
+
 	st := SealTail{
 		LogTailActivity: LogTailActivity{
 			LogTail: watcher.LogTail{
-				Tenant: tenantIdentity,
+				LogID: logID,
 			},
 		},
 	}
-	tailSeal, st.Count, err = rootReader.GetLazyContext(
-		ctx, tenantIdentity, massifs.LastBlob, azblob.WithListTags())
-	if err != nil {
-		return SealTail{}, err
-	}
-	tags := tailSeal.Tags
-	msg, state, err := rootReader.ReadLogicalContext(ctx, tailSeal, azblob.WithGetTags())
+
+	headIndex, err := reader.HeadIndex(ctx, storage.ObjectCheckpoint)
 	if err != nil {
-		return SealTail{}, err
+		return SealTail{}, fmt.Errorf("error reading head massif index: %w", err)
 	}
-	st.Signed = *msg
-	st.State = state
-	st.Path = tailSeal.BlobPath
-
-	st.Number, st.Ext, err = massifs.ParseMassifPathNumberExt(st.Path)
+	checkpt, err := massifs.GetCheckpoint(ctx, reader, codec, headIndex)
 	if err != nil {
-		return SealTail{}, err
+		return SealTail{}, fmt.Errorf("error reading checkpoint for log %x: %w", logID, err)
 	}
+	st.Signed = checkpt.Sign1Message
+	st.State = checkpt.MMRState
 
 	// The log activity as it stood when the seal was made is on the state
 	lastMS, err := snowflakeid.IDUnixMilli(st.State.IDTimestamp, uint8(st.State.CommitmentEpoch))
@@ -139,80 +136,55 @@ func TailSeal(
 	}
 
 	st.LogActivity = time.UnixMilli(lastMS)
-
-	// And the seal blob also has a tag so this can be indexed
-	//lastIDTag := tailSeal.Tags[massifs.TagKeyLastID]
-	st.LastID = tags[massifs.TagKeyLastID]
-	id, epoch, err := massifs.SplitIDTimestampHex(st.LastID)
-	if err != nil {
-		return SealTail{}, err
-	}
-	lastMS, err = snowflakeid.IDUnixMilli(id, epoch)
-	if err != nil {
-		return SealTail{}, err
-	}
-	st.TagActivity = time.UnixMilli(lastMS)
+	st.LastIDTimestamp = st.State.IDTimestamp
 
 	return st, err
 }
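 
+// Tailing both the seal and the massif for a log might look like this
+// (sketch only; errors elided; reader, codec and logID as configured above):
+//
+//	st, _ := TailSeal(ctx, reader, codec, logID)
+//	mt, _ := TailMassif(ctx, reader, logID)
+//	fmt.Println(st.String(), mt.String())
+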
+type storageTailer interface {
+	HeadIndex(ctx context.Context, ty storage.ObjectType) (uint32, error)
+}
+
+type massifTailer interface {
+	storageTailer
+}
+
 // TailMassif returns the active massif for the tenant
 func TailMassif(
 	ctx context.Context,
-	massifReader MassifReader,
-	tenantIdentity string,
+	reader massifs.ObjectReader,
+	logID storage.LogID,
 ) (MassifTail, error) {
 	var err error
 	lt := MassifTail{
 		LogTailActivity: LogTailActivity{
 			LogTail: watcher.LogTail{
-				Tenant: tenantIdentity,
+				LogID: logID,
 			},
 		},
 	}
-	tailMassif, err := massifReader.GetHeadMassif(ctx, tenantIdentity, massifs.WithListBlobOption(azblob.WithGetTags()))
-	if err != nil {
-		return MassifTail{}, fmt.Errorf(
-			"error reading head massif for tenant %s: %w",
-			tenantIdentity, err)
-	}
-	lt.Path = tailMassif.BlobPath
-	lt.Number = tailMassif.Start.MassifIndex
 
-	number, ext, err := massifs.ParseMassifPathNumberExt(lt.Path)
+	headIndex, err := reader.HeadIndex(ctx, storage.ObjectMassifStart)
 	if err != nil {
-		return MassifTail{}, err
-	}
-	if number != lt.Number {
-		return MassifTail{}, fmt.Errorf("path base file doesn't match massif index in log start record")
+		return MassifTail{}, fmt.Errorf("error reading head massif index: %w", err)
 	}
-	lt.Ext = ext
 
-	logActivityMS, err := tailMassif.LastCommitUnixMS(uint8(tailMassif.Start.CommitmentEpoch))
+	start, err := massifs.GetMassifStart(ctx, reader, headIndex)
+	if err != nil {
+		return MassifTail{}, fmt.Errorf("error reading start of head massif for log %x: %w", logID, err)
+	}
+
+	lt.Number = headIndex
+	lt.OType = storage.ObjectMassifData
+
+	logActivityMS, err := snowflakeid.IDUnixMilli(start.LastID, uint8(start.CommitmentEpoch))
 	if err != nil {
 		return MassifTail{}, fmt.Errorf(
-			"error reading last activity time from head massif for tenant %s: %w",
-			tenantIdentity, err)
+			"error reading last activity time from head massif for log %x: %w",
+			logID, err)
 	}
 	lt.LogActivity = time.UnixMilli(logActivityMS)
-	firstIndexTag := tailMassif.Tags[massifs.TagKeyFirstIndex]
-	lt.FirstIndex, err = strconv.ParseUint(firstIndexTag, 16, 64)
-	if err != nil {
-		return MassifTail{}, err
-	}
-	lt.LogSize = tailMassif.RangeCount()
+	lt.LastIDTimestamp = start.LastID
+	lt.LastIDEpoch = uint8(start.CommitmentEpoch)
 
-	lt.LastID = tailMassif.Tags[massifs.TagKeyLastID]
-	lt.LastIDTimestamp, lt.LastIDEpoch, err = massifs.SplitIDTimestampHex(lt.LastID)
-	if err != nil {
-		return MassifTail{}, err
-	}
-	lastMS, err := snowflakeid.IDUnixMilli(lt.LastIDTimestamp, lt.LastIDEpoch)
-	if err != nil {
-		return MassifTail{}, err
-	}
-	lt.TagActivity = time.UnixMilli(lastMS)
 	return lt, nil
 }
 
@@ -220,7 +192,7 @@ func NewLogTailCmd() *cli.Command {
 	return &cli.Command{Name: "tail",
 		Usage: `report the current tail (most recent end) of the log
 
-	if --count is > 1, re-check every interval seconds until the count is exhasted
+	if --count is > 1, re-check every interval seconds until the count is exhausted
 
 	if --count is explicitly zero, check forever
 `,
 		Flags: []cli.Flag{
@@ -231,8 +203,8 @@ func NewLogTailCmd() *cli.Command {
 			},
 
 			&cli.StringFlag{
-				Name: "tenant", Aliases: []string{"t"},
-				Usage: "tenant identity",
+				Name:     "logid",
+				Usage:    "log identifier, as a string encoded uuid",
 				Required: true,
 			},
 
@@ -251,10 +223,12 @@ func NewLogTailCmd() *cli.Command {
 			cmd := &CmdCtx{}
 			ctx := context.Background()
 
-			if err = cfgMassifReader(cmd, cCtx); err != nil {
+			if err = cfgMassifFmt(cmd, cCtx); err != nil {
 				return err
 			}
-			if err = cfgRootReader(cmd, cCtx); err != nil {
+
+			reader, err := newMassifReader(cmd, cCtx)
+			if err != nil {
 				return err
 			}
 
@@ -263,6 +237,11 @@ func NewLogTailCmd() *cli.Command {
 				return err
 			}
 
+			codec, err := massifs.NewCBORCodec()
+			if err != nil {
+				return err
+			}
+
 			count := cCtx.Int("count")
 			mode := cCtx.String("mode")
 			for {
@@ -270,14 +249,14 @@ func NewLogTailCmd() *cli.Command {
 				var lt MassifTail
 				var st SealTail
 				if mode == "both" || mode == "massif" {
-					lt, err = TailMassif(ctx, cmd.massifReader, cfg.TenantIdentity)
+					lt, err = TailMassif(ctx, reader, cfg.LogID)
 					if err != nil {
 						return err
 					}
 					fmt.Printf("%s\n", lt.String())
 				}
 				if mode == "both" || mode == "seal" {
-					st, err = TailSeal(ctx, cmd.rootReader, cfg.TenantIdentity)
+					st, err = TailSeal(ctx, reader, codec, cfg.LogID)
 					if err != nil {
 						return err
 					}
diff --git a/main b/main
new file mode 100755
index 0000000..b4cd155
Binary files /dev/null and b/main differ
diff --git a/massifstores.go b/massifstores.go
new file mode 100644
index 0000000..e28a8bf
--- /dev/null
+++ b/massifstores.go
@@ -0,0 +1,77 @@
+package veracity
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/datatrails/go-datatrails-merklelog/massifs"
+	"github.com/datatrails/go-datatrails-merklelog/massifs/storage"
+	"github.com/urfave/cli/v2"
+)
+
+// omniMassifReader is the union of all interfaces needed by veracity commands
+type omniMassifReader interface {
+	SelectLog(ctx context.Context, logID storage.LogID) error
+	massifs.ObjectReader
+	massifs.ObjectWriter
+}
+
+type readerSelector interface {
+	SelectLog(ctx context.Context, logID storage.LogID) error
+	massifs.ObjectReader
+}
+
+func newReaderSelector(cmd *CmdCtx, cCtx *cli.Context) (readerSelector, error) {
+	return newMassifStore(cmd, cCtx)
+}
+
+func newMassifReader(cmd *CmdCtx, cCtx *cli.Context) (readerSelector, error) {
+	return newMassifStore(cmd, cCtx)
+}
+
+func localDataOptionsSet(cCtx *cli.Context) bool {
+	if cCtx.IsSet("data-local") && cCtx.String("data-local") != "" {
+		return true
+	}
+	if cCtx.IsSet("massif-file") && cCtx.String("massif-file") != "" {
+		return true
+	}
+	if cCtx.IsSet("checkpoint-file") && cCtx.String("checkpoint-file") != "" {
+		return true
+	}
+	return false
+}
+
+func newMassifStore(cmd *CmdCtx, cCtx *cli.Context) (omniMassifReader, error) {
+	var err error
+
+	localSet := localDataOptionsSet(cCtx)
+	remoteLog := cCtx.String("data-url")
+
+	if localSet && remoteLog != "" {
+		return nil, fmt.Errorf("can't use data-local and data-url at the same time")
+	}
+
+	if !localSet && remoteLog == "" && !IsStorageEmulatorEnabled(cCtx) {
+		remoteLog = DefaultRemoteMassifURL
+	}
+
+	var reader omniMassifReader
+
+	if remoteLog != "" || IsStorageEmulatorEnabled(cCtx) {
+		reader, err = NewCmdStorageProviderAzure(context.Background(), cCtx, cmd, remoteLog, nil)
+		if err != nil {
+			return nil, fmt.Errorf("could not create massif reader: %w", err)
+		}
+		return reader, nil
+	}
+	if localSet {
+		reader, err := NewCmdStorageProviderFS(context.Background(), cCtx, cmd, cCtx.String("data-local"), false)
+		if err != nil {
+			return nil, fmt.Errorf("could not create massif reader: %w", err)
+		}
+		return reader, nil
+	}
+	return nil, fmt.Errorf("no massif reader configured, use either data-local or data-url")
+}
diff --git a/mmriver/const.go b/mmriver/const.go
new file mode 100644
index 0000000..a79754f
--- /dev/null
+++ b/mmriver/const.go
@@ -0,0 +1,6 @@
+package mmriver
+
+const (
+	LeafTypePlain          = uint8(0)
+	expectedExtraBytesSize = 24
+)
diff --git a/mmriver/mmrentryversion1.go b/mmriver/mmrentryversion1.go
new file mode 100644
index 0000000..bbf3233
--- /dev/null
+++ b/mmriver/mmrentryversion1.go
@@ -0,0 +1,82 @@
+// Package mmriver works with the datatrails ledger based on draft-bryce-cose-receipts-mmr-profile
+package mmriver
+
+import (
+	"crypto/sha256"
+	"encoding/binary"
+	"errors"
+)
+
+// MMREntryVersion1 gets the mmr entry for log entry version 1.
+// mmr entry format for log entry version 1:
+//
+//	H( domain | mmrSalt | serializedBytes )
+//
+// where mmrSalt = extraBytes + idtimestamp
+//
+// NOTE: extraBytes is consistently 24 bytes on the trie value, so we pad
+// extraBytes here to ensure it is 24 bytes also (oversized values are
+// rejected). This allows greater consistency and ease of moving between
+// mmrSalt and trieValue.
+func MMREntryVersion1(extraBytes []byte, idtimestamp uint64, serializedBytes []byte) ([]byte, error) {
+	hasher := sha256.New()
+
+	// domain
+	hasher.Write([]byte{byte(LeafTypePlain)})
+
+	// mmrSalt
+
+	// ensure extraBytes is 24 bytes long
+	extraBytes, err := ConsistentExtraBytesSize(extraBytes)
+	if err != nil {
+		return nil, err
+	}
+	hasher.Write(extraBytes)
+
+	// convert idtimestamp to big-endian bytes
+	idTimestampBytes := make([]byte, 8)
+	binary.BigEndian.PutUint64(idTimestampBytes, idtimestamp)
+	hasher.Write(idTimestampBytes)
+
+	// serializedBytes
+	hasher.Write(serializedBytes)
+
+	return hasher.Sum(nil), nil
+}
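+
+// For example (illustrative), with empty extraBytes and a zero idtimestamp
+// the digest input is:
+//
+//	0x00            domain byte (LeafTypePlain)
+//	24 x 0x00       padded extraBytes
+//	8 x 0x00        big-endian idtimestamp
+//	serializedBytes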
+
+func TrimExtraBytes(extraBytes []byte) []byte {
+	extraBytesSize := len(extraBytes)
+
+	// larger size, need to truncate
+	if extraBytesSize > expectedExtraBytesSize {
+		extraBytes = extraBytes[:expectedExtraBytesSize]
+	}
+
+	// smaller size, need to pad
+	if extraBytesSize < expectedExtraBytesSize {
+		tmp := make([]byte, expectedExtraBytesSize)
+		copy(tmp[:extraBytesSize], extraBytes)
+		return tmp
+	}
+
+	// goldilocks, just right
+	return extraBytes
+}
+
+// ConsistentExtraBytesSize ensures the given extraBytes is padded to exactly
+// 24 bytes; values larger than 24 bytes are rejected with an error
+func ConsistentExtraBytesSize(extraBytes []byte) ([]byte, error) {
+	extraBytesSize := len(extraBytes)
+
+	// larger size is an error
+	if extraBytesSize > expectedExtraBytesSize {
+		return nil, errors.New("extra bytes is too large, maximum extra bytes size is 24")
+	}
+
+	// smaller size, need to pad
+	if extraBytesSize < expectedExtraBytesSize {
+		tmp := make([]byte, expectedExtraBytesSize)
+		copy(tmp[:extraBytesSize], extraBytes)
+		return tmp, nil
+	}
+
+	// goldilocks, just right
+	return extraBytes, nil
+}
diff --git a/node.go b/node.go
index a31fd9c..f7cc758 100644
--- a/node.go
+++ b/node.go
@@ -1,6 +1,7 @@
 package veracity
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/urfave/cli/v2"
@@ -19,16 +20,15 @@ func NewNodeCmd() *cli.Command {
 			},
 		},
 		Action: func(cCtx *cli.Context) error {
-			var err error
 			cmd := &CmdCtx{}
 
-			err = cfgMassif(cmd, cCtx)
+			massif, err := cfgMassif(context.Background(), cmd, cCtx)
 			if err != nil {
 				return err
 			}
 
 			mmrIndex := cCtx.Uint64("mmrindex")
 
-			value, err := 
cmd.massif.Get(mmrIndex) + value, err := massif.Get(mmrIndex) if err != nil { return err } diff --git a/node_test.go b/node_test.go index 4809cf2..a727421 100644 --- a/node_test.go +++ b/node_test.go @@ -11,8 +11,9 @@ import ( "time" "github.com/datatrails/go-datatrails-common/logger" - "github.com/datatrails/go-datatrails-merklelog/mmrtesting" - "github.com/datatrails/veracity/veracitytesting" + "github.com/datatrails/veracity/tests/testcontext" + "github.com/forestrie/go-merklelog-datatrails/datatrails" + "github.com/robinbryce/go-merklelog-provider-testing/mmrtesting" "github.com/stretchr/testify/assert" ) @@ -29,16 +30,18 @@ func TestNodeCmd(t *testing.T) { logger.Sugar.Infof("url: '%s'", url) // Create a single massif in the emulator + tc, logID := testcontext.CreateLogContext( + t, 8, 1, + mmrtesting.WithTestLabelPrefix("TestNodeCmd"), + ) - tenantID := mmrtesting.DefaultGeneratorTenantIdentity - testContext, testGenerator, cfg := veracitytesting.NewAzuriteTestContext(t, "TestNodeCmd") - veracitytesting.GenerateTenantLog(&testContext, testGenerator, 10, tenantID, true, massifHeight, LeafTypePlain) + tenantID := datatrails.Log2TenantID(logID) tests := []struct { testArgs []string }{ // get node 1 - {testArgs: []string{"", "-u", "-", "-s", "devstoreaccount1", "-c", cfg.Container, "-t", tenantID, "node", fmt.Sprintf("%d", 1)}}, + {testArgs: []string{"", "-s", "devstoreaccount1", "-c", tc.Cfg.Container, "-t", tenantID, "node", fmt.Sprintf("%d", 1)}}, } for _, tc := range tests { diff --git a/nodescan.go b/nodescan.go index 8fa95ca..2548584 100644 --- a/nodescan.go +++ b/nodescan.go @@ -2,6 +2,7 @@ package veracity import ( "bytes" + "context" "encoding/hex" "fmt" @@ -24,10 +25,11 @@ func NewNodeScanCmd() *cli.Command { &cli.BoolFlag{Name: "massif-relative", Aliases: []string{"r"}}, }, Action: func(cCtx *cli.Context) error { - var err error cmd := &CmdCtx{} - if err = cfgMassif(cmd, cCtx); err != nil { + var err error + var massif *massifs.MassifContext + if massif, err = cfgMassif(context.Background(), cmd, cCtx); err != nil { return err } @@ -35,12 +37,12 @@ func NewNodeScanCmd() *cli.Command { if err != nil { return err } - start := cmd.massif.LogStart() - count := cmd.massif.Count() + start := massif.LogStart() + count := massif.Count() for i := range count { - entry := cmd.massif.Data[start+i*massifs.ValueBytes : start+i*massifs.ValueBytes+massifs.ValueBytes] + entry := massif.Data[start+i*massifs.ValueBytes : start+i*massifs.ValueBytes+massifs.ValueBytes] if bytes.Equal(entry, targetValue) { - fmt.Printf("%d\n", i+cmd.massif.Start.FirstIndex) + fmt.Printf("%d\n", i+massif.Start.FirstIndex) return nil } } diff --git a/nodescan_test.go b/nodescan_test.go index cf47fbd..75010ee 100644 --- a/nodescan_test.go +++ b/nodescan_test.go @@ -10,13 +10,15 @@ import ( "testing" "time" + "github.com/datatrails/go-datatrails-common-api-gen/assets/v2/assets" v2assets "github.com/datatrails/go-datatrails-common-api-gen/assets/v2/assets" "github.com/datatrails/go-datatrails-common/logger" "github.com/datatrails/go-datatrails-merklelog/massifs" "github.com/datatrails/go-datatrails-merklelog/mmr" - "github.com/datatrails/go-datatrails-merklelog/mmrtesting" "github.com/datatrails/go-datatrails-simplehash/simplehash" - "github.com/datatrails/veracity/veracitytesting" + "github.com/datatrails/veracity/tests/testcontext" + "github.com/forestrie/go-merklelog-datatrails/datatrails" + "github.com/robinbryce/go-merklelog-provider-testing/mmrtesting" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" ) @@ -30,12 +32,16 @@ func TestNodeScanCmd(t *testing.T) { logger.Sugar.Infof("url: '%s'", url) // Create a single massif in the emulator + tc, logID, _, generated := testcontext.CreateLogBuilderContext( + t, 8, 1, + mmrtesting.WithTestLabelPrefix("TestNodeScanCmd"), + ) - tenantID := mmrtesting.DefaultGeneratorTenantIdentity - testContext, testGenerator, cfg := veracitytesting.NewAzuriteTestContext(t, "TestNodeScanCmd") + // tc.GenerateTenantLog(10) - eventsResponse := veracitytesting.GenerateTenantLog(&testContext, testGenerator, 10, tenantID, true, massifHeight, LeafTypePlain) - marshaledEvents, err := marshalEventsList(eventsResponse) + tenantID := datatrails.Log2TenantID(logID) + + marshaledEvents, eventsResponse, err := marshalEventsList(tc, generated) require.NoError(t, err) // Arbitrarily chose to look for leaf 7 @@ -70,7 +76,7 @@ func TestNodeScanCmd(t *testing.T) { // precise node is located by the mmrIndex and the leafIndex derived // from that. {testArgs: []string{ - "", "-u", "-", "-s", "devstoreaccount1", "-c", cfg.Container, "-t", tenantID, + "", "-s", "devstoreaccount1", "-c", tc.Cfg.Container, "-t", tenantID, "nodescan", "-m", "0", "-v", expectedLeafNodeValue}}, } @@ -85,17 +91,38 @@ func TestNodeScanCmd(t *testing.T) { } } -func marshalEventsList(eventsResponse []*v2assets.EventResponse) ([][]byte, error) { - marshaller := v2assets.NewFlatMarshalerForEvents() +func marshalEventsList( + tc *testcontext.TestContext, generated mmrtesting.GeneratedLeaves) ([][]byte, []*assets.EventResponse, error) { + marshaller := v2assets.NewFlatMarshalerForEvents() eventJsonList := make([][]byte, 0) - for _, event := range eventsResponse { + events := make([]*assets.EventResponse, len(generated.MMRIndices)) + + for iLeaf := 0; iLeaf < len(generated.MMRIndices); iLeaf++ { + event := datatrailsAssetEvent( + tc.T, generated.Encoded[iLeaf], generated.Args[iLeaf], + generated.MMRIndices[iLeaf], uint8(massifs.Epoch2038), + ) + events[iLeaf] = event eventJson, err := marshaller.Marshal(event) if err != nil { - return nil, err + return nil, nil, err } eventJsonList = append(eventJsonList, eventJson) } - return eventJsonList, nil + return eventJsonList, events, nil +} + +func datatrailsAssetEvent(t *testing.T, a any, args mmrtesting.AddLeafArgs, index uint64, epoch uint8) *assets.EventResponse { + ae, ok := a.(*assets.EventResponse) + require.True(t, ok, "expected *assets.EventResponse, got %T", a) + + ae.MerklelogEntry = &assets.MerkleLogEntry{ + Commit: &assets.MerkleLogCommit{ + Index: index, + Idtimestamp: massifs.IDTimestampToHex(args.ID, epoch), + }, + } + return ae } diff --git a/readlogactivity.go b/readlogactivity.go new file mode 100644 index 0000000..e1708a4 --- /dev/null +++ b/readlogactivity.go @@ -0,0 +1,36 @@ +package veracity + +import ( + "bufio" + "os" + "path/filepath" + + "github.com/datatrails/go-datatrails-merklelog/massifs/watcher" +) + +func filePathToLogMassifs(filePath string) ([]watcher.LogMassif, error) { + filePath, err := filepath.Abs(filePath) + if err != nil { + return nil, err + } + f, err := os.Open(filePath) + if err != nil { + return nil, err + } + return scannerToLogMassifs(bufio.NewScanner(f)) +} + +func stdinToDecodedLogMassifs() ([]watcher.LogMassif, error) { + return scannerToLogMassifs(bufio.NewScanner(os.Stdin)) +} + +func scannerToLogMassifs(scanner *bufio.Scanner) ([]watcher.LogMassif, error) { + var data []byte + for scanner.Scan() { + data = append(data, scanner.Bytes()...) 
+ } + if err := scanner.Err(); err != nil { + return nil, err + } + return watcher.LogMassifsFromData(data) +} diff --git a/readtenantactivity.go b/readtenantactivity.go deleted file mode 100644 index 060c3f8..0000000 --- a/readtenantactivity.go +++ /dev/null @@ -1,74 +0,0 @@ -package veracity - -import ( - "bufio" - "bytes" - "encoding/json" - "os" - "path/filepath" -) - -// TenantMassif identifies a combination of tenant and massif Typically it is -// used to convey that the massif is the most recently changed for that tenant. -// Note: it is a strict subset of the fields in TenantActivity, maintained seperately due to json marshalling -type TenantMassif struct { - // Massif is the massif index of the most recently appended massif - Massif int `json:"massifindex"` - // Tenant is the tenant identity of the most recently changed log - Tenant string `json:"tenant"` -} - -// TenantActivity represents the per tenant output of the watch command -type TenantActivity struct { - // Massif is the massif index of the most recently appended massif - Massif int `json:"massifindex"` - // Tenant is the tenant identity of the most recently changed log - Tenant string `json:"tenant"` - - // IDCommitted is the idtimestamp for the most recent entry observed in the log - IDCommitted string `json:"idcommitted"` - // IDConfirmed is the idtimestamp for the most recent entry to be sealed. - IDConfirmed string `json:"idconfirmed"` - LastModified string `json:"lastmodified"` - // MassifURL is the remote path to the most recently changed massif - MassifURL string `json:"massif"` - // SealURL is the remote path to the most recently changed seal - SealURL string `json:"seal"` -} - -func filePathToTenantMassifs(filePath string) ([]TenantMassif, error) { - filePath, err := filepath.Abs(filePath) - if err != nil { - return nil, err - } - f, err := os.Open(filePath) - if err != nil { - return nil, err - } - return scannerToTenantMassifs(bufio.NewScanner(f)) -} - -func stdinToDecodedTenantMassifs() ([]TenantMassif, error) { - return scannerToTenantMassifs(bufio.NewScanner(os.Stdin)) -} - -func scannerToTenantMassifs(scanner *bufio.Scanner) ([]TenantMassif, error) { - var data []byte - for scanner.Scan() { - data = append(data, scanner.Bytes()...) - } - if err := scanner.Err(); err != nil { - return nil, err - } - return TenantMassifsFromData(data) -} - -func TenantMassifsFromData(data []byte) ([]TenantMassif, error) { - decoder := json.NewDecoder(bytes.NewReader(data)) - var doc []TenantMassif - err := decoder.Decode(&doc) - if err == nil { - return doc, nil - } - return nil, err -} diff --git a/receipt.go b/receipt.go index 9b01c03..f13b3b9 100644 --- a/receipt.go +++ b/receipt.go @@ -4,12 +4,13 @@ import ( "context" "encoding/base64" "encoding/hex" + "errors" "fmt" "os" - "github.com/datatrails/go-datatrails-common/cbor" "github.com/datatrails/go-datatrails-merklelog/massifs" "github.com/urfave/cli/v2" + "github.com/veraison/go-cose" ) func NewReceiptCmd() *cli.Command { @@ -49,15 +50,34 @@ func NewReceiptCmd() *cli.Command { } log := func(m string, args ...any) { - cmd.log.Infof(m, args...) + cmd.Log.Infof(m, args...) 
 			}
 
-			dataUrl := cCtx.String("data-url")
+			if err = cfgMassifFmt(cmd, cCtx); err != nil {
+				return err
+			}
+
+			reader, err := newMassifReader(cmd, cCtx)
+			if err != nil {
+				return err
+			}
+
+			codec, err := massifs.NewCBORCodec()
+			if err != nil {
+				return err
+			}
+
+			var verifier cose.Verifier
 
-			reader, err := cfgReader(cmd, cCtx, dataUrl == "")
+			if cmd.CheckpointPublic.Public == nil {
+				return errors.New("checkpoint public key is required")
+			}
+
+			verifier, err = cose.NewVerifier(cmd.CheckpointPublic.Alg, cmd.CheckpointPublic.Public)
 			if err != nil {
 				return err
 			}
+
 			tenantIdentity := cCtx.String("tenant")
 			if tenantIdentity == "" {
 				return fmt.Errorf("tenant identity is required")
@@ -67,21 +87,10 @@ func NewReceiptCmd() *cli.Command {
 			mmrIndex := cCtx.Uint64("mmrindex")
 			massifHeight := uint8(cCtx.Int64("height"))
 
-			// TODO: local replica receipts, its not a big lift, the local reader used by replicatelogs
-			// implements the necessary interface for NewReceipt.
-			var cborCodec cbor.CBORCodec
-			if cborCodec, err = massifs.NewRootSignerCodec(); err != nil {
-				return err
-			}
-			sealReader := massifs.NewSignedRootReader(cmd.log, reader, cborCodec)
-			massifReader := massifs.NewMassifReader(
-				cmd.log, reader,
-				massifs.WithSealGetter(&sealReader),
-				massifs.WithCBORCodec(cborCodec),
-			)
-
 			signedReceipt, err := massifs.NewReceipt(
-				context.Background(), massifHeight, tenantIdentity, mmrIndex, &massifReader,
+				context.Background(), reader,
+				&codec, verifier,
+				massifHeight, mmrIndex,
 			)
 			if err != nil {
 				return err
diff --git a/replicatelogs.go b/replicatelogs.go
index f36f576..bc2c7f7 100644
--- a/replicatelogs.go
+++ b/replicatelogs.go
@@ -2,23 +2,22 @@ package veracity
 
 import (
 	"bufio"
-	"bytes"
 	"context"
-	"crypto/sha256"
 	"errors"
 	"fmt"
 	"strings"
 	"sync"
 	"time"
 
-	"github.com/datatrails/go-datatrails-common/cbor"
-	"github.com/datatrails/go-datatrails-common/cose"
 	"github.com/datatrails/go-datatrails-common/logger"
 	"github.com/datatrails/go-datatrails-merklelog/massifs"
+	"github.com/datatrails/go-datatrails-merklelog/massifs/storage"
 	"github.com/datatrails/go-datatrails-merklelog/massifs/watcher"
-	"github.com/datatrails/go-datatrails-merklelog/mmr"
 	"github.com/gosuri/uiprogress"
+	azblobs "github.com/robinbryce/go-merklelog-azure/blobs"
+	azwatcher "github.com/robinbryce/go-merklelog-azure/watcher"
 	"github.com/urfave/cli/v2"
+	"github.com/veraison/go-cose"
 	"golang.org/x/exp/rand"
 )
 
@@ -51,23 +50,6 @@ var (
 	ErrInconsistentUseOfPrefetchedSeal = errors.New("prefetching signed root reader used inconsistently")
 )
 
-// prefetchingSealReader pre-fetches the seal for the massif to avoid racing with the
-// sealer. If the massif is read first, the log can grow and a a new seal can
-// be applied to the *longer* log. At which point the previously read copy of
-// the massif will be "to short" for the seal.
-// See Bug#10530
-type prefetchingSealReader struct {
-	msg            *cose.CoseSign1Message
-	state          massifs.MMRState
-	tenantIdentity string
-	massifIndex    uint32
-}
-
-type changeCollector struct {
-	log         logger.Logger
-	watchOutput string
-}
-
 // NewReplicateLogsCmd updates a local replica of a remote log, verifying the mutual consistency of the two before making any changes.
// //nolint:gocognit @@ -97,12 +79,16 @@ in the publicly accessible remote storage`, Value: ".", }, &cli.StringFlag{ - Name: "sealer-key", - Usage: `to ensure the remote seal is signed by the correct -key, set this to the public datatrails sealing key, -having obtained its value from a source you trust`, + Name: "checkpoint-public", + Usage: `A COSE Key format file, containing the key to use to verify checkpoint signatures, ES2 only.`, Aliases: []string{"pub"}, }, + &cli.StringFlag{ + Name: "checkpoint-jwks", + Usage: `A JWKS format file, whose *last* entry is the key to use to verify checkpoint signatures. ES only`, + Aliases: []string{"jwks"}, + }, + &cli.StringFlag{ Name: "changes", Usage: ` @@ -141,16 +127,25 @@ By default transient errors are re-tried without limit, and if the error is 429, Action: func(cCtx *cli.Context) error { cmd := &CmdCtx{} - // note: we don't use cfgMassifReader here because it does not - // support setting replicaDir for the local reader, and infact we - // need to configure both a local and a remote reader. - var err error // The loggin configuration is safe to share accross go routines. if err = cfgLogging(cmd, cCtx); err != nil { return err } + if err = CfgKeys(cmd, cCtx); err != nil { + return err + } + + dataUrl := cCtx.String("data-url") + if dataUrl == "" && !IsStorageEmulatorEnabled(cCtx) { + dataUrl = DefaultRemoteMassifURL + } + if dataUrl == "" { + return fmt.Errorf("%w: remote-url is required", ErrRequiredOption) + } + cmd.RemoteURL = dataUrl + // There isn't really a better context. We could implement user // defined timeouts for "lights out/ci" use cases in future. Humans can ctrl-c changes, err := readTenantMassifChanges(context.Background(), cCtx, cmd) @@ -178,14 +173,13 @@ By default transient errors are re-tried without limit, and if the error is 429, // replicateChanges replicate the changes for the provided slice of tenants. // Paralelism is limited by breaking the total changes into smaller slices and calling this function -func replicateChanges(cCtx *cli.Context, cmd *CmdCtx, changes []TenantMassif, progress Progresser) error { - +func replicateChanges(cCtx *cli.Context, cmd *CmdCtx, changes []watcher.LogMassif, progress Progresser) error { var wg sync.WaitGroup errChan := make(chan error, len(changes)) // buffered so it doesn't block for _, change := range changes { wg.Add(1) - go func(change TenantMassif, errChan chan<- error) { + go func(change watcher.LogMassif, errChan chan<- error) { defer wg.Done() defer progress.Completed() @@ -202,7 +196,7 @@ func replicateChanges(cCtx *cli.Context, cmd *CmdCtx, changes []TenantMassif, pr // defined timeouts for "lights out/ci" use cases in future. 
Humans can ctrl-c err = replicator.ReplicateVerifiedUpdates( context.Background(), - change.Tenant, startMassif, endMassif, + startMassif, endMassif, ) if err == nil { return @@ -210,7 +204,7 @@ func replicateChanges(cCtx *cli.Context, cmd *CmdCtx, changes []TenantMassif, pr // 429 is the only transient error we currently re-try var retryDelay time.Duration - retryDelay, ok := massifs.IsRateLimiting(err) + retryDelay, ok := azblobs.IsRateLimiting(err) if !ok || retries == 0 { // not transient errChan <- err @@ -220,10 +214,10 @@ func replicateChanges(cCtx *cli.Context, cmd *CmdCtx, changes []TenantMassif, pr retryDelay = defaultRetryDelay(err) } - // underflow will actually terminate the loop, but that would have been running for an infeasable amount of time + // underflow will actually terminate the loop, but that would have been running for an infeasible amount of time retries-- // in the default case, remaining is always reported as -1 - cmd.log.Infof("retrying in %s, remaining: %d", retryDelay, max(-1, retries)) + cmd.Log.Infof("retrying in %s, remaining: %d", retryDelay, max(-1, retries)) } }(change, errChan) } @@ -234,23 +228,22 @@ func replicateChanges(cCtx *cli.Context, cmd *CmdCtx, changes []TenantMassif, pr var errs []error for err := range errChan { - cmd.log.Infof("%v", err) + cmd.Log.Infof("%v", err) errs = append(errs, err) } if len(errs) > 0 { return errs[0] } if len(changes) == 1 { - cmd.log.Infof("replication complete for tenant %s", changes[0].Tenant) + cmd.Log.Infof("replication complete for log %x", changes[0].LogID) } else { - cmd.log.Infof("replication complete for %d tenants", len(changes)) + cmd.Log.Infof("replication complete for %d logs", len(changes)) } return nil } -func initReplication(cCtx *cli.Context, cmd *CmdCtx, change TenantMassif) (*VerifiedReplica, uint32, uint32, error) { - - replicator, err := NewVerifiedReplica(cCtx, cmd.Clone()) +func initReplication(cCtx *cli.Context, cmd *CmdCtx, change watcher.LogMassif) (*VerifiedReplica, uint32, uint32, error) { + replicator, err := NewVerifiedReplica(cCtx, cmd.Clone(), change.LogID) if err != nil { return nil, 0, 0, err } @@ -268,361 +261,87 @@ func defaultRetryDelay(_ error) time.Duration { } func newProgressor(cCtx *cli.Context, barName string, increments int) Progresser { - if !cCtx.Bool("progress") { return NewNoopProgress() } return NewStagedProgress(barName, increments) } -type VerifiedContextReader interface { - massifs.VerifiedContextReader -} - type VerifiedReplica struct { - cCtx *cli.Context - log logger.Logger - writeOpener massifs.WriteAppendOpener - localReader massifs.ReplicaReader - remoteReader MassifReader - rootReader massifs.SealGetter - cborCodec cbor.CBORCodec + massifs.VerifyingReplicator + cCtx *cli.Context + log logger.Logger } func NewVerifiedReplica( - cCtx *cli.Context, cmd *CmdCtx, + cCtx *cli.Context, cmd *CmdCtx, logID storage.LogID, ) (*VerifiedReplica, error) { - dataUrl := cCtx.String("data-url") - reader, err := cfgReader(cmd, cCtx, dataUrl == "") - if err != nil { - return nil, err - } - if err = cfgRootReader(cmd, cCtx); err != nil { - return nil, err - } - - massifHeight := cCtx.Int64("height") - if massifHeight > massifHeightMax { - return nil, fmt.Errorf("massif height must be less than 256") - } + var err error - cache, err := massifs.NewLogDirCache(logger.Sugar, NewFileOpener()) - if err != nil { + if err := cfgMassifFmt(cmd, cCtx); err != nil { return nil, err } - localReader, err := massifs.NewLocalReader(logger.Sugar, cache) - if err != nil { - return nil, err - } - 
- opts := []massifs.DirCacheOption{ - massifs.WithDirCacheReplicaDir(cCtx.String("replicadir")), - massifs.WithDirCacheMassifLister(NewDirLister()), - massifs.WithDirCacheSealLister(NewDirLister()), - massifs.WithReaderOption(massifs.WithMassifHeight(uint8(massifHeight))), - massifs.WithReaderOption(massifs.WithSealGetter(&localReader)), - massifs.WithReaderOption(massifs.WithCBORCodec(cmd.cborCodec)), - } - // This will require that the remote seal is signed by the key - // provided here. If it is not, even if the seal is valid, the - // verification will fail with a suitable error. - pemString := cCtx.String("sealer-key") - if pemString != "" { - pem, err := DecodeECDSAPublicString(pemString) - if err != nil { - return nil, err - } - opts = append(opts, massifs.WithReaderOption(massifs.WithTrustedSealerPub(pem))) - } - - // For the localreader, the seal getter is the local reader itself. - // So we need to make use of ReplaceOptions on the cache, so we can - // provide the options after we have created the local reader. - cache.ReplaceOptions(opts...) - - remoteReader := massifs.NewMassifReader( - logger.Sugar, reader, - ) - - return &VerifiedReplica{ - cCtx: cCtx, - log: logger.Sugar, - writeOpener: NewFileWriteOpener(), - localReader: &localReader, - remoteReader: &remoteReader, - rootReader: &cmd.rootReader, - cborCodec: cmd.cborCodec, - }, nil -} - -// ReplicateVerifiedUpdates confirms that any additions to the remote log are -// consistent with the local replica Only the most recent local massif and seal -// need be retained for verification purposes. If independent, off line, -// verification of inclusion is desired, retain as much of the log as is -// interesting. -func (v *VerifiedReplica) ReplicateVerifiedUpdates( - ctx context.Context, - tenantIdentity string, startMassif, endMassif uint32) error { - - isNilOrNotFound := func(err error) bool { - if err == nil { - return true - } - if errors.Is(err, massifs.ErrLogFileSealNotFound) { - return true - } - if errors.Is(err, massifs.ErrLogFileMassifNotFound) { - return true - } - return false - } - - // on demand promotion of a v0 state to a v1 state, for compatibility with the consistency check. - trustedBaseState := func(local *massifs.VerifiedContext) (massifs.MMRState, error) { - - if local.MMRState.Version > int(massifs.MMRStateVersion0) { - return local.MMRState, nil - } - - // At this point we have a local seal in v0 format and we expect the - // remote seal to be in v1 format. - // We need to promote the legacy base state to a V1 state for the - // consistency check. This is a one way operation, and the legacy seal - // root is discarded. Once the seal for the open massif is upgraded, - // this case will never be encountered again for that tenant. - - peaks, err := mmr.PeakHashes(local, local.MMRState.MMRSize-1) - if err != nil { - return massifs.MMRState{}, err - } - root := mmr.HashPeaksRHS(sha256.New(), peaks) - if !bytes.Equal(root, local.MMRState.LegacySealRoot) { - return massifs.MMRState{}, fmt.Errorf("legacy seal root does not match the bagged peaks") - } - state := local.MMRState - state.Version = int(massifs.MMRStateVersion1) - // Keep the legacy seal root so that we can verify in the case where the remote is a V0 seal - // state.LegacySealRoot = nil - state.Peaks = peaks - return state, nil - } - - if err := v.localReader.EnsureReplicaDirs(tenantIdentity); err != nil { - return err - } - - // Read the most recently verified state from the local store. 
The - // verification ensures the local replica has not been corrupted, but this - // check trusts the seal stored locally with the head massif - local, err := v.localReader.GetHeadVerifiedContext(ctx, tenantIdentity) - if !isNilOrNotFound(err) { - return err - } - - // We always verify up to the requested massif, but we do not re-verify - // massifs we have already verified and replicated localy. If the last - // locally replicated masif is ahead of the endMassif we do nothing here. - // - // The --ancestors option is used to ensure there is a minimum number of - // verified massifs replicated locally, and influnces the startMassif to - // acheive this. - // - // The startMassif is the greater of the requested start and the massif - // index of the last locally verified massif. Our verification always reads - // the remote massifs starting from the startMassif. - // - // In the loop below we ensure three key things: - // 1. If there is a local replica of the remote, we ensure the remote is - // consistent with the replica. - // 2. If the remote starts a new massif, and we locally have its - // predecessor, we ensure the remote is consistent with the local predecessor. - // 3. If there is no local replica, we create one by copying the the remote. - // - // Note that we arrange things so that local is always the last avaible - // local massif, or nil. When dealing with the remote corresponding to - // startMassif, the local is *either* the predecessor or is the incomplete - // local replica of the remote being considered. After the first remote is - // dealt with, local is always the predecessor. - - if local != nil { - - // Start from the next massif after the last verified massif and do not - // re-verify massifs we have already verified and replicated, - if startMassif > local.Start.MassifIndex+1 { - // if the start of the ancestors is more than one massif ahead of - // the local, then we start afresh. - local = nil - } else { - startMassif = local.Start.MassifIndex - } - } - - for i := startMassif; i <= endMassif; i++ { - - // Note: we have to fetch the seal before the massif, otherwise we can lose a rase with the builder - // See bug#10530 - remoteSealReader, err := NewPrefetchingSealReader(ctx, v.rootReader, tenantIdentity, i) - if err != nil { - return err - } - remoteVerifyOpts := []massifs.ReaderOption{ - massifs.WithCBORCodec(v.cborCodec), - massifs.WithSealGetter(remoteSealReader), - } - if local != nil { - var baseState massifs.MMRState - // Promote the trusted base state to a V1 state if it is a V0 state. - baseState, err = trustedBaseState(local) - if err != nil { - return err - } - remoteVerifyOpts = append(remoteVerifyOpts, massifs.WithTrustedBaseState(baseState)) - } - - // On the first iteration local is *either* the predecessor to - // startMassif or it is the, as yet, incomplete local replica of it. - // After the first iteration, local is always the predecessor. (If the - // remote is still incomplte it means there is no subseqent massif to - // read) - remote, err := v.remoteReader.GetVerifiedContext( - ctx, tenantIdentity, uint64(i), remoteVerifyOpts...) - if err != nil { - // both the remote massif and it's seal must be present for the - // verification to succeed, so we don't filter using isBlobNotFound - // here. 
- return err - } - - // read the local massif, if it exists, reading at the end of the loop - local, err = v.localReader.GetVerifiedContext(ctx, tenantIdentity, uint64(i)) - if !isNilOrNotFound(err) { - return err - } - - // copy the remote locally, safely replacing the coresponding local if - // one exists. if the local is replaced (or created) without error, the - // remote verified context becomes the new local. - local, err = v.replicateVerifiedContext(local, remote) - if err != nil { - return err - } + if cmd.MassifFmt.MassifHeight > massifHeightMax { + return nil, fmt.Errorf("massif height must be less than 256") } - - return nil -} - -// replicateVerifiedContext is used to replicate a remote massif which may be an -// extension of a previously verified local copy. -// -// If local is nil, this method simply replicates the verified remote unconditionally. -// -// Otherwise, local and remote are required to be the same tenant and the same massif. -// This method then deals with ensuring the remote is a consistent extension of -// local before replacing the previously verified local. -// -// This method has no side effects in the case where the remote and the local -// are verified to be identical, the original local instance is retained. -func (v *VerifiedReplica) replicateVerifiedContext( - local *massifs.VerifiedContext, remote *massifs.VerifiedContext) (*massifs.VerifiedContext, error) { - - if local == nil { - return nil, v.localReader.ReplaceVerifiedContext(remote, v.writeOpener) + if cmd.MassifFmt.MassifHeight == 0 { + return nil, fmt.Errorf("massif height must be initialized") } - // note: return a nil local for all error cases, the caller should not carry on - if local.TenantIdentity != remote.TenantIdentity { - return nil, fmt.Errorf("can't replace, tenant identies don't match: local %s vs remote %s", local.TenantIdentity, remote.TenantIdentity) + if cmd.RemoteURL == "" { + return nil, fmt.Errorf("%w: remote-url is required", ErrRequiredOption) } - if local.Start.MassifIndex != remote.Start.MassifIndex { - return nil, fmt.Errorf( - "can't replace, massif indices don't match: local %d vs remote %d", - local.Start.MassifIndex, remote.Start.MassifIndex) + reader, err := cfgReader(cmd, cCtx, cmd.RemoteURL) + if err != nil { + return nil, err } - tenantIdentity := local.TenantIdentity - massifIndex := local.Start.MassifIndex + dataUrl := cmd.RemoteURL // may be azurite in emulator mode, which overrides - if len(local.Data) > len(remote.Data) { - // the remote log has been truncated since we last looked - return nil, fmt.Errorf("%w: %s, massif=%d", ErrRemoteLogTruncated, tenantIdentity, massifIndex) + remoteReader, err := NewCmdStorageProviderAzure(context.Background(), cCtx, cmd, dataUrl, reader) + if err != nil { + return nil, err } - - // if the remote and local are the same, we are done, provided the roots still match - if len(local.Data) == len(remote.Data) { - // note: the length equal check is elevated so we only write to local - // disc if there are changes. this duplicates a check in - // verifiedStateEqual in the interest of avoiding accidents due to - // future refactorings. 
- if !verifiedStateEqual(local, remote) { - return nil, fmt.Errorf("%w: %s, massif=%d", ErrRemoteLogInconsistentRootState, tenantIdentity, massifIndex) - } - return local, nil + if err = remoteReader.SelectLog(context.Background(), logID); err != nil { + return nil, fmt.Errorf("failed to select remote log %s: %w", logID, err) } - - err := v.localReader.ReplaceVerifiedContext(remote, v.writeOpener) + localReader, err := NewCmdStorageProviderFS( + context.Background(), cCtx, cmd, cCtx.String("replicadir"), true) if err != nil { return nil, err } - // We have succesfully the local data with the data from the remote. The - // remote vc is now equivalent to the local - return remote, nil -} - -func verifiedStateEqual(a *massifs.VerifiedContext, b *massifs.VerifiedContext) bool { - - var err error - - // There is no difference in the log format between the two versions currently supported. - if len(a.Data) != len(b.Data) { - return false + if err = localReader.SelectLog(context.Background(), logID); err != nil { + return nil, fmt.Errorf("failed to select local log %s: %w", logID, err) } - fromRoots := a.ConsistentRoots - toRoots := b.ConsistentRoots - // If either state is a V0 state, compare the legacy seal roots - if a.MMRState.Version == int(massifs.MMRStateVersion0) || b.MMRState.Version == int(massifs.MMRStateVersion0) { - rootA := peakBaggedRoot(a.MMRState) - rootB := peakBaggedRoot(b.MMRState) - if !bytes.Equal(rootA, rootB) { - return false - } - if a.MMRState.Version == int(massifs.MMRStateVersion0) { - fromRoots, err = mmr.PeakHashes(a, a.MMRState.MMRSize-1) - if err != nil { - return false - } - } - if b.MMRState.Version == int(massifs.MMRStateVersion0) { - toRoots, err = mmr.PeakHashes(b, b.MMRState.MMRSize-1) - if err != nil { - return false - } - } - } + var verifier cose.Verifier - // If both states are V1 states, compare the peaks - if len(fromRoots) != len(toRoots) { - return false - } - for i := range len(fromRoots) { - if !bytes.Equal(fromRoots[i], toRoots[i]) { - return false + if cmd.CheckpointPublic.Public != nil { + verifier, err = cose.NewVerifier(cmd.CheckpointPublic.Alg, cmd.CheckpointPublic.Public) + if err != nil { + return nil, err } } - return true + + return &VerifiedReplica{ + cCtx: cCtx, + log: logger.Sugar, + VerifyingReplicator: massifs.VerifyingReplicator{ + CBORCodec: cmd.CBORCodec, + COSEVerifier: verifier, + Sink: localReader, + Source: remoteReader, + }, + }, nil } -// peakBaggedRoot is used to obtain an MMRState V0 bagged root from a V1 accumulator peak list. -// If a v0 state is provided, the root is returned as is. 
-func peakBaggedRoot(state massifs.MMRState) []byte { - if state.Version < int(massifs.MMRStateVersion1) { - return state.LegacySealRoot - } - return mmr.HashPeaksRHS(sha256.New(), state.Peaks) +type changeCollector struct { + log logger.Logger + watchOutput string } func (c *changeCollector) Logf(msg string, args ...any) { @@ -638,30 +357,31 @@ func (c *changeCollector) Outf(msg string, args ...any) { func newWatchConfig(cCtx *cli.Context, cmd *CmdCtx) (WatchConfig, error) { cfg := WatchConfig{ - WatchCount: 1, - WatchConfig: watcher.WatchConfig{ - Horizon: tenYearsOfHours, + WatchConfig: azwatcher.WatchConfig{ + // Latest: cCtx.Bool("latest"), + WatchCount: 1, + Horizon: tenYearsOfHours, }, } - err := watcher.ConfigDefaults(&cfg.WatchConfig) + err := azwatcher.ConfigDefaults(&cfg.WatchConfig) if err != nil { return WatchConfig{}, err } - cfg.ReaderURL = cmd.readerURL + cfg.ObjectPrefixURL = cmd.RemoteURL - tenants := CtxGetTenantOptions(cCtx) - if len(tenants) == 0 { + logids := CtxGetLogOptions(cCtx) + if len(logids) == 0 { return cfg, nil } - cfg.WatchTenants = make(map[string]bool) - for _, t := range tenants { - cfg.WatchTenants[strings.TrimPrefix(t, tenantPrefix)] = true + + cfg.WatchLogs = make(map[string]bool) + for _, lid := range logids { + cfg.WatchLogs[string(lid)] = true } return cfg, nil } -func readTenantMassifChanges(ctx context.Context, cCtx *cli.Context, cmd *CmdCtx) ([]TenantMassif, error) { - +func readTenantMassifChanges(ctx context.Context, cCtx *cli.Context, cmd *CmdCtx) ([]watcher.LogMassif, error) { if cCtx.IsSet("latest") { // This is because people get tripped up with the `veracity watch -z 90000h | veracity replicate-logs` idiom, // Its such a common use case that we should just make it work. @@ -669,61 +389,53 @@ func readTenantMassifChanges(ctx context.Context, cCtx *cli.Context, cmd *CmdCtx if err != nil { return nil, err } - forceProdUrl := cCtx.String("data-url") == "" - reader, err := cfgReader(cmd, cCtx, forceProdUrl) + if cmd.RemoteURL == "" { + return nil, fmt.Errorf("%w: remote-url is required", ErrRequiredOption) + } + + reader, err := cfgReader(cmd, cCtx, cmd.RemoteURL) if err != nil { return nil, err } + collator := azwatcher.NewLogTailCollator( + func(storagePath string) storage.LogID { + return storage.ParsePrefixedLogID("tenant/", storagePath) + }, + storage.ObjectIndexFromPath, + ) + watcher, err := azwatcher.NewWatcher(cfg.WatchConfig) + if err != nil { + return nil, err + } + wc := &WatcherCollator{ + Watcher: watcher, + LogTailCollator: collator, + } - collector := &changeCollector{log: cmd.log} - err = WatchForChanges(ctx, cfg, reader, collector) + collector := &changeCollector{log: cmd.Log} + err = azwatcher.WatchForChanges(ctx, cfg.WatchConfig, wc, reader, collector) if err != nil { return nil, err } - return scannerToTenantMassifs(bufio.NewScanner(strings.NewReader(collector.watchOutput))) + return scannerToLogMassifs(bufio.NewScanner(strings.NewReader(collector.watchOutput))) } - tenants := CtxGetTenantOptions(cCtx) - if len(tenants) == 1 { - return []TenantMassif{{Tenant: tenants[0], Massif: cCtx.Int("massif")}}, nil + logs := CtxGetLogOptions(cCtx) + if len(logs) == 1 { + return []watcher.LogMassif{{LogID: logs[0], Massif: cCtx.Int("massif")}}, nil } - if len(tenants) > 1 { - return nil, fmt.Errorf("multiple tenants may only be used with --latest") + if len(logs) > 1 { + return nil, fmt.Errorf("multiple logs may only be used with --latest") } - // If --changes is set the tenants and massif indices are read from the identified file + 
// If --changes is set the logs and massif indices are read from the identified file
 changesFile := cCtx.String("changes")
 if changesFile != "" {
- return filePathToTenantMassifs(changesFile)
+ return filePathToLogMassifs(changesFile)
 }
 
 // No explicit config and --all not set, read from stdin
- return stdinToDecodedTenantMassifs()
-}
-
-func NewPrefetchingSealReader(ctx context.Context, sealGetter massifs.SealGetter, tenantIdentity string, massifIndex uint32) (*prefetchingSealReader, error) {
-
- msg, state, err := sealGetter.GetSignedRoot(ctx, tenantIdentity, massifIndex)
- if err != nil {
- return nil, err
- }
- reader := prefetchingSealReader{
- msg: msg,
- state: state,
- tenantIdentity: tenantIdentity,
- massifIndex: massifIndex,
- }
- return &reader, nil
-}
-
-func (r *prefetchingSealReader) GetSignedRoot(ctx context.Context, tenantIdentity string, massifIndex uint32, opts ...massifs.ReaderOption) (*cose.CoseSign1Message, massifs.MMRState, error) {
- if tenantIdentity != r.tenantIdentity {
- return nil, massifs.MMRState{}, fmt.Errorf("%w: tenant requested: %s, tenant prefetched: %s", ErrInconsistentUseOfPrefetchedSeal, tenantIdentity, r.tenantIdentity)
- }
- if massifIndex != r.massifIndex {
- return nil, massifs.MMRState{}, fmt.Errorf("%w: massif requested: %d, massif prefetched: %d", ErrInconsistentUseOfPrefetchedSeal, massifIndex, r.massifIndex)
- }
- return r.msg, r.state, nil
+ return stdinToDecodedLogMassifs()
 }
diff --git a/scitt/mandatory.go b/scitt/mandatory.go
new file mode 100644
index 0000000..78d3331
--- /dev/null
+++ b/scitt/mandatory.go
@@ -0,0 +1,149 @@
+package scitt
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/datatrails/go-datatrails-common/cose"
+ commoncose "github.com/datatrails/go-datatrails-common/cose"
+)
+
+const (
+ ProblemTitleServiceSpecific = "Service Specific"
+ ProblemTitleOperationNotFound = "Operation Not Found"
+ ProblemTitleOperationFailed = "Operation Failed"
+ ProblemTitleTransient = "Transient Service Issue"
+ ProblemTitleRejected = "Rejected"
+ ProblemTitleToManyRequests = "Too Many Requests"
+ ProblemTitleConfirmationMissing = "Confirmation Missing"
+ ProblemInstanceRejectedByRegistrationPolicy = "urn:ietf:params:scitt:error:signed-statement:rejected-by-registration-policy"
+ ProblemInstanceConfirmationMissing = "urn:ietf:params:scitt:error:signed-statement:confirmation-missing"
+ ProblemInstanceToManyRequests = "urn:ietf:params:scitt:error:tooManyRequests"
+ ProblemInstanceTransientAndInternal = "urn:ietf:params:scitt:error:transient-and-internal"
+ ProblemInstanceServiceSpecific = "urn:ietf:params:scitt:error:service-specific"
+ ProblemInstanceNotFound = "urn:ietf:params:scitt:error:notFound"
+)
+
+// mandatory checks required of any transparency service on registration
+
+type CheckedStatement struct {
+ Claims *cose.CWTClaims
+ Statement *cose.CoseSign1Message
+}
+
+type RegistrationPolicy struct {
+ RequireCNFPublic bool
+ // We do not support x509 verification at this time
+ RequireX509 bool
+ AllowUnverified bool
+}
+
+// RegistrationPolicyUnverified returns a RegistrationPolicy that allows unverified statements.
+// It can be used to obtain decoded statements that otherwise pass the mandatory checks.
+func RegistrationPolicyUnverified() RegistrationPolicy {
+ return RegistrationPolicy{
+ RequireCNFPublic: false,
+ RequireX509: false,
+ AllowUnverified: true,
+ }
+}
+
+func RegistrationPolicyVerified() RegistrationPolicy {
+ return RegistrationPolicy{
+ RequireCNFPublic: true,
+ RequireX509: false,
+ AllowUnverified: false,
+ }
+}
+
+func RegistrationMandatoryChecks(
+ signedStatement []byte,
+ policy RegistrationPolicy,
+) (CheckedStatement, *ConciseProblemDetails) {
+ if policy.RequireX509 {
+ return CheckedStatement{}, &ConciseProblemDetails{
+ Title: ProblemTitleRejected,
+ Detail: "Signed Statement not accepted by the current Registration Policy. X509 verification is not supported",
+ Instance: ProblemInstanceRejectedByRegistrationPolicy,
+ ResponseCode: CoAPBadRequest,
+ }
+ }
+
+ // CBOR decode the signed statement
+ statement, err := commoncose.NewCoseSign1MessageFromCBOR(signedStatement)
+
+ if err != nil {
+ return CheckedStatement{}, &ConciseProblemDetails{
+ Title: ProblemTitleRejected,
+ Detail: fmt.Sprintf("Signed Statement not accepted by the current Registration Policy. Not a valid COSE Sign1 message: %v", err),
+ Instance: ProblemInstanceRejectedByRegistrationPolicy,
+ ResponseCode: CoAPBadRequest,
+ }
+ }
+ // Begin: Mandatory Registration checks
+
+ // verify cose_sign1 message:
+ //
+ // Per - https://ietf-wg-scitt.github.io/draft-ietf-scitt-architecture/draft-ietf-scitt-architecture.html#section-4.1.1.1
+ // Registration "MUST, at a minimum, syntactically check the Issuer of the Signed Statement by cryptographically verifying the COSE signature according to"
+
+ err = statement.VerifyWithCWTPublicKey(nil)
+
+ // if the error is because there is no cwt issuer, ensure we communicate that
+ if errors.Is(err, commoncose.ErrCWTClaimsNoIssuer) {
+ return CheckedStatement{}, &ConciseProblemDetails{
+ Title: ProblemTitleRejected,
+ Detail: "Signed Statement not accepted by the current Registration Policy. issuer claim not present in CWT",
+ Instance: ProblemInstanceRejectedByRegistrationPolicy,
+ ResponseCode: CoAPBadRequest,
+ }
+ }
+
+ if errors.Is(err, commoncose.ErrCWTClaimsNoSubject) {
+ return CheckedStatement{}, &ConciseProblemDetails{
+ Title: ProblemTitleRejected,
+ Detail: "Signed Statement not accepted by the current Registration Policy. subject claim not present in CWT",
+ Instance: ProblemInstanceRejectedByRegistrationPolicy,
+ ResponseCode: CoAPBadRequest,
+ }
+ }
+
+ // if the error is because there is no cwt verification key, ensure we communicate that
+ if errors.Is(err, commoncose.ErrCWTClaimsNoCNF) {
+
+ if policy.RequireCNFPublic || !policy.AllowUnverified {
+ return CheckedStatement{}, &ConciseProblemDetails{
+ Title: ProblemTitleConfirmationMissing,
+ Detail: fmt.Sprintf("Signed Statement did not contain proof of possession: %v", err),
+ Instance: ProblemInstanceConfirmationMissing,
+ ResponseCode: CoAPBadRequest,
+ }
+ }
+ err = nil
+ }
+
+ if err != nil {
+ return CheckedStatement{}, &ConciseProblemDetails{
+ Title: ProblemTitleRejected,
+ Detail: fmt.Sprintf("Signed Statement not accepted by the current Registration Policy. Verification failed: %v", err),
+ Instance: ProblemInstanceRejectedByRegistrationPolicy,
+ ResponseCode: CoAPBadRequest,
+ }
+ }
+
+ cwtClaims, err := statement.CWTClaimsFromProtectedHeader()
+ if err != nil {
+ return CheckedStatement{}, &ConciseProblemDetails{
+ Title: ProblemTitleRejected,
+ Detail: fmt.Sprintf("Signed Statement not accepted by the current Registration Policy. CWT Claims missing or invalid: %v", err),
+ Instance: ProblemInstanceRejectedByRegistrationPolicy,
+ ResponseCode: CoAPBadRequest,
+ }
+ }
+ // End: Mandatory Registration checks
+
+ return CheckedStatement{
+ Claims: cwtClaims,
+ Statement: statement,
+ }, nil
+}
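+
+// The sketch below is added for illustration only; it is not part of the
+// original change. It shows the intended calling pattern: run the mandatory
+// checks under a chosen policy and convert any ConciseProblemDetails failure
+// into a plain error. The signedStatement bytes are assumed to be a COSE
+// Sign1 message the caller has already read.
+//
+//	func checkStatement(signedStatement []byte) (CheckedStatement, error) {
+//	    checked, cpd := RegistrationMandatoryChecks(signedStatement, RegistrationPolicyVerified())
+//	    if cpd != nil {
+//	        // the problem details carry a CoAP response code and an instance URN
+//	        return CheckedStatement{}, fmt.Errorf("%s: %s", cpd.Title, cpd.Detail)
+//	    }
+//	    return checked, nil
+//	}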
diff --git a/scitt/rfc9290.go b/scitt/rfc9290.go
new file mode 100644
index 0000000..fcaf636
--- /dev/null
+++ b/scitt/rfc9290.go
@@ -0,0 +1,120 @@
+package scitt
+
+import (
+ "github.com/fxamacker/cbor/v2"
+)
+
+// public scitt support for https://www.rfc-editor.org/rfc/rfc9290.html
+
+const (
+ RFC9290MediaType = "application/concise-problem-details+cbor"
+)
+
+const (
+
+ // success coap codes https://www.rfc-editor.org/rfc/rfc7252#section-12.1.2
+ CoAPCreated = 201
+ CoAPDeleted = 202
+ CoAPValid = 203
+ CoAPChanged = 204
+ CoAPContent = 205
+
+ // non-success coap codes per https://www.rfc-editor.org/rfc/rfc7252#section-12.1.2
+
+ CoAPBadRequest = 400
+ CoAPUnauthorized = 401
+ CoAPBadOption = 402
+ CoAPForbidden = 403
+ CoAPNotFound = 404
+ CoAPMethodNotAllowed = 405
+ CoAPNotAcceptable = 406
+ CoAPPreConditionFailed = 412
+ CoAPRequestEntityTooLarge = 413
+ CoAPUnsupportedContentFormat = 415
+ CoAPInternalServerError = 500
+ CoAPNotImplemented = 501
+ CoAPBadGateway = 502
+ CoAPServiceUnavailable = 503
+ CoAPGatewayTimeout = 504
+ CoAPProxyingNotSupported = 505
+)
+
+var (
+ // Note: this value is established by code in the test TestProblemDetailsWriteResponseError
+ ProblemDetailsEncodingError = []byte{
+ 163, 32, 116, 101, 114, 114, 111, 114, 32, 101, 110, 99, 111, 100,
+ 105, 110, 103, 32, 101, 114, 114, 111, 114, 33, 120, 58, 84, 104,
+ 105, 115, 32, 105, 115, 32, 97, 32, 115, 101, 114, 118, 101, 114,
+ 32, 101, 114, 114, 111, 114, 32, 101, 110, 99, 111, 100, 105, 110,
+ 103, 32, 116, 104, 101, 32, 112, 114, 111, 98, 108, 101, 109, 32,
+ 100, 101, 116, 97, 105, 108, 115, 32, 105, 116, 115, 101, 108, 102,
+ 35, 25, 1, 244,
+ }
+
+ problemDetailsEncodingError = ConciseProblemDetails{
+ Title: "error encoding error",
+ Detail: "This is a server error encoding the problem details itself",
+ ResponseCode: CoAPInternalServerError,
+ }
+
+ CoAPResponseCodes = map[uint]bool{
+ CoAPCreated: true,
+ CoAPDeleted: true,
+ CoAPValid: true,
+ CoAPChanged: true,
+ CoAPContent: true,
+ CoAPBadRequest: true,
+ CoAPUnauthorized: true,
+ CoAPBadOption: true,
+ CoAPForbidden: true,
+ CoAPNotFound: true,
+ CoAPMethodNotAllowed: true,
+ CoAPNotAcceptable: true,
+ CoAPPreConditionFailed: true,
+ CoAPRequestEntityTooLarge: true,
+ CoAPUnsupportedContentFormat: true,
+ CoAPInternalServerError: true,
+ CoAPNotImplemented: true,
+ CoAPBadGateway: true,
+ CoAPServiceUnavailable: true,
+ CoAPGatewayTimeout: true,
+ CoAPProxyingNotSupported: true,
+ }
+)
+
+// ConciseProblemDetails encodes information about an error according to RFC 9290
+// See https://www.rfc-editor.org/rfc/rfc9290.html
+type ConciseProblemDetails struct {
+ Title string `cbor:"-1,keyasint,omitempty"`
+ Detail string `cbor:"-2,keyasint,omitempty"`
+ Instance string `cbor:"-3,keyasint,omitempty"`
+ ResponseCode uint64 `cbor:"-4,keyasint,omitempty"`
+ BaseUri string `cbor:"-5,keyasint,omitempty"`
+ BaseRtl string `cbor:"-7,keyasint,omitempty"`
+}
+
+func (p ConciseProblemDetails) MustMarshalCBOR() []byte {
+ content, err := cbor.Marshal(p)
+ if err != nil {
+ content = ProblemDetailsEncodingError
+ }
+
+ return content
+}
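+
+// The sketch below is added for illustration only; it is not part of the
+// original change. It shows a ConciseProblemDetails round-tripping through
+// CBOR via the keyasint struct tags above; the negative integer keys follow
+// RFC 9290.
+//
+//	problem := ConciseProblemDetails{
+//	    Title:        ProblemTitleRejected,
+//	    Detail:       "example detail",
+//	    Instance:     ProblemInstanceRejectedByRegistrationPolicy,
+//	    ResponseCode: CoAPBadRequest,
+//	}
+//	data := problem.MustMarshalCBOR()
+//	var decoded ConciseProblemDetails
+//	if err := cbor.Unmarshal(data, &decoded); err != nil {
+//	    // handle the decode error
+//	}
+//	// decoded now holds the same title, detail, instance and response code
+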
+// ProblemDetailsMarshal marshals a problem details from the provided
+// arguments. If there is an error marshaling it, a pre-encoded problem
+// details for that situation is returned.
+func ProblemDetailsMarshal(title, detail string, responseCode uint64) []byte {
+ problem := ConciseProblemDetails{
+ Title: title,
+ Detail: detail,
+ ResponseCode: responseCode,
+ }
+ content, err := cbor.Marshal(&problem)
+ if err != nil {
+ content = ProblemDetailsEncodingError
+ }
+
+ return content
+}
diff --git a/scitt/scitt.go b/scitt/scitt.go
new file mode 100644
index 0000000..c815eb2
--- /dev/null
+++ b/scitt/scitt.go
@@ -0,0 +1,79 @@
+// Package scitt provides signed statement conveniences
+package scitt
+
+import (
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/datatrails/veracity/mmriver"
+)
+
+const (
+ ExtraBytesSize = 24
+)
+
+// MMRStatement prepares the details necessary for registering a signed
+// statement on a locally forked datatrails ledger replica
+type MMRStatement struct {
+ CheckedStatement
+ // Content is the signed statement raw cbor bytes exactly as read or provided
+ Content []byte
+ // Hash is the sha256 hash of the Statement
+ Hash []byte
+ // LeafHash is the MMR ledger defined leaf hash that is added to the ledger
+ LeafHash []byte
+ // ExtraBytes are the application contribution to the leaf hash. In the case
+ // of this pseudo scitt support, it is the CWT subject trimmed to 24 bytes
+ ExtraBytes []byte
+ // The IDTimestamp that contributed to the leaf hash.
+ IDTimestamp uint64
+ MMRIndexLeaf uint64
+}
+
+type idTimestampGenerator interface {
+ NextID() (uint64, error)
+}
+
+func NewMMRStatementFromFile(fileName string, idState idTimestampGenerator, policy RegistrationPolicy) (*MMRStatement, *ConciseProblemDetails, error) {
+ m := &MMRStatement{}
+
+ content, err := os.ReadFile(fileName)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to read file %s: %w", fileName, err)
+ }
+
+ var cpd *ConciseProblemDetails
+ m.CheckedStatement, cpd = RegistrationMandatoryChecks(content, policy)
+ if cpd != nil {
+ return nil, cpd, fmt.Errorf("failed mandatory registration checks: %s", cpd.Detail)
+ }
+
+ m.Content = content
+ hasher := sha256.New()
+ n, err := hasher.Write(m.Content)
+ if err != nil {
+ return nil, nil, err
+ }
+ if n != len(m.Content) {
+ return nil, nil, errors.New("hashed too few bytes")
+ }
+ m.Hash = hasher.Sum(nil)
+
+ // Could use the hash bytes for content addressability, but as this is primarily a scitt demo we use the subject, trimmed to the first 24 bytes
+ // m.ExtraBytes = m.Hash[:ExtraBytesSize]
+ m.ExtraBytes = mmriver.TrimExtraBytes([]byte(m.Claims.Subject))
+
+ m.IDTimestamp, err = idState.NextID()
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to generate snowflake id: %w", err)
+ }
+ // m.IDTimestamp = 0 // XXX: temporarily stabilize the hash
+
+ m.LeafHash, err = mmriver.MMREntryVersion1(m.ExtraBytes, m.IDTimestamp, m.Content)
+ if err != nil {
+ return nil, nil, err
+ }
+ return m, nil, nil
+}
diff --git a/taskfiles/Taskfile_azurite.yml b/taskfiles/Taskfile_azurite.yml
deleted file mode 100644
index 029de12..0000000
--- a/taskfiles/Taskfile_azurite.yml
+++ /dev/null
@@ -1,85 +0,0 @@
----
-version: '3'
-
-# Taskfile for working with dockerized azurite
-#
-# See: https://learn.microsoft.com/en-us/azure/storage/blobs/use-azurite-to-run-automated-tests
-# Azurite supports local development and integration testing for services which
-# use the message bus, blob store and ohter azure storage primitives.
- -vars: - # AZURITE_DATA_DIR the --location option for azurite is where data is persisted - AZURITE_DATA_DIR: '{{.AZURITE_DATA_DIR | default "../.local/azurite-data"}}' - AZURITE_BLOB_PORT: '{{.AZURITE_BLOB_PORT | default "10000"}}' - AZURITE_QUEUE_PORT: '{{.AZURITE_QUEUE_PORT | default "10001"}}' - AZURITE_TABLE_PORT: '{{.AZURITE_TABLE_PORT | default "11111"}}' - AZURITE_CONTAINER_NAME: '{{.AZURITE_CONTAINER_NAME | default "veracity-azurite"}}' - AZURITE_IMAGE: '{{.AZURITE_IMAGE | default "mcr.microsoft.com/azure-storage/azurite"}}' - -tasks: - preflight: - desc: stops, cleans and re-starts the emulator providing a clean state - summary: | - stops, cleans and re-starts the emulator providing a clean state - cmds: - - task: cleanup - - task: stop - - task: start - start: - desc: start azurite azure local storage emulator in a named docker container - summary: | - Starts the azure local storage emulator service in a docker container - The following env vars are respected for configuration - AZURITE_CONTAINER_NAME: - The container name to use, default "veracity-azurite" - AZURITE_DATA_DIR: - Where the data is persisted, default ".local/azurite-data" - AZURITE_BLOB_PORT: - Blob service listening port, default "10000" - AZURITE_QUEUE_PORT: - Queue port, default "10001" - AZURITE_TABLE_PORT: - Table port, default "11111" - vars: - AZURITE_DOCKER_ARGS: '{{default "" .AZURITE_DOCKER_ARGS}}' - cmds: - - | - AZURITE_DATA_DIR=$(mkdir -p {{.AZURITE_DATA_DIR}} && cd {{.AZURITE_DATA_DIR}} && pwd) - echo "AZURITE_DATA_DIR: ${AZURITE_DATA_DIR}" - docker run \ - --name {{.AZURITE_CONTAINER_NAME}} \ - {{.AZURITE_DOCKER_ARGS}} \ - -p {{.AZURITE_BLOB_PORT}}:10000 \ - -p {{.AZURITE_QUEUE_PORT}}:10001 \ - -p {{.AZURITE_TABLE_PORT}}:11111 \ - -dt -u $(id -u):$(id -g) \ - --mount type=bind,src=${AZURITE_DATA_DIR},dst=/data \ - {{.AZURITE_IMAGE}} \ - {{.CLI_ARGS}} - stop: - desc: stop azurite azure local storage emulator docker container - summary: | - Stops the azure local storage emulator service - cmds: - - docker rm -f {{.AZURITE_CONTAINER_NAME}} - - cleanup: - desc: | - stop the container and DELETE the data directory identified by AZURITE_DATA_DIR - summary: | - Stops the azure local storage emulator service - deps: [stop] - cmds: - - | - [[ -z "{{.AZURITE_DATA_DIR}}" ]] && exit 0 - echo "deleting data at {{.AZURITE_DATA_DIR}}" - rm -vrf {{.AZURITE_DATA_DIR}} - follow-logs: - desc: follow the logs of the azurite container - cmds: - - docker logs -f {{.AZURITE_CONTAINER_NAME}} - - logs: - desc: follow the logs of the azurite container - cmds: - - docker logs {{.AZURITE_CONTAINER_NAME}} diff --git a/taskfiles/Taskfile_codequality.yml b/taskfiles/Taskfile_codequality.yml deleted file mode 100644 index b71d65a..0000000 --- a/taskfiles/Taskfile_codequality.yml +++ /dev/null @@ -1,68 +0,0 @@ ---- -# All targets that support linting and code analysis. -# DO NOT PUT CLEAN TARGETS or other developer conveniences in here - -version: '3' - -# Environment variables set for all commands. -env_build: &env_build - # XDG_CACHE_HOME: This may need to be set for python builds, but try to use - # virtual env instead. - - # The GOCACHE in a linux container on Docker for windows MUST be on a linuxy - # file system - GOCACHE: /tmp/datatrails/veracity - -vars: - - VERBOSE: "" - -tasks: - - all: - desc: "run all code quality tasks" - cmds: - - task: format - - task: lint - - format: - desc: "format sources (go fmt)" - dir: ../ - vars: - GO_MOD_DIRS: - sh: find . 
-type f -name 'go.mod' - cmds: - - for: { var: GO_MOD_DIRS, as: MODULE} - cmd: | - cd $(dirname {{.MODULE}}) - goimports {{.VERBOSE}} -w . - gofmt -l -s -w . - lint: - desc: Quality assurance of **all** code for desktop - dir: ../ - vars: - GO_MOD_DIRS: - sh: find . -type f -name 'go.mod' - GOLANGCI_LINT_CONFIG: '{{.GOLANGCI_LINT_CONFIG | default ".golangci.yml"}}' - cmds: - - for: { var: GO_MOD_DIRS, as: MODULE} - cmd: | - CONFIG=$(realpath {{.GOLANGCI_LINT_CONFIG}}) - NAME=$(basename $(dirname {{.MODULE}})) - echo "Linting: $NAME" - cd $(dirname {{.MODULE}}) - go vet -tags integration,azurite ./... - golangci-lint --version - golangci-lint --build-tags integration,azurite -c $CONFIG {{.VERBOSE}} run --timeout 10m ./... - modules: - desc: Make all go.mod files consistent - dir: ../ - vars: - GO_MOD_DIRS: - sh: find . -type f -name 'go.mod' - cmds: - - for: { var: GO_MOD_DIRS, as: MODULE} - cmd: | - cd $(dirname {{.MODULE}}) - go mod tidy - go mod verify diff --git a/taskfiles/Taskfile_gobuild.yml b/taskfiles/Taskfile_gobuild.yml deleted file mode 100644 index 9ef2096..0000000 --- a/taskfiles/Taskfile_gobuild.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: '3' - -# Environment variables set for all commands. -env_build: &env_build - # XDG_CACHE_HOME: This may need to be set for python builds, but try to use - # virtual env instead. - - # The GOCACHE in a linux container on Docker for windows MUST be on a linuxy - # file system - GOCACHE: /tmp/datatrails/veracity - -tasks: - - go:build: - desc: "run go build against all go modules" - dir: ../ - vars: - GO_MOD_DIRS: - sh: find . -type f -name 'go.mod' - - cmds: - - for: { var: GO_MOD_DIRS, as: MODULE} - cmd: | - cd $(dirname {{.MODULE}}) - VERSION=$(git describe --tags) - COMMIT=$(git rev-parse --short HEAD) - BUILDDATE=$(date -u +%Y-%m-%dT%H:%M:%SZ) - go build -ldflags="-s -w -X main.version=$VERSION -X main.commit=$COMMIT -X main.buildDate=$BUILDDATE" \ - -trimpath -o ./ ./... diff --git a/tests/append/append_test.go b/tests/append/append_test.go new file mode 100644 index 0000000..9f07560 --- /dev/null +++ b/tests/append/append_test.go @@ -0,0 +1,30 @@ +package append + +import ( + "os" + + "github.com/datatrails/veracity" +) + +// Test that +func (s *AppendCmdSuite) xTestAppendCCFSignedStatement() { + // replicaDir := s.T().TempDir() + + var err error + + err = os.Chdir("/Users/robin/Desktop/personal/ietf/data") + s.Require().NoError(err, "should be able to change directory to /Users/robin/Desktop/ietf/data") + + app := veracity.NewApp("tests", true) + veracity.AddCommands(app, true) + + err = app.Run([]string{ + "veracity", + "-t", "tenant/6a009b40-eb55-4159-81f0-69024f89f53c", + // "-l", "v1/mmrs/tenant/6a009b40-eb55-4159-81f0-69024f89f53c/0/massifs/0000000000000000.log", + "-l", "/Users/robin/Desktop/personal/ietf/data", + // "append" , "--generate-sealer-key", + "append", "--sealer-key", "ecdsa-key-private.cbor", "--signed-statement", "in-toto.json.hashenvelope.cose.empty_uhdr", + }) + s.NoError(err) +} diff --git a/tests/append/suite_test.go b/tests/append/suite_test.go new file mode 100644 index 0000000..1541438 --- /dev/null +++ b/tests/append/suite_test.go @@ -0,0 +1,23 @@ +// Package replicatelogs provides a test suite for the ReplicateLogs command. 
+package append + +import ( + "testing" + + "github.com/datatrails/veracity/tests" + "github.com/stretchr/testify/suite" +) + +type AppendCmdSuite struct { + tests.IntegrationTestSuite +} + +func (s *AppendCmdSuite) SetupSuite() { + s.IntegrationTestSuite.SetupSuite() + // ensure we have the azurite config in the env for all the tests so that --envauth always uses the emulator + s.EnsureAzuriteEnv() +} + +func TestAppendCmdSuite(t *testing.T) { + suite.Run(t, new(AppendCmdSuite)) +} diff --git a/tests/node/node_test.go b/tests/node/node_test.go index 4c2a0dc..cbdfcaa 100644 --- a/tests/node/node_test.go +++ b/tests/node/node_test.go @@ -7,25 +7,14 @@ import ( "strings" "github.com/datatrails/go-datatrails-common/logger" - "github.com/datatrails/go-datatrails-logverification/integrationsupport" - "github.com/datatrails/go-datatrails-merklelog/massifs" "github.com/datatrails/go-datatrails-merklelog/mmr" - "github.com/datatrails/go-datatrails-merklelog/mmrtesting" - "github.com/datatrails/go-datatrails-simplehash/simplehash" "github.com/datatrails/veracity" + "github.com/datatrails/veracity/tests/testcontext" + "github.com/forestrie/go-merklelog-datatrails/datatrails" + "github.com/robinbryce/go-merklelog-provider-testing/mmrtesting" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -func (s *NodeSuite) newMMRTestingConfig(labelPrefix, tenantIdentity string) mmrtesting.TestConfig { - return mmrtesting.TestConfig{ - StartTimeMS: (1698342521) * 1000, EventRate: 500, - TestLabelPrefix: labelPrefix, - TenantIdentity: tenantIdentity, - Container: strings.ReplaceAll(strings.ToLower(labelPrefix), "_", ""), - } -} - // TestNodeMultiMassif tests that the veracity sub command node // works for massifs beyond the first one and covers some obvious edge cases. 
// This really just tests that the correspondence between the massif index and the leaf index holds @@ -34,9 +23,6 @@ func (s *NodeSuite) TestVerifyIncludedMultiMassif() { logger.New("TestNodeMultiMassif") defer logger.OnExit() - cfg := s.newMMRTestingConfig("TestNodeMultiMassif", "") - azurite := mmrtesting.NewTestContext(s.T(), cfg) - massifHeight := uint8(8) leavesPerMassif := mmr.HeightIndexLeafCount(uint64(massifHeight) - 1) @@ -67,21 +53,15 @@ func (s *NodeSuite) TestVerifyIncludedMultiMassif() { massifCount := tt.massifCount s.Run(fmt.Sprintf("massifCount:%d", massifCount), func() { - leafHasher := integrationsupport.NewLeafHasher() - g := integrationsupport.NewTestGenerator( - s.T(), cfg.StartTimeMS/1000, &leafHasher, mmrtesting.TestGeneratorConfig{ - StartTimeMS: cfg.StartTimeMS, - EventRate: cfg.EventRate, - TenantIdentity: cfg.TenantIdentity, - TestLabelPrefix: cfg.TestLabelPrefix, - }) - - tenantId0 := g.NewTenantIdentity() - events := integrationsupport.GenerateTenantLog( - &azurite, g, int(tt.massifCount)*int(leavesPerMassif), tenantId0, true, + tc, logID, _, generated := testcontext.CreateLogBuilderContext( + s.T(), massifHeight, + tt.massifCount, + mmrtesting.WithTestLabelPrefix("TestNodeMultiMassif"), ) + tenantID := datatrails.Log2TenantID(logID) + for _, iLeaf := range tt.leaves { s.ReplaceStdout() @@ -91,9 +71,9 @@ func (s *NodeSuite) TestVerifyIncludedMultiMassif() { err := app.Run([]string{ "veracity", "--envauth", // uses the emulator - "--container", cfg.Container, + "--container", tc.Cfg.Container, "--data-url", s.Env.AzuriteVerifiableDataURL, - "--tenant", tenantId0, + "--tenant", tenantID, "--height", fmt.Sprintf("%d", massifHeight), "node", "--mmrindex", fmt.Sprintf("%d", mmrIndex), @@ -102,18 +82,7 @@ func (s *NodeSuite) TestVerifyIncludedMultiMassif() { stdout := s.CaptureAndCloseStdout() - id, _, err := massifs.SplitIDTimestampHex(events[iLeaf].MerklelogEntry.Commit.Idtimestamp) - require.NoError(s.T(), err) - - hasher := simplehash.NewHasherV3() - // hash the generated event - err = hasher.HashEvent( - events[iLeaf], - simplehash.WithPrefix([]byte{byte(integrationsupport.LeafTypePlain)}), - simplehash.WithIDCommitted(id), - ) - require.Nil(s.T(), err) - leafValue := fmt.Sprintf("%x", hasher.Sum(nil)) + leafValue := fmt.Sprintf("%x", generated.Args[iLeaf].Value) assert.Equal(s.T(), leafValue, strings.TrimSpace(stdout)) } }) diff --git a/tests/replicatelogs/integration_helpers.go b/tests/replicatelogs/integration_helpers.go new file mode 100644 index 0000000..2d8fbee --- /dev/null +++ b/tests/replicatelogs/integration_helpers.go @@ -0,0 +1,54 @@ +//go:build integration + +package verifyconsistency + +import ( + "crypto/sha256" + "io" + "os" + "path/filepath" + "testing" + + "github.com/datatrails/go-datatrails-merklelog/massifs/storage" + "github.com/google/uuid" + fsstorage "github.com/robinbryce/go-merklelog-fs/storage" + "github.com/stretchr/testify/require" +) + +func fileSHA256(filename string) ([]byte, error) { + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + hasher := sha256.New() + if _, err := io.Copy(hasher, f); err != nil { + return nil, err + } + + return hasher.Sum(nil), nil +} + +func mustHashFile(t *testing.T, filename string) []byte { + t.Helper() + hash, err := fileSHA256(filename) + require.NoError(t, err) + return hash +} + +func mustMassifFilename(t *testing.T, replicaDir string, logID storage.LogID, massifIndex uint32) string { + t.Helper() + prefix := filepath.Join( + replicaDir, 
fsstorage.LogIDPrefix, uuid.UUID(logID).String(), fsstorage.MassifsDirName) + "/" + + return storage.FmtMassifPath(prefix, uint32(massifIndex)) +} + +func mustCheckpointFilename(t *testing.T, replicaDir string, logID storage.LogID, massifIndex uint32) string { + t.Helper() + + prefix := filepath.Join( + replicaDir, fsstorage.LogIDPrefix, uuid.UUID(logID).String(), fsstorage.CheckpointsDirName) + "/" + return storage.FmtCheckpointPath(prefix, uint32(massifIndex)) +} diff --git a/tests/replicatelogs/replicatelogs_azurite_test.go b/tests/replicatelogs/replicatelogs_azurite_test.go index 3e0864f..7bbacb9 100644 --- a/tests/replicatelogs/replicatelogs_azurite_test.go +++ b/tests/replicatelogs/replicatelogs_azurite_test.go @@ -4,53 +4,38 @@ package verifyconsistency import ( "context" - "crypto/elliptic" - "crypto/sha256" "encoding/json" "fmt" - "io" "os" "path/filepath" "testing" "github.com/datatrails/go-datatrails-common/logger" - "github.com/datatrails/go-datatrails-merklelog/massifs" + "github.com/datatrails/go-datatrails-merklelog/massifs/storage" "github.com/datatrails/go-datatrails-merklelog/mmr" "github.com/datatrails/veracity" + "github.com/datatrails/veracity/tests/testcontext" + "github.com/forestrie/go-merklelog-datatrails/datatrails" + "github.com/robinbryce/go-merklelog-provider-testing/mmrtesting" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func fileSHA256(filename string) ([]byte, error) { - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - - hasher := sha256.New() - if _, err := io.Copy(hasher, f); err != nil { - return nil, err - } - - return hasher.Sum(nil), nil -} - -func mustHashFile(t *testing.T, filename string) []byte { - t.Helper() - hash, err := fileSHA256(filename) - require.NoError(t, err) - return hash -} - // TestReplicateMassifUpdate ensures that an extension to a previously replicated // massif is handled correctly func (s *ReplicateLogsCmdSuite) TestReplicateMassifUpdate() { logger.New("TestReplicateMassifUpdate") defer logger.OnExit() - tc := massifs.NewLocalMassifReaderTestContext( - s.T(), logger.Sugar, "TestReplicateMassifUpdate") + tc := testcontext.NewDefaultTestContext( + s.T(), + mmrtesting.WithTestLabelPrefix("TestReplicateMassifUpdate"), + ) + + // getter, err := tc.NewNativeObjectReader(massifs.StorageOptions{MassifHeight: integrationsupport.TestMassifHeight}) + // require.Nil(t, err) + + ctx := context.TODO() h8MassifLeaves := mmr.HeightIndexLeafCount(uint64(8 - 1)) // = ((2 << massifHeight) - 1 + 1) >> 1 @@ -72,32 +57,28 @@ func (s *ReplicateLogsCmdSuite) TestReplicateMassifUpdate() { // make sure we cover update from partial blob to new massif {name: "partial first massif", massifHeight: 8, firstUpdateMassifs: 0, firstUpdateExtraLeaves: h8MassifLeaves - 6, secondUpdateMassifs: 2, secondUpdateExtraLeaves: 0}, } - key := massifs.TestGenerateECKey(s.T(), elliptic.P256()) for _, tt := range tests { - s.Run(tt.name, func() { - // Populate the log with the content for the first update + var err error + require.True(s.T(), tt.firstUpdateMassifs > 0 || tt.firstUpdateExtraLeaves > 0, uint32(0), "invalid test") require.True(s.T(), tt.secondUpdateMassifs > 0 || tt.secondUpdateExtraLeaves > 0, uint32(0), "invalid test") replicaDir := s.T().TempDir() - tenantId0 := tc.G.NewTenantIdentity() - // If we skip CreateLog below, we need to delete the blobs - tc.AzuriteContext.DeleteBlobsByPrefix(massifs.TenantMassifPrefix(tenantId0)) + // tc.GenerateTenantLog(10, massifHeight, 0 /* leaf type plain 
*/) + logId0 := tc.G.NewLogID() + tenantId0 := datatrails.Log2TenantID(logId0) + + builder, _ := testcontext.CreateLogForContext(tc, logId0, tt.massifHeight, uint32(tt.firstUpdateMassifs)) + + leavesPerMassif := mmr.HeightIndexLeafCount(uint64(tt.massifHeight) - 1) // = ((2 << massifHeight) - 1 + 1) >> 1 - if tt.firstUpdateMassifs > 0 { - tc.CreateLog( - tenantId0, tt.massifHeight, uint32(tt.firstUpdateMassifs), - massifs.TestWithSealKey(&key), - ) - } if tt.firstUpdateExtraLeaves > 0 { - tc.AddLeavesToLog( - tenantId0, tt.massifHeight, int(tt.firstUpdateExtraLeaves), - massifs.TestWithSealKey(&key), + tc.AddLeaves( + ctx, builder, logId0, tt.massifHeight, leavesPerMassif*tt.firstUpdateMassifs, tt.firstUpdateExtraLeaves, ) } @@ -106,10 +87,10 @@ func (s *ReplicateLogsCmdSuite) TestReplicateMassifUpdate() { app := veracity.NewApp("tests", true) veracity.AddCommands(app, true) - err := app.Run([]string{ + err = app.Run([]string{ "veracity", "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, + "--container", tc.Cfg.Container, "--data-url", s.Env.AzuriteVerifiableDataURL, "--tenant", tenantId0, "--height", fmt.Sprintf("%d", tt.massifHeight), @@ -120,7 +101,11 @@ func (s *ReplicateLogsCmdSuite) TestReplicateMassifUpdate() { "--massif", fmt.Sprintf("%d", tt.firstUpdateMassifs), }) s.NoError(err) - firstMassifFilename := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, uint32(tt.firstUpdateMassifs))) + + headIndex, err := builder.ObjectReader.HeadIndex(ctx, storage.ObjectMassifData) + s.NoError(err) + + firstMassifFilename := mustMassifFilename(s.T(), replicaDir, logId0, headIndex) firstHash := mustHashFile(s.T(), firstMassifFilename) // Add the content for the second update @@ -128,18 +113,16 @@ func (s *ReplicateLogsCmdSuite) TestReplicateMassifUpdate() { if tt.secondUpdateMassifs > 0 { massifLeaves := mmr.HeightIndexLeafCount(uint64(tt.massifHeight - 1)) // = ((2 << massifHeight) - 1 + 1) >> 1 // CreateLog always deleted blobs, so we can only use AddLeavesToLog here - for range tt.secondUpdateMassifs { - tc.AddLeavesToLog( - tenantId0, tt.massifHeight, int(massifLeaves), - massifs.TestWithSealKey(&key), + for i := range tt.secondUpdateMassifs { + tc.AddLeaves( + ctx, builder, logId0, tt.massifHeight, leavesPerMassif*tt.firstUpdateMassifs+i, massifLeaves, ) } } if tt.secondUpdateExtraLeaves > 0 { - tc.AddLeavesToLog( - tenantId0, tt.massifHeight, int(tt.secondUpdateExtraLeaves), - massifs.TestWithSealKey(&key), + tc.AddLeaves( + ctx, builder, logId0, tt.massifHeight, leavesPerMassif*(tt.firstUpdateMassifs+tt.secondUpdateMassifs), tt.secondUpdateExtraLeaves, ) } @@ -147,7 +130,7 @@ func (s *ReplicateLogsCmdSuite) TestReplicateMassifUpdate() { err = app.Run([]string{ "veracity", "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, + "--container", tc.Cfg.Container, "--data-url", s.Env.AzuriteVerifiableDataURL, "--tenant", tenantId0, "--height", fmt.Sprintf("%d", tt.massifHeight), @@ -156,8 +139,12 @@ func (s *ReplicateLogsCmdSuite) TestReplicateMassifUpdate() { "--massif", fmt.Sprintf("%d", tt.firstUpdateMassifs+tt.secondUpdateMassifs), }) s.NoError(err) + + headIndex, err = builder.ObjectReader.HeadIndex(ctx, storage.ObjectMassifData) + s.NoError(err) + // note: secondMassifFilename *may* be same as first depending on test config - secondMassifFilename := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, uint32(tt.firstUpdateMassifs+tt.secondUpdateMassifs))) + secondMassifFilename := 
mustMassifFilename(s.T(), replicaDir, logId0, headIndex) secondHash := mustHashFile(s.T(), secondMassifFilename) assert.NotEqual(s.T(), firstHash, secondHash, "the massif should have changed") @@ -166,7 +153,7 @@ func (s *ReplicateLogsCmdSuite) TestReplicateMassifUpdate() { err = app.Run([]string{ "veracity", "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, + "--container", tc.Cfg.Container, "--data-url", s.Env.AzuriteVerifiableDataURL, "--tenant", tenantId0, "--height", fmt.Sprintf("%d", tt.massifHeight), @@ -178,164 +165,23 @@ func (s *ReplicateLogsCmdSuite) TestReplicateMassifUpdate() { s.NoError(err) }) } - -} - -// TestV0ToV1ReplicationTransition tests that v0 seal replica can be continued with v1 seals -// In this tests the log starts with v0 seals, is replicated, and the continues with v1 seals. -// This covers the production case where there are previously replicated logs. -func (s *ReplicateLogsCmdSuite) TestV0ToV1ReplicationTransition() { - - logger.New("TestV0ToV1ReplicationTransition") - defer logger.OnExit() - - tc := massifs.NewLocalMassifReaderTestContext( - s.T(), logger.Sugar, "TestV0ToV1ReplicationTransition") - - h8MassifLeaves := mmr.HeightIndexLeafCount(uint64(8 - 1)) // = ((2 << massifHeight) - 1 + 1) >> 1 - - tests := []struct { - name string - massifHeight uint8 - legacyCount uint64 - lastLeagacyLeafCount uint64 - // if zero, the last legacy massif will be completed. If the last legacy is full and v1Count is zero the test is invalid - v1Count uint64 - lastV1LeafCount uint64 - }{ - // make sure we cover the obvious edge cases - {name: "complete first massif with v0 promoted to v1", massifHeight: 8, legacyCount: 0, lastLeagacyLeafCount: h8MassifLeaves - 3, v1Count: 0, lastV1LeafCount: 3}, - } - key := massifs.TestGenerateECKey(s.T(), elliptic.P256()) - - for _, tt := range tests { - - s.Run(tt.name, func() { - - // Populate the log with content under legacy seals - - require.True(s.T(), tt.legacyCount > 0 || tt.lastLeagacyLeafCount > 0, uint32(0), "invalid test") - require.True(s.T(), tt.v1Count > 0 || tt.lastV1LeafCount > 0, uint32(0), "invalid test") - replicaDir := s.T().TempDir() - tenantId0 := tc.G.NewTenantIdentity() - // leagacyLeafCount = massifLeaves*tt.legacyCount + tt.lastLeagacyLeafCount - - // note: CreateLog both creates the massifs *and* populates them - lastMassif := uint32(tt.legacyCount) - if lastMassif > 0 { - lastMassif-- - } - - // If we skip CreateLog below, we need to delete the blobs - tc.AzuriteContext.DeleteBlobsByPrefix(massifs.TenantMassifPrefix(tenantId0)) - - if lastMassif > 0 { - tc.CreateLog( - tenantId0, tt.massifHeight, lastMassif, - massifs.TestWithSealKey(&key), massifs.TestWithV0Seals(), - ) - } - if tt.lastLeagacyLeafCount > 0 { - tc.AddLeavesToLog( - tenantId0, tt.massifHeight, int(tt.lastLeagacyLeafCount), - massifs.TestWithSealKey(&key), massifs.TestWithV0Seals(), - ) - } - - // Replicate the log - // note: VERACITY_IKWID is set in main, we need it to enable --envauth so we force it here - app := veracity.NewApp("tests", true) - veracity.AddCommands(app, true) - - err := app.Run([]string{ - "veracity", - "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, - "--data-url", s.Env.AzuriteVerifiableDataURL, - "--tenant", tenantId0, - "--height", fmt.Sprintf("%d", tt.massifHeight), - "replicate-logs", - // "--ancestors", fmt.Sprintf("%d", tt.ancestors), - "--replicadir", replicaDir, - "--massif", fmt.Sprintf("%d", lastMassif), - }) - s.NoError(err) - - // Add v1 sealed content - 
lastMassif = uint32(tt.v1Count) - if lastMassif > 0 { - lastMassif-- - } - - // Add v1 sealed content - if lastMassif > 0 { - massifLeaves := mmr.HeightIndexLeafCount(uint64(tt.massifHeight - 1)) // = ((2 << massifHeight) - 1 + 1) >> 1 - // CreateLog always deleted blobs, so we can only use AddLeavesToLog here - for range tt.v1Count { - tc.AddLeavesToLog( - tenantId0, tt.massifHeight, int(massifLeaves), - massifs.TestWithSealKey(&key), /*, massifs.TestWithV0Seals() V1 seals*/ - ) - } - } - if tt.lastLeagacyLeafCount > 0 { - tc.AddLeavesToLog( - tenantId0, tt.massifHeight, int(tt.lastV1LeafCount), - massifs.TestWithSealKey(&key), /*, massifs.TestWithV0Seals() V1 seals*/ - ) - } - - // Replicate the v1 content - err = app.Run([]string{ - "veracity", - "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, - "--data-url", s.Env.AzuriteVerifiableDataURL, - "--tenant", tenantId0, - "--height", fmt.Sprintf("%d", tt.massifHeight), - "replicate-logs", - "--replicadir", replicaDir, - "--massif", fmt.Sprintf("%d", lastMassif), - }) - s.NoError(err) - - // re-read the v1 seal and decide we are up to date - err = app.Run([]string{ - "veracity", - "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, - "--data-url", s.Env.AzuriteVerifiableDataURL, - "--tenant", tenantId0, - "--height", fmt.Sprintf("%d", tt.massifHeight), - "replicate-logs", - "--replicadir", replicaDir, - "--massif", fmt.Sprintf("%d", lastMassif), - }) - - s.NoError(err) - }) - } - } // TestReplicatingMassifLogsForOneTenant test that by default af full replica is made func (s *ReplicateLogsCmdSuite) TestReplicatingMassifLogsForOneTenant() { - logger.New("Test4AzuriteMassifsForOneTenant") defer logger.OnExit() - tc := massifs.NewLocalMassifReaderTestContext( - s.T(), logger.Sugar, "Test4AzuriteMassifsForOneTenant") - + tc := testcontext.NewDefaultTestContext(s.T(), mmrtesting.WithTestLabelPrefix("Test4AzuriteMassifsForOneTenant")) massifHeight := uint8(8) tests := []struct { massifCount uint32 }{ // make sure we cover the obvious edge cases - {massifCount: 1}, {massifCount: 2}, {massifCount: 5}, + {massifCount: 1}, } for _, tt := range tests { @@ -343,10 +189,9 @@ func (s *ReplicateLogsCmdSuite) TestReplicatingMassifLogsForOneTenant() { massifCount := tt.massifCount s.Run(fmt.Sprintf("massifCount:%d", massifCount), func() { - tenantId0 := tc.G.NewTenantIdentity() - - // note: CreateLog both creates the massifs *and* populates them - tc.CreateLog(tenantId0, massifHeight, massifCount) + logId0 := tc.G.NewLogID() + tenantId0 := datatrails.Log2TenantID(logId0) + testcontext.CreateLogForContext(tc, logId0, massifHeight, massifCount) replicaDir := s.T().TempDir() @@ -357,7 +202,7 @@ func (s *ReplicateLogsCmdSuite) TestReplicatingMassifLogsForOneTenant() { err := app.Run([]string{ "veracity", "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, + "--container", tc.Cfg.Container, "--data-url", s.Env.AzuriteVerifiableDataURL, "--tenant", tenantId0, "--height", fmt.Sprintf("%d", massifHeight), @@ -368,10 +213,10 @@ func (s *ReplicateLogsCmdSuite) TestReplicatingMassifLogsForOneTenant() { s.NoError(err) for i := range massifCount { - expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, i)) + expectMassifFile := mustMassifFilename(s.T(), replicaDir, logId0, uint32(i)) s.FileExistsf(expectMassifFile, "the replicated massif should exist") - expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, i)) - 
s.FileExistsf(expectSealFile, "the replicated seal should exist") + expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, logId0, uint32(i)) + s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist") } }) } @@ -381,12 +226,10 @@ func (s *ReplicateLogsCmdSuite) TestReplicatingMassifLogsForOneTenant() { // limits the number of historical massifs that are replicated Note that // --ancestors=0 still requires consistency against local replica of the remote func (s *ReplicateLogsCmdSuite) TestAncestorMassifLogsForOneTenant() { - logger.New("Test4AzuriteMassifsForOneTenant") defer logger.OnExit() - tc := massifs.NewLocalMassifReaderTestContext( - s.T(), logger.Sugar, "Test4AzuriteMassifsForOneTenant") + tc := testcontext.NewDefaultTestContext(s.T(), mmrtesting.WithTestLabelPrefix("TestAncestorMassifLogsForOneTenant")) massifHeight := uint8(8) @@ -414,10 +257,10 @@ func (s *ReplicateLogsCmdSuite) TestAncestorMassifLogsForOneTenant() { massifCount := tt.massifCount s.Run(fmt.Sprintf("massifCount:%d", massifCount), func() { - tenantId0 := tc.G.NewTenantIdentity() + logId0 := tc.G.NewLogID() + tenantId0 := datatrails.Log2TenantID(logId0) - // note: CreateLog both creates the massifs *and* populates them - tc.CreateLog(tenantId0, massifHeight, massifCount) + testcontext.CreateLogForContext(tc, logId0, massifHeight, massifCount) replicaDir := s.T().TempDir() @@ -428,7 +271,7 @@ func (s *ReplicateLogsCmdSuite) TestAncestorMassifLogsForOneTenant() { err := app.Run([]string{ "veracity", "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, + "--container", tc.Cfg.Container, "--data-url", s.Env.AzuriteVerifiableDataURL, "--tenant", tenantId0, "--height", fmt.Sprintf("%d", massifHeight), @@ -442,10 +285,10 @@ func (s *ReplicateLogsCmdSuite) TestAncestorMassifLogsForOneTenant() { if tt.ancestors >= massifCount-1 { // then all massifs should be replicated for i := range massifCount { - expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, i)) + expectMassifFile := mustMassifFilename(s.T(), replicaDir, logId0, uint32(i)) s.FileExistsf(expectMassifFile, "the replicated massif should exist") - expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, i)) - s.FileExistsf(expectSealFile, "the replicated seal should exist") + expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, logId0, uint32(i)) + s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist") } return } @@ -454,17 +297,18 @@ func (s *ReplicateLogsCmdSuite) TestAncestorMassifLogsForOneTenant() { end := max(2, massifCount) - 2 - tt.ancestors for i := range end { - expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, i)) - s.NoFileExistsf(expectMassifFile, "the replicated massif should NOT exist") - expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, i)) - s.NoFileExistsf(expectSealFile, "the replicated seal should NOT exist") + expectMassifFile := mustMassifFilename(s.T(), replicaDir, logId0, uint32(i)) + s.NoFileExistsf(expectMassifFile, "the replicated massif should exist") + expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, logId0, uint32(i)) + s.NoFileExistsf(expectCheckpointFile, "the replicated checkpoint should exist") } for i := massifCount - 1 - tt.ancestors; i < massifCount; i++ { - expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, i)) + + expectMassifFile := 
mustMassifFilename(s.T(), replicaDir, logId0, uint32(i)) s.FileExistsf(expectMassifFile, "the replicated massif should exist") - expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, i)) - s.FileExistsf(expectSealFile, "the replicated seal should exist") + expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, logId0, uint32(i)) + s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist") } }) } @@ -476,22 +320,24 @@ func (s *ReplicateLogsCmdSuite) TestAncestorMassifLogsForOneTenant() { // replica is sparse. --ancestors is set what the user wants to have a bound on // the work done in any one run func (s *ReplicateLogsCmdSuite) TestSparseReplicaCreatedAfterExtendedOffline() { - logger.New("TestSparseReplicaCreatedAfterExtendedOffline") defer logger.OnExit() - tc := massifs.NewLocalMassifReaderTestContext( - s.T(), logger.Sugar, "TestSparseReplicaCreatedAfterExtendedOffline") - + tc := testcontext.NewDefaultTestContext(s.T(), mmrtesting.WithTestLabelPrefix("TestSparseReplicaCreatedAfterExtendedOffline")) + ctx := context.TODO() massifCount := uint32(4) massifHeight := uint8(8) + logId0 := tc.G.NewLogID() + + _, builders := testcontext.CreateLogsForContext(tc, massifHeight, 1, logId0) + builder := builders[0] + // This test requires two invocations. For the first invocation, we make ony one massif available. // Then after that is successfully replicated, we add the rest of the massifs. + tenantId0 := datatrails.Log2TenantID(logId0) - tenantId0 := tc.G.NewTenantIdentity() - // note: CreateLog both creates the massifs *and* populates them - tc.CreateLog(tenantId0, massifHeight, 1) + leavesPerMassif := mmr.HeightIndexLeafCount(uint64(massifHeight) - 1) // = ((2 << massifHeight) - 1 + 1) >> 1 replicaDir := s.T().TempDir() @@ -502,7 +348,7 @@ func (s *ReplicateLogsCmdSuite) TestSparseReplicaCreatedAfterExtendedOffline() { err := app.Run([]string{ "veracity", "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, + "--container", tc.Cfg.Container, "--data-url", s.Env.AzuriteVerifiableDataURL, "--tenant", tenantId0, "--height", fmt.Sprintf("%d", massifHeight), @@ -514,20 +360,21 @@ func (s *ReplicateLogsCmdSuite) TestSparseReplicaCreatedAfterExtendedOffline() { s.NoError(err) // add the rest of the massifs - leavesPerMassif := mmr.HeightIndexLeafCount(uint64(massifHeight - 1)) for i := uint32(1); i < massifCount; i++ { - tc.AddLeavesToLog(tenantId0, massifHeight, int(leavesPerMassif)) + tc.AddLeaves( + ctx, builder, logId0, massifHeight, leavesPerMassif*uint64(i), leavesPerMassif, + ) } // This call, due to the --ancestors=1, should only replicate the last - // massif, and this will leave a gap in the local replica. Imporantly, this + // massif, and this will leave a gap in the local replica. Importantly, this // means the remote log has not been checked as being consistent with the // local. 
The supported way to fill the gaps is to run with --ancestors=0 (which is the default) // this will fill the gaps and ensure remote/local consistency err = app.Run([]string{ "veracity", "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, + "--container", tc.Cfg.Container, "--data-url", s.Env.AzuriteVerifiableDataURL, "--tenant", tenantId0, "--height", fmt.Sprintf("%d", massifHeight), @@ -539,24 +386,24 @@ func (s *ReplicateLogsCmdSuite) TestSparseReplicaCreatedAfterExtendedOffline() { s.NoError(err) // check the 0'th massifs and seals was replicated (by the first run of veractity) - expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, 0)) + expectMassifFile := mustMassifFilename(s.T(), replicaDir, logId0, uint32(0)) s.FileExistsf(expectMassifFile, "the replicated massif should exist") - expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, 0)) - s.FileExistsf(expectSealFile, "the replicated seal should exist") + expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, logId0, uint32(0)) + s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist") // check the gap was not mistakenly filled for i := uint32(1); i < massifCount-2; i++ { - expectMassifFile = filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, i)) + expectMassifFile := mustMassifFilename(s.T(), replicaDir, logId0, i) s.NoFileExistsf(expectMassifFile, "the replicated massif should NOT exist") - expectSealFile = filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, i)) - s.NoFileExistsf(expectSealFile, "the replicated seal should NOT exist") + expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, logId0, i) + s.NoFileExistsf(expectCheckpointFile, "the replicated checkpoint should NOT exist") } // check the massifs from the second veracity run were replicated for i := massifCount - 2; i < massifCount; i++ { - expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, i)) + expectMassifFile := mustMassifFilename(s.T(), replicaDir, logId0, i) s.FileExistsf(expectMassifFile, "the replicated massif should exist") - expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, i)) + expectSealFile := mustCheckpointFilename(s.T(), replicaDir, logId0, i) s.FileExistsf(expectSealFile, "the replicated seal should exist") } } @@ -564,22 +411,26 @@ func (s *ReplicateLogsCmdSuite) TestSparseReplicaCreatedAfterExtendedOffline() { // TestFullReplicaByDefault tests that we get a full replica when // updating a previous replica after further massifs have been added func (s *ReplicateLogsCmdSuite) TestFullReplicaByDefault() { - logger.New("TestFullReplicaByDefault") defer logger.OnExit() - tc := massifs.NewLocalMassifReaderTestContext( - s.T(), logger.Sugar, "TestFullReplicaByDefault") + ctx := context.TODO() massifCount := uint32(4) massifHeight := uint8(8) + tc := testcontext.NewDefaultTestContext(s.T(), mmrtesting.WithTestLabelPrefix("TestFullReplicaByDefault")) + logId0 := tc.G.NewLogID() + _, builders := testcontext.CreateLogsForContext( + tc, massifHeight, massifCount, logId0, + ) + builder := builders[0] + // This test requires two invocations. For the first invocation, we make ony one massif available. // Then after that is successfully replicated, we add the rest of the massifs. 
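+ // Illustrative note (added for clarity; assumes HeightIndexLeafCount(h)
+ // returns 2^h, consistent with the formula comment below): with
+ // massifHeight = 8, each full massif holds HeightIndexLeafCount(8-1) =
+ // 2^7 = 128 leaves, so massifCount = 4 corresponds to 512 leaves once
+ // all massifs are complete.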
@@ -564,22 +411,26 @@ func (s *ReplicateLogsCmdSuite) TestSparseReplicaCreatedAfterExtendedOffline() {
 // TestFullReplicaByDefault tests that we get a full replica when
 // updating a previous replica after further massifs have been added
 func (s *ReplicateLogsCmdSuite) TestFullReplicaByDefault() {
-	logger.New("TestFullReplicaByDefault")
 	defer logger.OnExit()

-	tc := massifs.NewLocalMassifReaderTestContext(
-		s.T(), logger.Sugar, "TestFullReplicaByDefault")
+	ctx := context.TODO()

 	massifCount := uint32(4)
 	massifHeight := uint8(8)

+	tc := testcontext.NewDefaultTestContext(s.T(), mmrtesting.WithTestLabelPrefix("TestFullReplicaByDefault"))
+	logId0 := tc.G.NewLogID()
+	_, builders := testcontext.CreateLogsForContext(
+		tc, massifHeight, massifCount, logId0,
+	)
+	builder := builders[0]
+
 	// This test requires two invocations. For the first invocation, we make only one massif available.
 	// Then after that is successfully replicated, we add the rest of the massifs.
+	tenantId0 := datatrails.Log2TenantID(logId0)

-	tenantId0 := tc.G.NewTenantIdentity()
-	// note: CreateLog both creates the massifs *and* populates them
-	tc.CreateLog(tenantId0, massifHeight, 1)
+	leavesPerMassif := mmr.HeightIndexLeafCount(uint64(massifHeight) - 1) // = ((2 << massifHeight) - 1 + 1) >> 1

 	replicaDir := s.T().TempDir()

@@ -590,7 +441,7 @@ func (s *ReplicateLogsCmdSuite) TestFullReplicaByDefault() {
 	err := app.Run([]string{
 		"veracity",
 		"--envauth", // uses the emulator
-		"--container", tc.TestConfig.Container,
+		"--container", tc.Cfg.Container,
 		"--data-url", s.Env.AzuriteVerifiableDataURL,
 		"--tenant", tenantId0,
 		"--height", fmt.Sprintf("%d", massifHeight),
@@ -602,20 +453,21 @@ func (s *ReplicateLogsCmdSuite) TestFullReplicaByDefault() {
 	s.NoError(err)

 	// add the rest of the massifs
-	leavesPerMassif := mmr.HeightIndexLeafCount(uint64(massifHeight - 1))
 	for i := uint32(1); i < massifCount; i++ {
-		tc.AddLeavesToLog(tenantId0, massifHeight, int(leavesPerMassif))
+		tc.AddLeaves(
+			ctx, builder, logId0, massifHeight, leavesPerMassif*uint64(i), leavesPerMassif,
+		)
 	}

 	// This call, due to the --ancestors=0 default, should replicate all the new massifs.
 	// The previously replicated massifs should not be re-verified.
-	// The first new replicaetd massif should be verified as consistent with the
+	// The first new replicated massif should be verified as consistent with the
 	// last local massif. This last point isn't assured by this test, but if
-	// debugging it, that behviour can be observed.
+	// debugging it, that behavior can be observed.
 	err = app.Run([]string{
 		"veracity",
 		"--envauth", // uses the emulator
-		"--container", tc.TestConfig.Container,
+		"--container", tc.Cfg.Container,
 		"--data-url", s.Env.AzuriteVerifiableDataURL,
 		"--tenant", tenantId0,
 		"--height", fmt.Sprintf("%d", massifHeight),
@@ -627,165 +479,45 @@ func (s *ReplicateLogsCmdSuite) TestFullReplicaByDefault() {
 	s.NoError(err)

 	// check the 0'th massif and seal were replicated (by the first run of veracity)
-	expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, 0))
+	expectMassifFile := mustMassifFilename(s.T(), replicaDir, logId0, 0)
 	s.FileExistsf(expectMassifFile, "the replicated massif should exist")
-	expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, 0))
-	s.FileExistsf(expectSealFile, "the replicated seal should exist")
+	expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, logId0, 0)
+	s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist")

 	// check the massifs from the second veracity run were replicated
 	for i := uint32(1); i < massifCount; i++ {
-		expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, i))
-		s.FileExistsf(expectMassifFile, "the replicated massif should exist")
-		expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, i))
-		s.FileExistsf(expectSealFile, "the replicated seal should exist")
-	}
-}
-
-// TestLocalTamperDetected tests that a localy tampered masif is detected
-//
-// In this case, an attacker changes a remotely replicated massif in an attempt to
-// include, exclude or change some element. In order for such a change to be
-// provable, the attacker has to re-build the log from the point of the tamper
-// forward, otherwise the inclusion proof for the changed element will fail. 
We -// can simulate this situation without re-building the log simply by changing -// one of the peaks, as a re-build will necessarily always result in a different -// peak value. -// -// Attacks where the leaves are changed or remove and the log is not re-built -// can only be deteceted by full audit anyway. But these attacks are essentially -// equivalent to data corruption. And they do not result in a log which includes -// a different thing, just a single entry (or pair of) in the log which can't be -// proven -func (s *ReplicateLogsCmdSuite) TestLocalTamperDetected() { - - logger.New("TestFullReplicaByDefault") - defer logger.OnExit() - - tc := massifs.NewLocalMassifReaderTestContext( - s.T(), logger.Sugar, "TestFullReplicaByDefault") - - massifCount := uint32(4) - massifHeight := uint8(8) - - // This test requires two invocations. For the first invocation, we make ony - // one massif available. Then after that is successfully replicated, we - // tamper a peak in the local replica, then attempt to replicate the - // subsequent log - this should fail due to the local data being unable to - // re-produce the root needed for the local seal to verify. - - tenantId0 := tc.G.NewTenantIdentity() - // note: CreateLog both creates the massifs *and* populates them - tc.CreateLog(tenantId0, massifHeight, 1) - - replicaDir := s.T().TempDir() - - // note: VERACITY_IKWID is set in main, we need it to enable --envauth so we force it here - app := veracity.NewApp("tests", true) - veracity.AddCommands(app, true) - - err := app.Run([]string{ - "veracity", - "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, - "--data-url", s.Env.AzuriteVerifiableDataURL, - "--tenant", tenantId0, - "--height", fmt.Sprintf("%d", massifHeight), - "replicate-logs", - // --ancestors defaults to 0 which means "all", but only massif is available - "--replicadir", replicaDir, - "--massif", "0", - }) - s.NoError(err) - - localReader := newTestLocalReader(s.T(), replicaDir, massifHeight) - - massifLeafCount := mmr.HeightIndexLeafCount(uint64(massifHeight) - 1) - LastLeafIndex := massifLeafCount - 1 - mmrSize0 := mmr.FirstMMRSize(mmr.MMRIndex(LastLeafIndex)) - peaks := mmr.Peaks(mmrSize0 - 1) - // this simulates the effect of changing a leaf then re-building the log so - // that a proof of inclusion can be produced for the new element, this - // necessarily causes a peak to change. *any* peak change will cause the - // consistency proof to fail. And regardless of whether our seals are - // accumulators (all peak hashes) or a single bagged peak, the local log - // will be unable to produce the correct detached payload for the Sign1 seal - // over the root material. - tamperLocalReaderNode(s.T(), localReader, tenantId0, - massifHeight, peaks[len(peaks)-1], []byte{0x0D, 0x0E, 0x0A, 0x0D, 0x0B, 0x0E, 0x0E, 0x0F}) - - // Note: it's actually a property of the way massifs fill that the last node - // added is always a peak, we could have taken that shortcut abvove. In the - // interests of illustrating how any peak can be found, its done the long - // way above. - - // add the rest of the massifs - for i := uint32(1); i < massifCount; i++ { - tc.AddLeavesToLog(tenantId0, massifHeight, int(massifLeafCount)) - } - - // This call, due to the --ancestors=0 default, should replicate all the new massifs. - // The previously replicated massifs should not be re-verified. - // The first new replicaetd massif should be verified as consistent with the - // last local massif. 
This last point isn't assured by this test, but if - // debugging it, that behviour can be observed. - err = app.Run([]string{ - "veracity", - "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, - "--data-url", s.Env.AzuriteVerifiableDataURL, - "--tenant", tenantId0, - "--height", fmt.Sprintf("%d", massifHeight), - "replicate-logs", - // --ancestors defaults to 0 which means "all", but only massif is available - "--replicadir", replicaDir, - "--massif", fmt.Sprintf("%d", massifCount-1), - }) - - s.ErrorIs(err, massifs.ErrSealVerifyFailed) - // check the 0'th massifs and seals was replicated (by the first run of veractity) - expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, 0)) - s.FileExistsf(expectMassifFile, "the replicated massif should exist") - expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, 0)) - s.FileExistsf(expectSealFile, "the replicated seal should exist") - - // check the massifs from the second veracity run were NOT replicated - for i := uint32(1); i < massifCount; i++ { - expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(tenantId0, i)) - s.NoFileExistsf(expectMassifFile, "the replicated massif should NOT exist") - expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(tenantId0, i)) - s.NoFileExistsf(expectSealFile, "the replicated seal should NOT exist") + expectMassifFile := mustMassifFilename(s.T(), replicaDir, logId0, uint32(i)) + s.FileExistsf(expectMassifFile, "the replicated massif should exist") + expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, logId0, uint32(i)) + s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist") } } // Test4MassifsForThreeTenants multiple massifs are replicated // when the output of the watch command is provided on stdin func (s *ReplicateLogsCmdSuite) Test4MassifsForThreeTenants() { - logger.New("Test4AzuriteMassifsForThreeTenants") defer logger.OnExit() - tc := massifs.NewLocalMassifReaderTestContext( - s.T(), logger.Sugar, "Test4AzuriteMassifsForThreeTenants") + tc := testcontext.NewDefaultTestContext(s.T(), mmrtesting.WithTestLabelPrefix("Test4AzuriteMassifsForThreeTenants")) massifCount := uint32(4) massifHeight := uint8(8) - tenantId0 := tc.G.NewTenantIdentity() - // note: CreateLog both creates the massifs *and* populates them - tc.CreateLog(tenantId0, massifHeight, massifCount) - tenantId1 := tc.G.NewTenantIdentity() - tc.CreateLog(tenantId1, massifHeight, massifCount) - tenantId2 := tc.G.NewTenantIdentity() - tc.CreateLog(tenantId2, massifHeight, massifCount) + logId0 := tc.G.NewLogID() + logId1 := tc.G.NewLogID() + logId2 := tc.G.NewLogID() + + testcontext.CreateLogsForContext(tc, massifHeight, massifCount, logId0, logId1, logId2) changes := []struct { - TenantIdentity string `json:"tenant"` - MassifIndex int `json:"massifindex"` + LogID storage.LogID `json:"logid"` + MassifIndex int `json:"massifindex"` }{ - {tenantId0, int(massifCount - 1)}, - {tenantId1, int(massifCount - 1)}, - {tenantId2, int(massifCount - 1)}, + {logId0, int(massifCount - 1)}, + {logId1, int(massifCount - 1)}, + {logId2, int(massifCount - 1)}, } data, err := json.Marshal(changes) @@ -802,7 +534,7 @@ func (s *ReplicateLogsCmdSuite) Test4MassifsForThreeTenants() { err = app.Run([]string{ "veracity", "--envauth", // uses the emulator - "--container", tc.TestConfig.Container, + "--container", tc.Cfg.Container, "--data-url", s.Env.AzuriteVerifiableDataURL, "--height", 
fmt.Sprintf("%d", massifHeight),
 		"replicate-logs",
@@ -812,44 +544,38 @@

 	for _, change := range changes {
 		for i := range change.MassifIndex + 1 {
-			expectMassifFile := filepath.Join(
-				replicaDir, massifs.ReplicaRelativeMassifPath(change.TenantIdentity, uint32(i)))
+			expectMassifFile := mustMassifFilename(s.T(), replicaDir, change.LogID, uint32(i))
 			s.FileExistsf(
 				expectMassifFile,
 				"the replicated massif should exist")
-			expectSealFile := filepath.Join(
-				replicaDir, massifs.ReplicaRelativeSealPath(change.TenantIdentity, uint32(i)))
-			s.FileExistsf(expectSealFile, "the replicated seal should exist")
+			expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, change.LogID, uint32(i))
+			s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist")
 		}
 	}
}

-// TestThreeTenantsOneAtATime uses --concurency to force the replication to go one tenant at a time
+// TestThreeTenantsOneAtATime uses --concurrency to force the replication to go one tenant at a time
 // The test just ensures the obvious boundary case works
 func (s *ReplicateLogsCmdSuite) TestThreeTenantsOneAtATime() {
 	logger.New("TestThreeTenantsOneAtATime")
 	defer logger.OnExit()

-	tc := massifs.NewLocalMassifReaderTestContext(
-		s.T(), logger.Sugar, "TestThreeTenantsOneAtATime")
+	tc := testcontext.NewDefaultTestContext(s.T(), mmrtesting.WithTestLabelPrefix("TestThreeTenantsOneAtATime"))

 	massifCount := uint32(4)
 	massifHeight := uint8(8)

-	tenantId0 := tc.G.NewTenantIdentity()
-	// note: CreateLog both creates the massifs *and* populates them
-	tc.CreateLog(tenantId0, massifHeight, massifCount)
-	tenantId1 := tc.G.NewTenantIdentity()
-	tc.CreateLog(tenantId1, massifHeight, massifCount)
-	tenantId2 := tc.G.NewTenantIdentity()
-	tc.CreateLog(tenantId2, massifHeight, massifCount)
+	logId0 := tc.G.NewLogID()
+	logId1 := tc.G.NewLogID()
+	logId2 := tc.G.NewLogID()
+	testcontext.CreateLogsForContext(tc, massifHeight, massifCount, logId0, logId1, logId2)

 	changes := []struct {
-		TenantIdentity string `json:"tenant"`
-		MassifIndex    int    `json:"massifindex"`
+		LogID       storage.LogID `json:"logid"`
+		MassifIndex int           `json:"massifindex"`
 	}{
-		{tenantId0, int(massifCount - 1)},
-		{tenantId1, int(massifCount - 1)},
-		{tenantId2, int(massifCount - 1)},
+		{logId0, int(massifCount - 1)},
+		{logId1, int(massifCount - 1)},
+		{logId2, int(massifCount - 1)},
 	}

 	data, err := json.Marshal(changes)
@@ -866,7 +592,7 @@ func (s *ReplicateLogsCmdSuite) TestThreeTenantsOneAtATime() {
 	err = app.Run([]string{
 		"veracity",
 		"--envauth", // uses the emulator
-		"--container", tc.TestConfig.Container,
+		"--container", tc.Cfg.Container,
 		"--data-url", s.Env.AzuriteVerifiableDataURL,
 		"--height", fmt.Sprintf("%d", massifHeight),
 		"replicate-logs",
@@ -877,44 +603,39 @@ func (s *ReplicateLogsCmdSuite) TestThreeTenantsOneAtATime() {

 	for _, change := range changes {
 		for i := range change.MassifIndex + 1 {
-			expectMassifFile := filepath.Join(
-				replicaDir, massifs.ReplicaRelativeMassifPath(change.TenantIdentity, uint32(i)))
+			expectMassifFile := mustMassifFilename(s.T(), replicaDir, change.LogID, uint32(i))
 			s.FileExistsf(
 				expectMassifFile,
 				"the replicated massif should exist")
-			expectSealFile := filepath.Join(
-				replicaDir, massifs.ReplicaRelativeSealPath(change.TenantIdentity, uint32(i)))
-			s.FileExistsf(expectSealFile, "the replicated seal should exist")
+			expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, change.LogID, uint32(i))
+			s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist")
 		}
 	}
}
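TestThreeTenantsOneAtATime above and the two tests below pin down the --concurrency boundary cases: 1 forces one tenant at a time, 0 degrades to the same, and values above the tenant count are capped. An illustrative sketch of that clamping (an editor's illustration of the documented behavior, not veracity's implementation):

```go
// effectiveConcurrency clamps the requested worker count the way these
// tests expect: at least one tenant at a time, and never more workers than
// there are tenants to replicate.
func effectiveConcurrency(requested, tenantCount int) int {
	if requested < 1 {
		requested = 1
	}
	if requested > tenantCount {
		requested = tenantCount
	}
	return requested
}
```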

-// TestConcurrencyZero uses --concurency to force the replication to go one tenant at a time
+// TestConcurrencyZero uses --concurrency to force the replication to go one tenant at a time
 // The test just ensures the obvious boundary case works
 func (s *ReplicateLogsCmdSuite) TestConcurrencyZero() {
 	logger.New("TestConcurrencyZero")
 	defer logger.OnExit()

-	tc := massifs.NewLocalMassifReaderTestContext(
-		s.T(), logger.Sugar, "TestConcurrencyZero")
+	tc := testcontext.NewDefaultTestContext(s.T(), mmrtesting.WithTestLabelPrefix("TestConcurrencyZero"))

 	massifCount := uint32(4)
 	massifHeight := uint8(8)

-	tenantId0 := tc.G.NewTenantIdentity()
-	// note: CreateLog both creates the massifs *and* populates them
-	tc.CreateLog(tenantId0, massifHeight, massifCount)
-	tenantId1 := tc.G.NewTenantIdentity()
-	tc.CreateLog(tenantId1, massifHeight, massifCount)
-	tenantId2 := tc.G.NewTenantIdentity()
-	tc.CreateLog(tenantId2, massifHeight, massifCount)
+	logId0 := tc.G.NewLogID()
+	logId1 := tc.G.NewLogID()
+	logId2 := tc.G.NewLogID()
+
+	testcontext.CreateLogsForContext(tc, massifHeight, massifCount, logId0, logId1, logId2)

 	changes := []struct {
-		TenantIdentity string `json:"tenant"`
-		MassifIndex    int    `json:"massifindex"`
+		LogID       storage.LogID `json:"logid"`
+		MassifIndex int           `json:"massifindex"`
 	}{
-		{tenantId0, int(massifCount - 1)},
-		{tenantId1, int(massifCount - 1)},
-		{tenantId2, int(massifCount - 1)},
+		{logId0, int(massifCount - 1)},
+		{logId1, int(massifCount - 1)},
+		{logId2, int(massifCount - 1)},
 	}

 	data, err := json.Marshal(changes)
@@ -931,7 +652,7 @@ func (s *ReplicateLogsCmdSuite) TestConcurrencyZero() {
 	err = app.Run([]string{
 		"veracity",
 		"--envauth", // uses the emulator
-		"--container", tc.TestConfig.Container,
+		"--container", tc.Cfg.Container,
 		"--data-url", s.Env.AzuriteVerifiableDataURL,
 		"--height", fmt.Sprintf("%d", massifHeight),
 		"replicate-logs",
@@ -942,44 +663,40 @@ func (s *ReplicateLogsCmdSuite) TestConcurrencyZero() {

 	for _, change := range changes {
 		for i := range change.MassifIndex + 1 {
-			expectMassifFile := filepath.Join(
-				replicaDir, massifs.ReplicaRelativeMassifPath(change.TenantIdentity, uint32(i)))
+			expectMassifFile := mustMassifFilename(s.T(), replicaDir, change.LogID, uint32(i))
 			s.FileExistsf(
 				expectMassifFile,
 				"the replicated massif should exist")
-			expectSealFile := filepath.Join(
-				replicaDir, massifs.ReplicaRelativeSealPath(change.TenantIdentity, uint32(i)))
-			s.FileExistsf(expectSealFile, "the replicated seal should exist")
+
+			expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, change.LogID, uint32(i))
+			s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist")
 		}
 	}
}

-// TestConcurrencyCappedToTenantCount sets --concurency greater than the number of tenants
+// TestConcurrencyCappedToTenantCount sets --concurrency greater than the number of tenants
 // and shows all tenants are replicated
 func (s *ReplicateLogsCmdSuite) TestConcurrencyCappedToTenantCount() {
 	logger.New("TestConcurrencyCappedToTenantCount")
 	defer logger.OnExit()

-	tc := massifs.NewLocalMassifReaderTestContext(
-		s.T(), logger.Sugar, "TestConcurrencyCappedToTenantCount")
+	tc := testcontext.NewDefaultTestContext(s.T(), mmrtesting.WithTestLabelPrefix("TestConcurrencyCappedToTenantCount"))

 	massifCount := uint32(4)
 	massifHeight := uint8(8)

-	tenantId0 := tc.G.NewTenantIdentity()
-	// note: CreateLog both creates the massifs *and* populates them
-	tc.CreateLog(tenantId0, massifHeight, massifCount)
-	tenantId1 := tc.G.NewTenantIdentity()
-	tc.CreateLog(tenantId1, 
massifHeight, massifCount)
-	tenantId2 := tc.G.NewTenantIdentity()
-	tc.CreateLog(tenantId2, massifHeight, massifCount)
+	logId0 := tc.G.NewLogID()
+	logId1 := tc.G.NewLogID()
+	logId2 := tc.G.NewLogID()
+
+	testcontext.CreateLogsForContext(tc, massifHeight, massifCount, logId0, logId1, logId2)

 	changes := []struct {
-		TenantIdentity string `json:"tenant"`
-		MassifIndex    int    `json:"massifindex"`
+		LogID       storage.LogID `json:"logid"`
+		MassifIndex int           `json:"massifindex"`
 	}{
-		{tenantId0, int(massifCount - 1)},
-		{tenantId1, int(massifCount - 1)},
-		{tenantId2, int(massifCount - 1)},
+		{logId0, int(massifCount - 1)},
+		{logId1, int(massifCount - 1)},
+		{logId2, int(massifCount - 1)},
 	}

 	data, err := json.Marshal(changes)
@@ -996,7 +713,7 @@ func (s *ReplicateLogsCmdSuite) TestConcurrencyCappedToTenantCount() {
 	err = app.Run([]string{
 		"veracity",
 		"--envauth", // uses the emulator
-		"--container", tc.TestConfig.Container,
+		"--container", tc.Cfg.Container,
 		"--data-url", s.Env.AzuriteVerifiableDataURL,
 		"--height", fmt.Sprintf("%d", massifHeight),
 		"replicate-logs",
@@ -1007,70 +724,39 @@ func (s *ReplicateLogsCmdSuite) TestConcurrencyCappedToTenantCount() {

 	for _, change := range changes {
 		for i := range change.MassifIndex + 1 {
-			expectMassifFile := filepath.Join(
-				replicaDir, massifs.ReplicaRelativeMassifPath(change.TenantIdentity, uint32(i)))
+
+			expectMassifFile := mustMassifFilename(s.T(), replicaDir, change.LogID, uint32(i))
 			s.FileExistsf(
 				expectMassifFile,
 				"the replicated massif should exist")
-			expectSealFile := filepath.Join(
-				replicaDir, massifs.ReplicaRelativeSealPath(change.TenantIdentity, uint32(i)))
-			s.FileExistsf(expectSealFile, "the replicated seal should exist")
+			expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, change.LogID, uint32(i))
+			s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist")
 		}
 	}
-
-}
-
-// newTestLocalReader creates a new LocalReader
-// This provides a convenient way to interact with the massifs locally replicated by integration tests.
-func newTestLocalReader(
-	t *testing.T, replicaDir string, massifHeight uint8) *massifs.LocalReader {
-	cache, err := massifs.NewLogDirCache(logger.Sugar, veracity.NewFileOpener())
-	require.NoError(t, err)
-	localReader, err := massifs.NewLocalReader(logger.Sugar, cache)
-	require.NoError(t, err)
-
-	cborCodec, err := massifs.NewRootSignerCodec()
-	require.NoError(t, err)
-
-	opts := []massifs.DirCacheOption{
-		massifs.WithDirCacheReplicaDir(replicaDir),
-		massifs.WithDirCacheMassifLister(veracity.NewDirLister()),
-		massifs.WithDirCacheSealLister(veracity.NewDirLister()),
-		massifs.WithReaderOption(massifs.WithMassifHeight(massifHeight)),
-		massifs.WithReaderOption(massifs.WithSealGetter(&localReader)),
-		massifs.WithReaderOption(massifs.WithCBORCodec(cborCodec)),
-	}
-	cache.ReplaceOptions(opts...)
-	return &localReader
 }

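The changes slices marshaled by these tests mimic the records that `veracity watch` emits and `replicate-logs` consumes, either on stdin or from a file on disc, as the surrounding tests demonstrate. A minimal sketch of the payload shape; plain strings stand in for the log IDs because the JSON encoding of storage.LogID is not shown in this diff, so treat that as an assumption:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// change mirrors the anonymous struct used in the tests above; the real
// tests declare the "logid" field as storage.LogID.
type change struct {
	LogID       string `json:"logid"`
	MassifIndex int    `json:"massifindex"`
}

func main() {
	changes := []change{
		{LogID: "6ea5cd00-c711-3649-6914-7b125928bbb4", MassifIndex: 3},
	}
	data, err := json.Marshal(changes)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// [{"logid":"6ea5cd00-c711-3649-6914-7b125928bbb4","massifindex":3}]
}
```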
 // Test4MassifsForThreeTenantsFromFile multiple massifs are replicated
 // when the output of the watch command is provided in a file on disc
 func (s *ReplicateLogsCmdSuite) Test4MassifsForThreeTenantsFromFile() {
-	logger.New("Test4AzuriteMassifsForThreeTenantsFromFile")
 	defer logger.OnExit()
-
-	tc := massifs.NewLocalMassifReaderTestContext(
-		s.T(), logger.Sugar, "Test4AzuriteMassifsForThreeTenantsFromFile")
+	tc := testcontext.NewDefaultTestContext(s.T(), mmrtesting.WithTestLabelPrefix("Test4AzuriteMassifsForThreeTenantsFromFile"))

 	massifCount := uint32(4)
 	massifHeight := uint8(8)

-	tenantId0 := tc.G.NewTenantIdentity()
-	// note: CreateLog both creates the massifs *and* populates them
-	tc.CreateLog(tenantId0, massifHeight, massifCount)
-	tenantId1 := tc.G.NewTenantIdentity()
-	tc.CreateLog(tenantId1, massifHeight, massifCount)
-	tenantId2 := tc.G.NewTenantIdentity()
-	tc.CreateLog(tenantId2, massifHeight, massifCount)
+	logId0 := tc.G.NewLogID()
+	logId1 := tc.G.NewLogID()
+	logId2 := tc.G.NewLogID()
+
+	testcontext.CreateLogsForContext(tc, massifHeight, massifCount, logId0, logId1, logId2)

 	changes := []struct {
-		TenantIdentity string `json:"tenant"`
-		MassifIndex    int    `json:"massifindex"`
+		LogID       storage.LogID `json:"logid"`
+		MassifIndex int           `json:"massifindex"`
 	}{
-		{tenantId0, int(massifCount - 1)},
-		{tenantId1, int(massifCount - 1)},
-		{tenantId2, int(massifCount - 1)},
+		{logId0, int(massifCount - 1)},
+		{logId1, int(massifCount - 1)},
+		{logId2, int(massifCount - 1)},
 	}

 	data, err := json.Marshal(changes)
@@ -1089,7 +775,7 @@ func (s *ReplicateLogsCmdSuite) Test4MassifsForThreeTenantsFromFile() {
 	err = app.Run([]string{
 		"veracity",
 		"--envauth", // uses the emulator
-		"--container", tc.TestConfig.Container,
+		"--container", tc.Cfg.Container,
 		"--data-url", s.Env.AzuriteVerifiableDataURL,
 		"--height", fmt.Sprintf("%d", massifHeight),
 		"replicate-logs",
@@ -1100,13 +786,12 @@ func (s *ReplicateLogsCmdSuite) Test4MassifsForThreeTenantsFromFile() {

 	for _, change := range changes {
 		for i := range change.MassifIndex + 1 {
-			expectMassifFile := filepath.Join(
-				replicaDir, massifs.ReplicaRelativeMassifPath(change.TenantIdentity, uint32(i)))
+			expectMassifFile := mustMassifFilename(s.T(), replicaDir, change.LogID, uint32(i))
 			s.FileExistsf(
 				expectMassifFile,
 				"the replicated massif should exist")
-			expectSealFile := filepath.Join(
-				replicaDir, massifs.ReplicaRelativeSealPath(change.TenantIdentity, uint32(i)))
-			s.FileExistsf(expectSealFile, "the replicated seal should exist")
+
+			expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, change.LogID, uint32(i))
+			s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist")
 		}
 	}
}
@@ -1119,35 +804,3 @@ func createFileFromData(t *testing.T, data []byte, filename string) {
 	require.NoError(t, err)
 	require.Equal(t, n, len(data))
 }
-
-// tamperLocalReaderNode over-writes the log entry at the given mmrIndex with the provided bytes
-// This is typically used to simulate a local tamper or coruption
-//
-// The value needs to be non-empty and no longer than LogEntryBytes, a fine
-// value for this purpose is:
-//
-//	[]byte{0x0D, 0x0E, 0x0A, 0x0D, 0x0B, 0x0E, 0x0E, 0x0F}
-func tamperLocalReaderNode(
-	t *testing.T, reader *massifs.LocalReader, tenantIdentity string,
-	massifHeight uint8, mmrIndex uint64, tamperedValue []byte) {
-
-	require.NotZero(t, len(tamperedValue))
-	require.LessOrEqual(t, len(tamperedValue), massifs.LogEntryBytes)
-
-	leafIndex := mmr.LeafIndex(mmrIndex)
-	massifIndex := massifs.MassifIndexFromLeafIndex(massifHeight, leafIndex)
-	mc, err := reader.GetMassif(context.TODO(), tenantIdentity, massifIndex)
-	require.NoError(t, err)
-
-	i := mmrIndex - mc.Start.FirstIndex
-	logData := mc.Data[mc.LogStart():]
-	copy(logData[i*massifs.LogEntryBytes:i*massifs.LogEntryBytes+8], tamperedValue)
-
-	filePath := reader.GetMassifLocalPath(tenantIdentity, uint32(massifIndex))
-	f, err := os.Create(filePath) // read-write & over write
-	require.NoError(t, err)
-	defer f.Close()
-	n, err := f.Write(mc.Data)
-	require.NoError(t, err)
-	require.Equal(t, n, len(mc.Data))
-}
diff --git a/tests/replicatelogs/replicatelogs_prod_test.go b/tests/replicatelogs/replicatelogs_prod_test.go
index 03b72b1..02130ac 100644
--- a/tests/replicatelogs/replicatelogs_prod_test.go
+++ b/tests/replicatelogs/replicatelogs_prod_test.go
@@ -3,10 +3,8 @@
 package verifyconsistency

 import (
-	"path/filepath"
-
-	"github.com/datatrails/go-datatrails-merklelog/massifs"
 	"github.com/datatrails/veracity"
+	"github.com/forestrie/go-merklelog-datatrails/datatrails"
 )

 // Test that the watch command returns no error or that the error is "no changes"
@@ -27,9 +25,11 @@ func (s *ReplicateLogsCmdSuite) TestReplicateFirstPublicMassif() {
 		"--massif", "1",
 	})
 	s.NoError(err)
+	logID := datatrails.TenantID2LogID(s.Env.PublicTenantId)

-	expectMassifFile := filepath.Join(replicaDir, massifs.ReplicaRelativeMassifPath(s.Env.PublicTenantId, 0))
+	expectMassifFile := mustMassifFilename(s.T(), replicaDir, logID, 0)
 	s.FileExistsf(expectMassifFile, "the replicated massif should exist")
-	expectSealFile := filepath.Join(replicaDir, massifs.ReplicaRelativeSealPath(s.Env.PublicTenantId, 0))
-	s.FileExistsf(expectSealFile, "the replicated seal should exist")
+
+	expectCheckpointFile := mustCheckpointFilename(s.T(), replicaDir, logID, 0)
+	s.FileExistsf(expectCheckpointFile, "the replicated checkpoint should exist")
 }
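The TenantID2LogID call above (and the Log2TenantID calls in the azurite tests) convert between the two identity forms these suites use. At the string level, test-setup.sh below shows the relationship: the tenant ID is `tenant/<uuid>` and the log ID is the bare `<uuid>`. A minimal sketch of that mapping; note the real helpers operate on the package's LogID type rather than plain strings, so these are illustrations, not the real signatures:

```go
package main

import (
	"fmt"
	"strings"
)

// tenantIDToLogUUID strips the "tenant/" prefix; logUUIDToTenantID adds it.
// String-level stand-ins for datatrails.TenantID2LogID / datatrails.Log2TenantID.
func tenantIDToLogUUID(tenantID string) string {
	return strings.TrimPrefix(tenantID, "tenant/")
}

func logUUIDToTenantID(logUUID string) string {
	return "tenant/" + logUUID
}

func main() {
	fmt.Println(tenantIDToLogUUID("tenant/6ea5cd00-c711-3649-6914-7b125928bbb4"))
	fmt.Println(logUUIDToTenantID("6ea5cd00-c711-3649-6914-7b125928bbb4"))
}
```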
diff --git a/tests/replicatelogs/suite_test.go b/tests/replicatelogs/suite_test.go
index 78b4438..771a4d1 100644
--- a/tests/replicatelogs/suite_test.go
+++ b/tests/replicatelogs/suite_test.go
@@ -1,3 +1,4 @@
+// Package verifyconsistency provides a test suite for the ReplicateLogs command.
 package verifyconsistency

 import (
diff --git a/tests/systemtest/.gitignore b/tests/systemtest/.gitignore
index 5c9df43..1454576 100644
--- a/tests/systemtest/.gitignore
+++ b/tests/systemtest/.gitignore
@@ -1,4 +1,5 @@
 one.sh
+run.sh
 tampered.log
 tampered.sth
 mmr.log
diff --git a/tests/systemtest/replicate-logs-latest-interactive.sh b/tests/systemtest/replicate-logs-latest-interactive.sh
index 5840948..9cd3032 100644
--- a/tests/systemtest/replicate-logs-latest-interactive.sh
+++ b/tests/systemtest/replicate-logs-latest-interactive.sh
@@ -35,7 +35,6 @@ oneTimeSetUp() {
 	cp $SOAK_LOCAL_BLOB_FILE $DUP_DIR/soak-mmr.log

 	assertTrue "prod MMR blob file should be present" "[ -r $PROD_LOCAL_BLOB_FILE ]"
-	assertTrue "soak MMR blob file should be present" "[ -r $SOAK_LOCAL_BLOB_FILE ]"
 	assertTrue "invalid MMR blob file should be present" "[ -r $INVALID_BLOB_FILE ]"
 }
diff --git a/tests/systemtest/test-01-watch.sh b/tests/systemtest/test-01-watch.sh
new file mode 100644
index 0000000..086ca83
--- /dev/null
+++ b/tests/systemtest/test-01-watch.sh
@@ -0,0 +1,39 @@
+testVeracityWatchPublicFindsActivity() {
+    local output
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID watch --horizon 10000h)
+    assertEquals "watch-public should return a 0 exit code" 0 $?
+    assertContains "watch-public should find activity" "$output" "$PROD_PUBLIC_TENANT_ID"
+}
+
+testVeracityWatchLatestFindsActivity() {
+    local output
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID watch --latest)
+    assertEquals "watch-public --latest should return a 0 exit code" 0 $?
+    assertContains "watch-public --latest should find activity" "$output" "$PROD_PUBLIC_TENANT_ID"
+}
+
+testVeracityReplicateLogsPublicTenantWatchPipe() {
+    local output
+    local replicadir=$TEST_TMPDIR/merklelogs
+
+    rm -rf $replicadir
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
+        --tenant=$PROD_PUBLIC_TENANT_ID watch --horizon 10000h |
+        $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --ancestors=0 --replicadir=$replicadir)
+    assertEquals "watch-public should return a 0 exit code" 0 $?
+
+    COUNT=$(find $replicadir -type f | wc -l | tr -d ' ')
+    assertEquals "should replicate one massif and one seal" "2" "$COUNT"
+}
+
+testVeracityReplicateLogsPublicTenantWatchLatestFlag() {
+    local output
+    local replicadir=$TEST_TMPDIR/merklelogs
+
+    rm -rf $replicadir
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --latest --ancestors=0 --replicadir=$replicadir)
+    assertEquals "replicate-logs --latest should return a 0 exit code" 0 $?
+
+    COUNT=$(find $replicadir -type f | wc -l | tr -d ' ')
+    assertEquals "should replicate one massif and one seal" "2" "$COUNT"
+}
diff --git a/tests/systemtest/test-02-datatrails-verify.sh b/tests/systemtest/test-02-datatrails-verify.sh
new file mode 100644
index 0000000..904a75f
--- /dev/null
+++ b/tests/systemtest/test-02-datatrails-verify.sh
@@ -0,0 +1,36 @@
+testVerifySingleEvent() {
+    echo "disabled, datatrails public events service has been shutdown"
+    if false; then
+    # Check if the response status code is 200
+    local response
+    response=$(curl -sL -w "%{http_code}" $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID -o /dev/null)
+    assertEquals 200 "$response"
+    # Verify the event and check if the exit code is 0
+    curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID verify-included
+    assertEquals "Verifying a valid single event should result in a 0 exit code" 0 $?
+    fi
+}
+
+testVerifyListEvents() {
+    echo "disabled, datatrails public events service has been shutdown"
+    if false; then
+
+    # Check if the response status code is 200
+    local response
+    response=$(curl -sL -w "%{http_code}" $DATATRAILS_URL/archivist/v2/$PUBLIC_ASSET_ID/events -o /dev/null)
+    assertEquals 200 "$response"
+    # Verify the events on the asset and check if the exit code is 0
+    curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_ASSET_ID/events | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID verify-included
+    assertEquals "Verifying events on a valid asset should result in a 0 exit code" 0 $?
+    fi
+}
+
+testVerifySingleEventWithLocalMassifCopy() {
+    echo "disabled, datatrails public events service has been shutdown"
+    if false; then
+
+    # Verify the event and check if the exit code is 0
+    curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | $VERACITY_INSTALL --data-local $PROD_LOCAL_BLOB_FILE --tenant=$PROD_PUBLIC_TENANT_ID verify-included
+    assertEquals "verifying valid events with a local copy of the massif should result in a 0 exit code" 0 $?
+    fi
+}
diff --git a/tests/systemtest/test-03-findtrie.sh b/tests/systemtest/test-03-findtrie.sh
new file mode 100644
index 0000000..d21cd94
--- /dev/null
+++ b/tests/systemtest/test-03-findtrie.sh
@@ -0,0 +1,34 @@
+testFindTrieEntrySingleEvent() {
+    # Verify the trie key for the known event is on the log at the correct position.
+    PUBLIC_EVENT_PERMISSIONED_ID=${PUBLIC_EVENT_ID#"public"}
+    output=$($VERACITY_INSTALL find-trie-entries --log-tenant $PROD_PUBLIC_TENANT_ID --app-id $PUBLIC_EVENT_PERMISSIONED_ID)
+    assertEquals "verifying finding the trie entry of a known public prod event from the datatrails log should match mmr index 663" "matches: [663]" "$output"
+}
+
+testFindTrieEntrySingleEventWithLocalMassifCopy() {
+
+    # Verify the trie key for the known event is on the log at the correct position for a local log.
+    PUBLIC_EVENT_PERMISSIONED_ID=${PUBLIC_EVENT_ID#"public"}
+    output=$($VERACITY_INSTALL --massif-file $PROD_LOCAL_BLOB_FILE find-trie-entries --log-tenant $PROD_PUBLIC_TENANT_ID --app-id $PUBLIC_EVENT_PERMISSIONED_ID)
+    assertEquals "verifying finding the trie entry of a known public prod event from a local log should match mmr index 663" "matches: [663]" "$output"
+}
+
+testFindMMREntrySingleEvent() {
+    # Verify the mmr entry for the known event is on the log at the correct position.
+ echo "disabled, datatrails public events service has been shutdown" + if false; then + + output=$(curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | VERACITY_IKWID=true $VERACITY_INSTALL find-mmr-entries --log-tenant $PROD_PUBLIC_TENANT_ID) + assertEquals "verifying finding the mmr entry of a known public prod event from the datatrails log should match mmr index 663" "matches: [663]" "$output" + fi +} + +testFindMMREntrySingleEventWithLocalMassifCopy() { + echo "disabled, datatrails public events service has been shutdown" + if false; then + + # Verify the mmr entry for the known event is on the log at the correct position. + output=$(curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | VERACITY_IKWID=true $VERACITY_INSTALL --data-local $PROD_LOCAL_BLOB_FILE find-mmr-entries --log-tenant $PROD_PUBLIC_TENANT_ID) + assertEquals "verifying finding the mmr entry of a known public prod event from a local log should match mmr index 663" "matches: [663]" "$output" + fi +} diff --git a/tests/systemtest/test-04-replication.sh b/tests/systemtest/test-04-replication.sh new file mode 100644 index 0000000..635558e --- /dev/null +++ b/tests/systemtest/test-04-replication.sh @@ -0,0 +1,194 @@ +testReplicateErrorForLogShorterThanSeal() { + + local output + local other_tenant + local tampered_log_url + local tampered_seal_url + local replicadir=$TEST_TMPDIR/merklelogs + + # Note: this tenant belongs to Joe Gough and he has promised never to fill the first massif + other_tenant=tenant/97e90a09-8c56-40df-a4de-42fde462ef6f + + rm -rf $replicadir + # first get the prod public tenant replicated for massif 0. NOTE: this is a full massif + output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --massif 0 --replicadir=$replicadir) + assertEquals "0: should return a 0 exit code" 0 $? + + COUNT=$(find $replicadir -type f | wc -l | tr -d ' ') + assertEquals "should replicate one massif and one seal" "2" "$COUNT" + + # now get a different prod tenant log and seal. NOTE the log is partially full for this tenant + tampered_log_url=${tampered_log_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifs/0000000000000000.log} + tampered_seal_url=${tampered_seal_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifseals/0000000000000000.sth} + curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $tampered_log_url -o tampered.log + + ## copy over the different (shorter) tenant log for massif 0 + cp tampered.log $replicadir/log/$PROD_PUBLIC_LOGID/massifs/0000000000000000.log + # attempt to replicate the logs again, the local log data is for the wrong + # tenant and is *less* than the seal expects, but the local seal is correct + # for the replaced data and the remote seal + output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --replicadir=$replicadir) + status=$? + assertEquals "1: a tampered log should exit 1" 1 $status + assertContains "$output" "error: there is insufficient data in the massif context to generate a consistency proof against the provided state" +} + +# test veracity can't extend the replica of the wrong tenant +# +# When extending a local replica, if the local tenant log data is from a tenant +# other than the requested remote, replication should fail due to consistency +# checks. This is essentially equivalent to a tamper attempt. +# +# There are two cases that are important: +# 1. 
The highest indexed local massif is incomplete and so the remote massif is used to extend it.
+# 2. The highest indexed local massif is complete and so the remote massif is copied, leaving the original unchanged.
+#
+# This test suite covers only the second case. The first case can only be
+# tested with synthesized data, or interaction with a live system, and so is
+# easier to cover in the integration tests. However, the same checks are
+# exercised in both cases and so this test gives a lot of confidence that both
+# situations are sound.
+#
+# Note that the --ancestors flag can be used to limit how many massifs are
+# replicated. This can cause the replica to "start again" because the replica is
+# so far behind that the --ancestors limit forces a gap. In this case the
+# consistency of the remote is not checked against the local massif; the
+# replication would succeed, the local replica of the foreign tenant would not
+# be updated, and the replica would be left with massifs from multiple tenants.
+testReplicateErrorForMixedTenants() {
+
+    local output
+    local other_tenant
+    local tampered_log_url
+    local tampered_seal_url
+    local replicadir=$TEST_TMPDIR/merklelogs
+
+    # Note: this tenant is known to have > 1 massif at the time of writing and logs don't get shorter
+    other_tenant=tenant/b197ba3c-44fe-4b1a-bbe8-bd9674b2bd17
+
+    rm -rf $replicadir
+    # first get the prod public tenant replicated for massif 0. NOTE: this is a full massif
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --massif 0 --replicadir=$replicadir)
+    assertEquals "should return a 0 exit code" 0 $?
+
+    COUNT=$(find $replicadir -type f | wc -l | tr -d ' ')
+    assertEquals "should replicate one massif and one seal" "2" "$COUNT"
+
+    # now get a different prod tenant log and seal. NOTE the log is partially full for this tenant
+    tampered_log_url=${tampered_log_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifs/0000000000000000.log}
+    tampered_seal_url=${tampered_seal_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifseals/0000000000000000.sth}
+    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $tampered_log_url -o tampered.log
+    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $tampered_seal_url -o tampered.sth
+
+    # copy over the different tenant log for massif 0
+    cp tampered.log $replicadir/log/$PROD_PUBLIC_LOGID/massifs/0000000000000000.log
+
+    # attempt to replicate the logs again, the local log data is for the wrong tenant but the local seal is correct for the replaced data and the remote seal
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --replicadir=$replicadir)
+    assertEquals "1: extending an inconsistent replica should exit 1" 1 $?
+    assertContains "$output" "error: the seal signature verification failed: failed to verify seal for massif 0"
+
+    # now add in the seal from the other log, so that the local log and seal are consistent and locally verifiable.
+    cp tampered.sth $replicadir/log/$PROD_PUBLIC_LOGID/checkpoints/0000000000000000.sth
+
+    # attempt to replicate the logs again
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --latest --replicadir=$replicadir)
+    assertEquals "2: extending an inconsistent replica should exit 1" 1 $?
+    assertContains "$output" "error: consistency check failed: the accumulator produced for the trusted base state doesn't match the root produced for the seal state fetched from the log"
+}
+
+# test veracity can't update the replica for a tenant whose log has been tampered with
+#
+# This test repeats testReplicateErrorForMixedTenants, but does so using the
+# combination of watch | replicate-logs which permits finer control over the
+# replica
+testWatchReplicateErrorForTamperedLog() {
+
+    local output
+    local other_tenant
+    local tampered_log_url
+    local tampered_seal_url
+
+    local replicadir=$TEST_TMPDIR/merklelogs
+
+    # Note: this tenant is known to have > 1 massif at the time of writing and logs don't get shorter
+    other_logid=b197ba3c-44fe-4b1a-bbe8-bd9674b2bd17
+    other_tenant=tenant/$other_logid
+
+    # first get the prod tenant replicated for massif 0. NOTE: this is a partially full massif
+    rm -rf $replicadir
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
+        --tenant=$other_tenant watch --horizon 10000h |
+        $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$other_tenant replicate-logs --ancestors=0 --massif 0 --replicadir=$replicadir)
+    assertEquals "watch-public should return a 0 exit code" 0 $?
+
+    COUNT=$(find $replicadir -type f | wc -l | tr -d ' ')
+    assertEquals "should replicate one massif and one seal" "2" "$COUNT"
+
+    # now get a different prod public tenant log and seal. NOTE: this is a full massif
+    tampered_log_url=${tampered_log_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${PROD_PUBLIC_TENANT_ID}/0/massifs/0000000000000000.log}
+    tampered_seal_url=${tampered_seal_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${PROD_PUBLIC_TENANT_ID}/0/massifseals/0000000000000000.sth}
+    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $tampered_log_url -o tampered.log
+    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $tampered_seal_url -o tampered.sth
+
+    # copy over the different tenant log for massif 0
+    cp tampered.log $replicadir/log/$other_logid/massifs/0000000000000000.log
+
+    # attempt to replicate the logs again
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
+        --tenant=$other_tenant watch --horizon 10000h |
+        $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$other_tenant replicate-logs --replicadir=$replicadir)
+    assertEquals "extending an inconsistent replica should exit 1" 1 $?
+    assertContains "$output" "error: the seal signature verification failed: failed to verify checkpoint for massif 0: verification error"
+
+    # now attempt to change the seal to the tampered log seal
+    cp tampered.sth $replicadir/log/$other_logid/checkpoints/0000000000000000.sth
+
+    # attempt to replicate the logs again
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
+        --tenant=$other_tenant watch --horizon 10000h |
+        $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$other_tenant replicate-logs --replicadir=$replicadir)
+    status=$?
+    assertEquals "extending an inconsistent replica should exit 1" 1 $status
+    assertContains "$output" "error: consistency check failed"
+}
+
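The cp and curl targets in these tests spell out the on-disk replica layout: massifs land under log/<logid>/massifs/ and checkpoints (seals) under log/<logid>/checkpoints/, with the massif index as a 16-digit zero-padded filename. A sketch of those paths; since every example in this diff uses index 0, whether the padding is decimal or hexadecimal is indistinguishable here, so the format verb is an assumption:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// massifPath and checkpointPath reproduce the layout the shell tests poke at.
func massifPath(replicaDir, logID string, massifIndex uint32) string {
	return filepath.Join(replicaDir, "log", logID, "massifs", fmt.Sprintf("%016d.log", massifIndex))
}

func checkpointPath(replicaDir, logID string, massifIndex uint32) string {
	return filepath.Join(replicaDir, "log", logID, "checkpoints", fmt.Sprintf("%016d.sth", massifIndex))
}

func main() {
	fmt.Println(massifPath("merklelogs", "6ea5cd00-c711-3649-6914-7b125928bbb4", 0))
	// merklelogs/log/6ea5cd00-c711-3649-6914-7b125928bbb4/massifs/0000000000000000.log
}
```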
+# this test ensures that veracity refuses to work with replica directories that
+# mix tenant massifs together. While the consistency checks would prevent
+# accidental extension of the wrong log, the failure mode would be very confusing
+# and potentially alarming to the user.
+testWatchReplicateErrorForMixedTenants() {
+
+    local output
+
+    local logid=${PROD_PUBLIC_LOGID}
+    local tenant=${PROD_PUBLIC_TENANT_ID}
+    # Note: this tenant is known to have > 1 massif at the time of writing and logs don't get shorter
+    local other_logid='b197ba3c-44fe-4b1a-bbe8-bd9674b2bd17'
+    local other_tenant="tenant/$other_logid"
+    local replicadir=$TEST_TMPDIR/merklelogs
+    local SHA=shasum
+
+    rm -rf $replicadir
+
+    # replicate massif 0 from the main tenant
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
+        --tenant=$tenant watch --horizon 10000h |
+        $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$tenant replicate-logs --massif=0 --replicadir=$replicadir)
+    assertEquals "watch-public should return a 0 exit code" 0 $?
+
+    # explicitly fetch a massif 0 from a different tenant and place it in the same replica directory using a different filename
+    local other_log_url=${other_log_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifs/0000000000000000.log}
+    local other_seal_url=${other_seal_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifseals/0000000000000000.sth}
+    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $other_log_url -o $replicadir/log/$logid/massifs/other_tenant.log
+    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $other_seal_url -o $replicadir/log/$logid/checkpoints/other_tenant.sth
+
+    # Now attempt to extend the replica. We chose $tenant because we know it has
+    # more than one massif, so this command will always attempt to extend the
+    # replica directory.
+    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
+        --tenant=$tenant watch --horizon 10000h |
+        $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$tenant replicate-logs --replicadir=$replicadir)
+    assertEquals "extending a replica directory with mixed tenants should exit 1" 1 $?
+    assertContains "$output" "error: consistency check failed"
+}
diff --git a/tests/systemtest/test-setup.sh b/tests/systemtest/test-setup.sh
new file mode 100644
index 0000000..8e2b27a
--- /dev/null
+++ b/tests/systemtest/test-setup.sh
@@ -0,0 +1,48 @@
+#! /bin/bash
+
+VERACITY_INSTALL=${VERACITY_INSTALL:-../../veracity}
+DATATRAILS_URL=${DATATRAILS_URL:-https://app.datatrails.ai}
+PUBLIC_ASSET_ID=${PUBLIC_ASSET_ID:-publicassets/87dd2e5a-42b4-49a5-8693-97f40a5af7f8}
+PUBLIC_EVENT_ID=${PUBLIC_EVENT_ID:-publicassets/87dd2e5a-42b4-49a5-8693-97f40a5af7f8/events/a022f458-8e55-4d63-a200-4172a42fc2aa}
+
+PROD_PUBLIC_LOGID=6ea5cd00-c711-3649-6914-7b125928bbb4
+PROD_PUBLIC_TENANT_ID=${PROD_PUBLIC_TENANT_ID:-tenant/$PROD_PUBLIC_LOGID}
+
+PROD_LOG_URL=${PROD_LOG_URL:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${PROD_PUBLIC_TENANT_ID}/0/massifs/0000000000000000.log}
+TEST_TMPDIR=${TEST_TMPDIR:-${SHUNIT_TMPDIR}}
+EMPTY_DIR=$TEST_TMPDIR/empty
+PROD_DIR=$TEST_TMPDIR/prod
+DUP_DIR=$TEST_TMPDIR/duplicate-massifs
+PROD_LOCAL_BLOB_FILE="$PROD_DIR/mmr.log"
+INVALID_BLOB_FILE="$TEST_TMPDIR/invalid.log"
+
+oneTimeSetUp() {
+    mkdir -p $EMPTY_DIR
+    mkdir -p $PROD_DIR
+    mkdir -p $DUP_DIR
+    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $PROD_LOG_URL -o $PROD_LOCAL_BLOB_FILE
+    touch $INVALID_BLOB_FILE
+
+    # Place a duplicate copy of the prod massif file in DUP_DIR. The
+    # replication should refuse to work with a directory that has multiple
+    # massif files for the same massif index.
+ cp $PROD_LOCAL_BLOB_FILE $DUP_DIR/prod-mmr.log + + assertTrue "prod MMR blob file should be present" "[ -r $PROD_LOCAL_BLOB_FILE ]" + assertTrue "invalid MMR blob file should be present" "[ -r $INVALID_BLOB_FILE ]" +} + +assertStringMatch() { + local message="$1" + local expected="$2" + local actual="$3" + + # Normalize by converting all spaces to a single space, removing leading/trailing spaces and punctuation. + expected=$(echo "$expected" | sed -e 's/[[:space:]]\+/ /g' -e 's/^[[:space:]]*//;s/[[:space:]]*[[:punct:]]*$//') + actual=$(echo "$actual" | sed -e 's/[[:space:]]\+/ /g' -e 's/^[[:space:]]*//;s/[[:space:]]*[[:punct:]]*$//') + + echo "Expected (hex):" && echo "$expected" | hexdump -C + echo "Actual (hex):" && echo "$actual" | hexdump -C + + assertEquals "$message" "$expected" "$actual" +} diff --git a/tests/systemtest/test.sh b/tests/systemtest/test.sh deleted file mode 100755 index 606b5f7..0000000 --- a/tests/systemtest/test.sh +++ /dev/null @@ -1,447 +0,0 @@ -#! /bin/bash - -VERACITY_INSTALL=${VERACITY_INSTALL:-../../veracity} -DATATRAILS_URL=${DATATRAILS_URL:-https://app.datatrails.ai} -PUBLIC_ASSET_ID=${PUBLIC_ASSET_ID:-publicassets/87dd2e5a-42b4-49a5-8693-97f40a5af7f8} -PUBLIC_EVENT_ID=${PUBLIC_EVENT_ID:-publicassets/87dd2e5a-42b4-49a5-8693-97f40a5af7f8/events/a022f458-8e55-4d63-a200-4172a42fc2aa} - -PROD_PUBLIC_TENANT_ID=${PROD_PUBLIC_TENANT_ID:-tenant/6ea5cd00-c711-3649-6914-7b125928bbb4} -SOAK_PUBLIC_TENANT_ID=${SOAK_PUBLIC_TENANT_ID:-tenant/2280c2c6-21c9-67b2-1e16-1c008a709ff0} - -PROD_LOG_URL=${PROD_LOG_URL:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${PROD_PUBLIC_TENANT_ID}/0/massifs/0000000000000000.log} -SOAK_LOG_URL=${SOAK_LOG_URL:-https://app.soak.stage.datatrails.ai/verifiabledata/merklelogs/v1/mmrs/${SOAK_PUBLIC_TENANT_ID}/0/massifs/0000000000000000.log} -TEST_TMPDIR=${TEST_TMPDIR:-${SHUNIT_TMPDIR}} -EMPTY_DIR=$TEST_TMPDIR/empty -PROD_DIR=$TEST_TMPDIR/prod -SOAK_DIR=$TEST_TMPDIR/soak -DUP_DIR=$TEST_TMPDIR/duplicate-massifs -PROD_LOCAL_BLOB_FILE="$PROD_DIR/mmr.log" -SOAK_LOCAL_BLOB_FILE="$SOAK_DIR/soak-mmr.log" -INVALID_BLOB_FILE="$TEST_TMPDIR/invalid.log" - -oneTimeSetUp() { - mkdir -p $EMPTY_DIR - mkdir -p $PROD_DIR - mkdir -p $SOAK_DIR - mkdir -p $DUP_DIR - curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $PROD_LOG_URL -o $PROD_LOCAL_BLOB_FILE - curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $SOAK_LOG_URL -o $SOAK_LOCAL_BLOB_FILE - touch $INVALID_BLOB_FILE - - # Duplicate the prod and soak massif files in a single directory. The - # replication should refuse to work with a directory that has multiple - # massif files for the same massif index. - cp $PROD_LOCAL_BLOB_FILE $DUP_DIR/prod-mmr.log - cp $SOAK_LOCAL_BLOB_FILE $DUP_DIR/soak-mmr.log - - assertTrue "prod MMR blob file should be present" "[ -r $PROD_LOCAL_BLOB_FILE ]" - assertTrue "soak MMR blob file should be present" "[ -r $SOAK_LOCAL_BLOB_FILE ]" - assertTrue "invalid MMR blob file should be present" "[ -r $INVALID_BLOB_FILE ]" -} - -assertStringMatch() { - local message="$1" - local expected="$2" - local actual="$3" - - # Normalize by converting all spaces to a single space, removing leading/trailing spaces and punctuation. 
- expected=$(echo "$expected" | sed -e 's/[[:space:]]\+/ /g' -e 's/^[[:space:]]*//;s/[[:space:]]*[[:punct:]]*$//') - actual=$(echo "$actual" | sed -e 's/[[:space:]]\+/ /g' -e 's/^[[:space:]]*//;s/[[:space:]]*[[:punct:]]*$//') - - echo "Expected (hex):" && echo "$expected" | hexdump -C - echo "Actual (hex):" && echo "$actual" | hexdump -C - - - assertEquals "$message" "$expected" "$actual" -} - -testVeracityVersion() { - local output - output=$($VERACITY_INSTALL --version) - assertEquals "veracity --version should return a 0 exit code" 0 $? - - echo "$output" | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+' - assertTrue "The output should start with a semantic version string" $? -} - -testVeracityWatchPublicFindsActivity() { - local output - output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID watch --horizon 10000h) - assertEquals "watch-public should return a 0 exit code" 0 $? - assertContains "watch-public should find activity" "$output" "$PROD_PUBLIC_TENANT_ID" -} - -testVeracityWatchLatestFindsActivity() { - local output - output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID watch --latest) - assertEquals "watch-public --latest should return a 0 exit code" 0 $? - assertContains "watch-public --latest should find activity" "$output" "$PROD_PUBLIC_TENANT_ID" -} - -testVeracityReplicateLogsPublicTenantWatchPipe() { - local output - local replicadir=$TEST_TMPDIR/merklelogs - - rm -rf $replicadir - output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \ - --tenant=$PROD_PUBLIC_TENANT_ID watch --horizon 10000h \ - | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --ancestors=0 --replicadir=$replicadir) - assertEquals "watch-public should return a 0 exit code" 0 $? - - COUNT=$(find $replicadir -type f | wc -l | tr -d ' ') - assertEquals "should replicate one massif and one seal" "2" "$COUNT" -} - -testVeracityReplicateLogsPublicTenantWatchLatestFlag() { - local output - local replicadir=$TEST_TMPDIR/merklelogs - - rm -rf $replicadir - output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --latest --ancestors=0 --replicadir=$replicadir) - assertEquals "replicate-logs --latest should return a 0 exit code" 0 $? - - COUNT=$(find $replicadir -type f | wc -l | tr -d ' ') - assertEquals "should replicate one massif and one seal" "2" "$COUNT" -} - -testVerifySingleEvent() { - # Check if the response status code is 200 - local response - response=$(curl -sL -w "%{http_code}" $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID -o /dev/null) - assertEquals 200 "$response" - # Verify the event and check if the exit code is 0 - curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID verify-included - assertEquals "Verifying a valid single event should result in a 0 exit code" 0 $? 
-} - -testVerifyListEvents() { - # Check if the response status code is 200 - local response - response=$(curl -sL -w "%{http_code}" $DATATRAILS_URL/archivist/v2/$PUBLIC_ASSET_ID/events -o /dev/null) - assertEquals 200 "$response" - # Verify the events on the asset and check if the exit code is 0 - curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_ASSET_ID/events | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID verify-included - assertEquals "Verifying events on a vaid asset should result in a 0 exit code" 0 $? -} - -testVerifySingleEventWithLocalMassifCopy() { - # Verify the event and check if the exit code is 0 - curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | $VERACITY_INSTALL --data-local $PROD_LOCAL_BLOB_FILE --tenant=$PROD_PUBLIC_TENANT_ID verify-included - assertEquals "verifying valid events with a local copy of the massif should result in a 0 exit code" 0 $? -} - -testFindTrieEntrySingleEvent() { - # Verify the trie key for the known event is on the log at the correct position. - PUBLIC_EVENT_PERMISSIONED_ID=${PUBLIC_EVENT_ID#"public"} - output=$(VERACITY_IKWID=true $VERACITY_INSTALL find-trie-entries --log-tenant $PROD_PUBLIC_TENANT_ID --app-id $PUBLIC_EVENT_PERMISSIONED_ID) - assertEquals "verifying finding the trie entry of a known public prod event from the datatrails log should match mmr index 663" "matches: [663]" "$output" -} - -testFindTrieEntrySingleEventWithLocalMassifCopy() { - # Verify the trie key for the known event is on the log at the correct position for a local log. - PUBLIC_EVENT_PERMISSIONED_ID=${PUBLIC_EVENT_ID#"public"} - output=$(VERACITY_IKWID=true $VERACITY_INSTALL --data-local $PROD_LOCAL_BLOB_FILE find-trie-entries --log-tenant $PROD_PUBLIC_TENANT_ID --app-id $PUBLIC_EVENT_PERMISSIONED_ID) - assertEquals "verifying finding the trie entry of a known public prod event from a local log should match mmr index 663" "matches: [663]" "$output" -} - -testFindMMREntrySingleEvent() { - # Verify the mmr entry for the known event is on the log at the correct position. - output=$(curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | VERACITY_IKWID=true $VERACITY_INSTALL find-mmr-entries --log-tenant $PROD_PUBLIC_TENANT_ID) - assertEquals "verifying finding the mmr entry of a known public prod event from the datatrails log should match mmr index 663" "matches: [663]" "$output" -} - -testFindMMREntrySingleEventWithLocalMassifCopy() { - # Verify the mmr entry for the known event is on the log at the correct position. - output=$(curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | VERACITY_IKWID=true $VERACITY_INSTALL --data-local $PROD_LOCAL_BLOB_FILE find-mmr-entries --log-tenant $PROD_PUBLIC_TENANT_ID) - assertEquals "verifying finding the mmr entry of a known public prod event from a local log should match mmr index 663" "matches: [663]" "$output" -} - -testReplicateErrorForLogShorterThanSeal() { - - local output - local other_tenant - local tampered_log_url - local tampered_seal_url - local replicadir=$TEST_TMPDIR/merklelogs - - # Note: this tenant belongs to Joe Gough and he has promised never to fill the first massif - other_tenant=tenant/97e90a09-8c56-40df-a4de-42fde462ef6f - - rm -rf $replicadir - # first get the prod public tenant replicated for massif 0. NOTE: this is a full massif - output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --massif 0 --replicadir=$replicadir) - assertEquals "0: should return a 0 exit code" 0 $? 
- - COUNT=$(find $replicadir -type f | wc -l | tr -d ' ') - assertEquals "should replicate one massif and one seal" "2" "$COUNT" - - # now get a different prod tenant log and seal. NOTE the log is partially full for this tenant - tampered_log_url=${tampered_log_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifs/0000000000000000.log} - tampered_seal_url=${tampered_seal_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifseals/0000000000000000.sth} - curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $tampered_log_url -o tampered.log - - # copy over the different (shorter) tenant log for massif 0 - cp tampered.log $replicadir/$PROD_PUBLIC_TENANT_ID/0/massifs/0000000000000000.log - - # attempt to replicate the logs again, the local log data is for the wrong - # tenant and is *less* than the seal expects, but the local seal is correct - # for the replaced data and the remote seal - output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --replicadir=$replicadir) - assertEquals "1: a tampered log should exit 1" 1 $? - assertContains "$output" "error: There is insufficient data in the massif context to generate a consistency proof against the provided state" -} - -# test veracity can't extend the replica of the wrong tenant -# -# When extending a local replica, if the local tenant log data is from a tenant -# other than the requested remote, replication should fail due to consistency -# checks. This is essentially equivalent to a tamper attempt. -# -# There are two cases that are important: -# 1. The higest indexed local massif is incomplete and so the remote massif is used to extend it. -# 2. The highest indexed local massif is complete and so the remote massif is copied, leaving the original unchanged. -# -# This test suite covers only the second case. Thee first case can only be -# tested with synthesized data, or interaction with a live system, and so is -# easier to cover in the integration tests. However, the same checks are -# excercised in both cases and so this test gives a lot of confidence both -# situations are sound. -# -# Note that the --ancestor flag can be used to limit how many massifs are -# replicated. This can cause the replica to "start again" because the replica is -# so far behind that the --ancestor limit forces a gap. In this case consistency -# of the remote is not checked against the local massif, and in that case the -# replication would succeded, the local replica of the foregn tenant would not -# be updated. And the replica would be left with massifs from multiple tenants. -testReplicateErrorForMixedTenants() { - - local output - local other_tenant - local tampered_log_url - local tampered_seal_url - local replicadir=$TEST_TMPDIR/merklelogs - - # Note: this tenant is known to have > 1 massif at the time of writing and logs don't get shorter - other_tenant=tenant/b197ba3c-44fe-4b1a-bbe8-bd9674b2bd17 - - rm -rf $replicadir - # first get the prod public tenant replicated for massif 0. NOTE: this is a full massif - output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --massif 0 --replicadir=$replicadir) - assertEquals "should return a 0 exit code" 0 $? - - COUNT=$(find $replicadir -type f | wc -l | tr -d ' ') - assertEquals "should replicate one massif and one seal" "2" "$COUNT" - - # now get a different prod tenant log and seal. 
-    tampered_log_url=${tampered_log_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifs/0000000000000000.log}
-    tampered_seal_url=${tampered_seal_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifseals/0000000000000000.sth}
-    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $tampered_log_url -o tampered.log
-    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $tampered_seal_url -o tampered.sth
-
-    # copy over the different tenant log for massif 0
-    cp tampered.log $replicadir/$PROD_PUBLIC_TENANT_ID/0/massifs/0000000000000000.log
-
-    # attempt to replicate the logs again; the local log data is for the wrong tenant, but the local seal remains the correct seal for the data that was replaced, and matches the remote seal
-    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --replicadir=$replicadir)
-    assertEquals "1: extending an inconsistent replica should exit 1" 1 $?
-    assertContains "$output" "error: the seal signature verification failed: failed to verify seal for massif 0"
-
-    # now add in the seal from the other log, so that the local log and seal are consistent and locally verifiable.
-    cp tampered.sth $replicadir/$PROD_PUBLIC_TENANT_ID/0/massifseals/0000000000000000.sth
-
-    # attempt to replicate the logs again
-    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID replicate-logs --latest --replicadir=$replicadir)
-    assertEquals "2: extending an inconsistent replica should exit 1" 1 $?
-    assertContains "$output" "error: consistency check failed: the accumulator produced for the trusted base state doesn't match the root produced for the seal state fetched from the log"
-}
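-
-# For reference, a sketch of the watch | replicate-logs pattern the following
-# tests exercise ($TENANT and $REPLICADIR are placeholders here; the flags are
-# the same ones used below):
-#
-#   veracity --data-url $DATATRAILS_URL/verifiabledata --tenant=$TENANT watch --horizon 10000h \
-#     | veracity --data-url $DATATRAILS_URL/verifiabledata --tenant=$TENANT replicate-logs --replicadir=$REPLICADIR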
-
-# test veracity can't update the replica for a tenant whose log has been tampered with
-#
-# This test repeats testReplicateErrorForMixedTenants, but does so using the
-# combination of watch | replicate-logs, which permits finer control over the
-# replica
-testWatchReplicateErrorForMixedTenants() {
-
-    local output
-    local other_tenant
-    local tampered_log_url
-    local tampered_seal_url
-
-    local replicadir=$TEST_TMPDIR/merklelogs
-
-    # Note: this tenant is known to have > 1 massif at the time of writing and logs don't get shorter
-    other_tenant=tenant/b197ba3c-44fe-4b1a-bbe8-bd9674b2bd17
-
-    # first get the prod tenant replicated for massif 0. NOTE: this is a partially full massif
-    rm -rf $replicadir
-    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
-        --tenant=$other_tenant watch --horizon 10000h \
-        | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$other_tenant replicate-logs --ancestors=0 --massif 0 --replicadir=$replicadir)
-    assertEquals "watch-public should return a 0 exit code" 0 $?
-
-    COUNT=$(find $replicadir -type f | wc -l | tr -d ' ')
-    assertEquals "should replicate one massif and one seal" "2" "$COUNT"
-
-    # now get a different prod public tenant log and seal. NOTE: this is a full massif
-    tampered_log_url=${tampered_log_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${PROD_PUBLIC_TENANT_ID}/0/massifs/0000000000000000.log}
-    tampered_seal_url=${tampered_seal_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${PROD_PUBLIC_TENANT_ID}/0/massifseals/0000000000000000.sth}
-    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $tampered_log_url -o tampered.log
-    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $tampered_seal_url -o tampered.sth
-
-    # copy over the different tenant log for massif 0
-    cp tampered.log $replicadir/$other_tenant/0/massifs/0000000000000000.log
-
-    # attempt to replicate the logs again
-    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
-        --tenant=$other_tenant watch --horizon 10000h \
-        | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$other_tenant replicate-logs --replicadir=$replicadir)
-    assertEquals "extending an inconsistent replica should exit 1" 1 $?
-    assertContains "$output" "error: the seal signature verification failed: failed to verify seal for massif 0"
-
-    # now attempt to change the seal to the tampered log seal
-    cp tampered.sth $replicadir/$other_tenant/0/massifseals/0000000000000000.sth
-
-    # attempt to replicate the logs again
-    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
-        --tenant=$other_tenant watch --horizon 10000h \
-        | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$other_tenant replicate-logs --replicadir=$replicadir)
-    assertEquals "extending an inconsistent replica should exit 1" 1 $?
-    assertContains "$output" "error: consistency check failed"
-}
-
-# this test ensures that veracity refuses to work with replica directories that
-# mix tenant massifs together. While the consistency checks would prevent
-# accidental extension of the wrong log, the failure mode would be very
-# confusing and potentially alarming to the user.
-testReplicateErrorForMixedTenantReplicaDir() {
-
-    local output
-
-    local tenant=${PROD_PUBLIC_TENANT_ID}
-    # Note: this tenant is known to have > 1 massif at the time of writing and logs don't get shorter
-    local other_tenant='tenant/b197ba3c-44fe-4b1a-bbe8-bd9674b2bd17'
-    local replicadir=$TEST_TMPDIR/merklelogs
-    local SHA=shasum
-
-    rm -rf $replicadir
-
-    # replicate massif 0 from the main tenant
-    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
-        --tenant=$tenant watch --horizon 10000h \
-        | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$tenant replicate-logs --massif=0 --replicadir=$replicadir)
-    assertEquals "watch-public should return a 0 exit code" 0 $?
-
-    # explicitly fetch a massif 0 from a different tenant and place it in the same replica directory using a different filename
-    local other_log_url=${other_log_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifs/0000000000000000.log}
-    local other_seal_url=${other_seal_url:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${other_tenant}/0/massifseals/0000000000000000.sth}
-    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $other_log_url -o $replicadir/$tenant/0/massifs/other_tenant.log
-    curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $other_seal_url -o $replicadir/$tenant/0/massifseals/other_tenant.sth
-
-    # Now attempt to extend the replica.
-    # We chose $tenant because we know it has more than one massif, so this
-    # command will always attempt to extend the replica directory.
-    output=$($VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata \
-        --tenant=$tenant watch --horizon 10000h \
-        | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$tenant replicate-logs --replicadir=$replicadir)
-    assertEquals "extending a replica directory with mixed tenants should exit 1" 1 $?
-    assertContains "$output" "error: consistency check failed"
-}
-
-testVerboseOutput() {
-    local expected_output="verifying events dir: defaulting to the standard container merklelogs verifying for tenant: $PROD_PUBLIC_TENANT_ID verifying: 663 334 018fa97ef269039b00 publicassets/87dd2e5a-42b4-49a5-8693-97f40a5af7f8/events/a022f458-8e55-4d63-a200-4172a42fc2aa OK|663 334|[aea799fb2a8c4bbb6eda1dd2c1e69f8807b9b06deeaf51b9e0287492cefd8e4c, 9f0183c7f79fd81966e104520af0f90c8447f1a73d4e38e7f2f23a0602ceb617, da21cb383d63896a9811f06ebd2094921581d8eb72f7fbef566b730958dc35f1, 51ea08fd02da3633b72ef0b09d8ba4209db1092d22367ef565f35e0afd4b0fc3, 185a9d55cf507ef85bd264f4db7228e225032c48da689aa8597e11059f45ab30, bab40107f7d7bebfe30c9cea4772f9eb3115cae1f801adab318f90fcdc204bdc, 94ca607094ead6fcd23f52851c8cdd8c6f0e2abde20dca19ba5abc8aff70d0d1, ba6d0fd8922342aafbba6073c5510103b077a7de9cb2d72fb652510110250f9e, 7fafc7edc434225afffc19b0582efa2a71b06a2d035358356df0a52d2256c235, 18c9b525a75ff8386f108abed53e01f79173892bb7fe90805f749d3d3af09d28] verifying: 916 461 019007e7960d052e00 publicassets/87dd2e5a-42b4-49a5-8693-97f40a5af7f8/events/999773ed-cc92-4d9c-863f-b418418705ea OK|916 461|[25ee5db5cce059f89372dd3a54bfa6fd9f77d8a09eef36a88e2cba12631eaef6, df700cc8323dcece5185b4cdd769854369c59d9a38b364fabaebe3ad83aa2693, 1dd1250b52ed3f0a408f6928182bec55ddb2b5648c834cc1e104fe2029ec22e3, 292ce1ef003fb25f3bbdb4de5d9af91cdbf85185224f560d351ed2558723b08e, 118cbc9b298a5442177728c707dea6adf1a65274cf0a1e4ac09aa22dd38ebdb0, 27b3d13f8faf19ebaa3525c8b61825f25b772de1121d1e51f5f3d278b6ed00db, 2d7a6a491d378f5c4c97de2e2ab36bc6f8e6ec80ecd0b61f263ffcc754f10576, 302b47f6a440c664f406fb2c13996d46804983c4bab0fe978e8b5f3a4db65f78, 7fafc7edc434225afffc19b0582efa2a71b06a2d035358356df0a52d2256c235, 18c9b525a75ff8386f108abed53e01f79173892bb7fe90805f749d3d3af09d28]"
-    local output
-
-    # Verify the events on the asset and check if the exit code is 0
-    output=$(curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_ASSET_ID/events | $VERACITY_INSTALL --data-url $DATATRAILS_URL/verifiabledata --tenant=$PROD_PUBLIC_TENANT_ID --loglevel=verbose verify-included 2>&1)
-    assertEquals "Verifying events on a valid asset should result in a 0 exit code" 0 $?
-
-    # check that the output contains the expected string
-    assertContains "Verifying verbose output matches" "$output" "$expected_output"
-}
-
-testHelpOutputNoArgs() {
-    local output
-
-    output=$($VERACITY_INSTALL 2>&1)
-    assertEquals "Calling veracity with no args should return a help message and a zero exit code" 0 $?
-    assertNotNull "help message should be present" "$output"
-}
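-
-# The remaining tests exercise verify-included against a local massif copy.
-# The pattern, as a sketch (the variables are defined in the suite setup):
-#
-#   curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID \
-#     | $VERACITY_INSTALL --data-local $PROD_LOCAL_BLOB_FILE --tenant=$PROD_PUBLIC_TENANT_ID verify-included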
-
-testValidEventNotinMassif() {
-    local expected_message="error: the entry is not in the log. for tenant $PROD_PUBLIC_TENANT_ID"
-    local output
-
-    output=$(curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | $VERACITY_INSTALL --data-local $SOAK_LOCAL_BLOB_FILE --tenant=$PROD_PUBLIC_TENANT_ID verify-included 2>&1)
-    assertEquals "verifying an event not in the massif should result in an error" 1 $?
-    assertStringMatch "Error should have the correct error message" "$expected_message" "$output"
-}
-
-testNon200Response() {
-    local invalid_event_ID=publicassets/87dd2e5a-42b4-49a5-8693-97f40a5af7f8/events/a022f458-8e55-4d63-a200-4172a42fc2ab
-    local output
-
-    output=$(curl -sL $DATATRAILS_URL/archivist/v2/$invalid_event_ID | $VERACITY_INSTALL --data-local $PROD_LOCAL_BLOB_FILE --tenant=$PROD_PUBLIC_TENANT_ID verify-included 2>&1)
-    assertEquals "a non 200 response being piped in should result in a non 0 exit code" 1 $?
-    assertNotNull "Error message should be present" "$output"
-}
-
-testMissingMassifFile() {
-    local expected_message="error: the entry is not in the log. for tenant $PROD_PUBLIC_TENANT_ID"
-    local output
-
-    output=$(curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | $VERACITY_INSTALL --data-local $EMPTY_DIR --tenant=$PROD_PUBLIC_TENANT_ID verify-included 2>&1)
-    assertEquals "verifying an event not in the massif should result in an error" 1 $?
-    assertContains "$output" "a log file corresponding to the massif index was not found"
-}
-
-testNotBlobFile() {
-    local expected_message="error: the entry is not in the log. for tenant $PROD_PUBLIC_TENANT_ID"
-    local output
-
-    output=$(curl -sL $DATATRAILS_URL/archivist/v2/$PUBLIC_EVENT_ID | $VERACITY_INSTALL --data-local $INVALID_BLOB_FILE --tenant=$PROD_PUBLIC_TENANT_ID verify-included 2>&1)
-    assertEquals "verifying an event not in the massif should result in an error" 1 $?
-    assertContains "$output" "a log file corresponding to the massif index was not found"
-}
-
-testInvalidBlobUrl() {
-    local expected_message="error: no json given"
-    local invalid_domain="https://app.datatrails.com"
-    local invalid_url="$invalid_domain/verifiabledata"
-    local output
-    output=$(curl -sL $invalid_domain/archivist/v2/$PUBLIC_EVENT_ID | $VERACITY_INSTALL --data-url $invalid_url --tenant=$PROD_PUBLIC_TENANT_ID verify-included 2>&1)
-
-    assertEquals "verifying an event not in the massif should result in an error" 1 $?
-    assertStringMatch "Error should have the correct error message" "$expected_message" "$output"
-}
-
-# test that the manual post release test works when the local directory has junk (and small) files in the replica directory
-testReleaseCheckVerifyIncludedMixedFilesLessThanHeaderSize() {
-    local output
-
-    # This test always targets the production instance as it replicates a manual release check
-    local tenant=${PROD_PUBLIC_TENANT_ID}
-    local replicadir=$TEST_TMPDIR/mixed
-    local datatrails_url="https://app.datatrails.ai"
-
-    rm -rf $replicadir*
-    mkdir -p $replicadir
-
-    local event_id="publicassets/14ba3825-e174-40ac-9dac-da1e7a39f785/events/1421caf9-31c4-4f13-91b0-7eeae36784cb"
-
-    # Create a file that is not a valid massif and is also shorter than the 32 byte header
-    echo "<342b" > $replicadir/small.file.whatever
-
-    # run veracity verify-included with mmr.log in the cwd, as in the test plan
-    local veracity_bin=$(realpath $VERACITY_INSTALL)
-
-    cd $replicadir
-    echo curl -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $datatrails_url/verifiabledata/merklelogs/v1/mmrs/$tenant/0/massifs/0000000000000001.log -o mmr.log
-    curl -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $datatrails_url/verifiabledata/merklelogs/v1/mmrs/$tenant/0/massifs/0000000000000001.log -o mmr.log
-    curl -sL $datatrails_url/archivist/v2/$event_id \
-        | $veracity_bin --data-local mmr.log --tenant=$tenant verify-included
-    assertEquals "verify-included failed" 0 $?
-} diff --git a/tests/systemtest/watch-replicate-logs-latest-interactive.sh b/tests/systemtest/watch-replicate-logs-latest-interactive.sh index 3c44912..0d5c65e 100644 --- a/tests/systemtest/watch-replicate-logs-latest-interactive.sh +++ b/tests/systemtest/watch-replicate-logs-latest-interactive.sh @@ -6,36 +6,28 @@ PUBLIC_ASSET_ID=${PUBLIC_ASSET_ID:-publicassets/87dd2e5a-42b4-49a5-8693-97f40a5a PUBLIC_EVENT_ID=${PUBLIC_EVENT_ID:-publicassets/87dd2e5a-42b4-49a5-8693-97f40a5af7f8/events/a022f458-8e55-4d63-a200-4172a42fc2aa} PROD_PUBLIC_TENANT_ID=${PROD_PUBLIC_TENANT_ID:-tenant/6ea5cd00-c711-3649-6914-7b125928bbb4} -SOAK_PUBLIC_TENANT_ID=${SOAK_PUBLIC_TENANT_ID:-tenant/2280c2c6-21c9-67b2-1e16-1c008a709ff0} PROD_LOG_URL=${PROD_LOG_URL:-${DATATRAILS_URL}/verifiabledata/merklelogs/v1/mmrs/${PROD_PUBLIC_TENANT_ID}/0/massifs/0000000000000000.log} -SOAK_LOG_URL=${SOAK_LOG_URL:-https://app.soak.stage.datatrails.ai/verifiabledata/merklelogs/v1/mmrs/${SOAK_PUBLIC_TENANT_ID}/0/massifs/0000000000000000.log} TEST_TMPDIR=${TEST_TMPDIR:-${SHUNIT_TMPDIR}} EMPTY_DIR=$TEST_TMPDIR/empty PROD_DIR=$TEST_TMPDIR/prod -SOAK_DIR=$TEST_TMPDIR/soak DUP_DIR=$TEST_TMPDIR/duplicate-massifs PROD_LOCAL_BLOB_FILE="$PROD_DIR/mmr.log" -SOAK_LOCAL_BLOB_FILE="$SOAK_DIR/soak-mmr.log" INVALID_BLOB_FILE="$TEST_TMPDIR/invalid.log" oneTimeSetUp() { mkdir -p $EMPTY_DIR mkdir -p $PROD_DIR - mkdir -p $SOAK_DIR mkdir -p $DUP_DIR curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $PROD_LOG_URL -o $PROD_LOCAL_BLOB_FILE - curl -s -H "x-ms-blob-type: BlockBlob" -H "x-ms-version: 2019-12-12" $SOAK_LOG_URL -o $SOAK_LOCAL_BLOB_FILE touch $INVALID_BLOB_FILE # Duplicate the prod and soak massif files in a single directory. The # replication should refuse to work with a directory that has multiple # massif files for the same massif index. cp $PROD_LOCAL_BLOB_FILE $DUP_DIR/prod-mmr.log - cp $SOAK_LOCAL_BLOB_FILE $DUP_DIR/soak-mmr.log assertTrue "prod MMR blob file should be present" "[ -r $PROD_LOCAL_BLOB_FILE ]" - assertTrue "soak MMR blob file should be present" "[ -r $SOAK_LOCAL_BLOB_FILE ]" assertTrue "invalid MMR blob file should be present" "[ -r $INVALID_BLOB_FILE ]" } diff --git a/tests/testcontext/testcontext.go b/tests/testcontext/testcontext.go new file mode 100644 index 0000000..42a5022 --- /dev/null +++ b/tests/testcontext/testcontext.go @@ -0,0 +1,83 @@ +//go:build integration && azurite + +package testcontext + +import ( + "testing" + "time" + + "github.com/datatrails/go-datatrails-merklelog/massifs" + "github.com/datatrails/go-datatrails-merklelog/massifs/storage" + azstoragetesting "github.com/robinbryce/go-merklelog-azure/tests/storage" + "github.com/robinbryce/go-merklelog-provider-testing/mmrtesting" + "github.com/stretchr/testify/require" +) + +type TestContext struct { + azstoragetesting.TestContext + LastTime time.Time + numEventsGenerated int +} + +func NewDefaultTestContext(t *testing.T, opts ...massifs.Option) *TestContext { + tc := azstoragetesting.NewDefaultTestContext(t, opts...) 
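+	// Wrap the azurite-backed test context; LastTime records when the
+	// context was created.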
+ return &TestContext{ + TestContext: *tc, + LastTime: time.Now(), + } +} + +func NewLogBuilderFactory(tc *TestContext) mmrtesting.LogBuilder { + builder := azstoragetesting.NewLogBuilder(&tc.TestContext) + builder.LeafGenerator = mmrtesting.NewDataTrailsLeafGenerator(tc.GetG()) + return builder +} + +func CreateLogBuilderContext(t *testing.T, massifHeight uint8, massifCount uint32, opts ...massifs.Option) (*TestContext, storage.LogID, mmrtesting.LogBuilder, mmrtesting.GeneratedLeaves) { + + tc := NewDefaultTestContext(t, opts...) + logID := tc.G.NewLogID() + builder, generated := CreateLogForContext(tc, logID, massifHeight, massifCount) + return tc, logID, builder, generated +} + +func CreateLogContext(t *testing.T, massifHeight uint8, massifCount uint32, opts ...massifs.Option) (*TestContext, storage.LogID) { + + tc, logID, _, _ := CreateLogBuilderContext(t, massifHeight, massifCount, opts...) + return tc, logID +} + +func CreateLogForContext(tc *TestContext, logID storage.LogID, massifHeight uint8, massifCount uint32) (mmrtesting.LogBuilder, mmrtesting.GeneratedLeaves) { + + builder := NewLogBuilderFactory(tc) + tc.DeleteLog(logID) + err := builder.SelectLog(tc.T.Context(), logID) + require.NoError(tc.T, err) + generated, err := tc.CreateLog(tc.T.Context(), builder, logID, massifHeight, massifCount) + if err != nil { + tc.T.Fatalf("CreateLog failed: %v", err) + } + return builder, generated +} + +func CreateLogsForContext(tc *TestContext, massifHeight uint8, massifCount uint32, logIDs ...storage.LogID) ([]mmrtesting.GeneratedLeaves, []mmrtesting.LogBuilder) { + + var generated []mmrtesting.GeneratedLeaves + var builders []mmrtesting.LogBuilder + + for _, logID := range logIDs { + builder := NewLogBuilderFactory(tc) + tc.DeleteLog(logID) + err := builder.SelectLog(tc.T.Context(), logID) + if err != nil { + tc.T.Fatalf("SelectLog failed: %v", err) + } + builders = append(builders, builder) + gen, err := tc.CreateLog(tc.T.Context(), builder, logID, massifHeight, massifCount) + if err != nil { + tc.T.Fatalf("CreateLog failed: %v", err) + } + generated = append(generated, gen) + } + return generated, builders +} diff --git a/tests/verifyincluded/verifyevents_azurite_test.go b/tests/verifyincluded/verifyevents_azurite_test.go index 1c6c5cf..0b433fb 100644 --- a/tests/verifyincluded/verifyevents_azurite_test.go +++ b/tests/verifyincluded/verifyevents_azurite_test.go @@ -4,35 +4,27 @@ package verifyevents import ( "fmt" - "strings" + "testing" + + "github.com/datatrails/go-datatrails-common-api-gen/assets/v2/assets" "github.com/datatrails/go-datatrails-common/logger" - "github.com/datatrails/go-datatrails-logverification/integrationsupport" + "github.com/datatrails/go-datatrails-merklelog/massifs" "github.com/datatrails/go-datatrails-merklelog/mmr" - "github.com/datatrails/go-datatrails-merklelog/mmrtesting" "github.com/datatrails/go-datatrails-simplehash/simplehash" "github.com/datatrails/veracity" + "github.com/datatrails/veracity/tests/testcontext" + "github.com/forestrie/go-merklelog-datatrails/datatrails" + "github.com/robinbryce/go-merklelog-provider-testing/mmrtesting" "github.com/stretchr/testify/require" ) -func (s *VerifyEventsSuite) newMMRTestingConfig(labelPrefix, tenantIdentity string) mmrtesting.TestConfig { - return mmrtesting.TestConfig{ - StartTimeMS: (1698342521) * 1000, EventRate: 500, - TestLabelPrefix: labelPrefix, - TenantIdentity: tenantIdentity, - Container: strings.ReplaceAll(strings.ToLower(labelPrefix), "_", ""), - } -} - // TestVerifyIncludedMultiMassif tests that 
the veracity sub command verify-included // works for massifs beyond the first one and covers some obvious edge cases. func (s *VerifyEventsSuite) TestVerifyIncludedMultiMassif() { logger.New("TestVerifyIncludedMultiMassif") defer logger.OnExit() - cfg := s.newMMRTestingConfig("TestVerifyIncludedMultiMassif", "") - azurite := mmrtesting.NewTestContext(s.T(), cfg) - massifHeight := uint8(8) leavesPerMassif := mmr.HeightIndexLeafCount(uint64(massifHeight) - 1) @@ -43,8 +35,8 @@ func (s *VerifyEventsSuite) TestVerifyIncludedMultiMassif() { leaves []uint64 }{ // make sure we cover the obvious edge cases - {name: "single massif first few and last few", massifCount: 1, leaves: []uint64{0, 1, 2, leavesPerMassif - 2, leavesPerMassif - 1}}, {name: "2 massifs, last of first and first of last", massifCount: 2, leaves: []uint64{leavesPerMassif - 1, leavesPerMassif}}, + {name: "single massif first few and last few", massifCount: 1, leaves: []uint64{0, 1, 2, leavesPerMassif - 2, leavesPerMassif - 1}}, {name: "5 massifs, first and last of each", massifCount: 5, leaves: []uint64{ 0, leavesPerMassif - 1, 1 * leavesPerMassif, 2*leavesPerMassif - 1, @@ -62,31 +54,27 @@ func (s *VerifyEventsSuite) TestVerifyIncludedMultiMassif() { massifCount := tt.massifCount s.Run(fmt.Sprintf("massifCount:%d", massifCount), func() { - leafHasher := integrationsupport.NewLeafHasher() - g := integrationsupport.NewTestGenerator( - s.T(), cfg.StartTimeMS/1000, &leafHasher, mmrtesting.TestGeneratorConfig{ - StartTimeMS: cfg.StartTimeMS, - EventRate: cfg.EventRate, - TenantIdentity: cfg.TenantIdentity, - TestLabelPrefix: cfg.TestLabelPrefix, - }) - - tenantId0 := g.NewTenantIdentity() - events := integrationsupport.GenerateTenantLog( - &azurite, g, int(tt.massifCount)*int(leavesPerMassif), tenantId0, true, - massifHeight, - ) + tc, logId, _, generated := testcontext.CreateLogBuilderContext( + s.T(), + massifHeight, tt.massifCount, + mmrtesting.WithTestLabelPrefix("TestVerifyIncludedMultiMassif")) for _, iLeaf := range tt.leaves { + + event := datatrailsAssetEvent( + s.T(), generated.Encoded[iLeaf], generated.Args[iLeaf], + generated.MMRIndices[iLeaf], uint8(massifs.Epoch2038), + ) marshaler := simplehash.NewEventMarshaler() - eventJson, err := marshaler.Marshal(events[iLeaf]) + eventJson, err := marshaler.Marshal(event) require.NoError(s.T(), err) s.StdinWriteAndClose(eventJson) + tenantId0 := datatrails.Log2TenantID(logId) err = app.Run([]string{ "veracity", "--envauth", // uses the emulator - "--container", cfg.Container, + "--container", tc.Cfg.Container, "--data-url", s.Env.AzuriteVerifiableDataURL, "--tenant", tenantId0, "--height", fmt.Sprintf("%d", massifHeight), @@ -98,3 +86,16 @@ func (s *VerifyEventsSuite) TestVerifyIncludedMultiMassif() { }) } } + +func datatrailsAssetEvent(t *testing.T, a any, args mmrtesting.AddLeafArgs, index uint64, epoch uint8) *assets.EventResponse { + ae, ok := a.(*assets.EventResponse) + require.True(t, ok, "expected *assets.EventResponse, got %T", a) + + ae.MerklelogEntry = &assets.MerkleLogEntry{ + Commit: &assets.MerkleLogCommit{ + Index: index, + Idtimestamp: massifs.IDTimestampToHex(args.ID, epoch), + }, + } + return ae +} diff --git a/tests/watch/watch_test.go b/tests/watch/watch_test.go index f274deb..249c194 100644 --- a/tests/watch/watch_test.go +++ b/tests/watch/watch_test.go @@ -76,7 +76,7 @@ func (s *WatchCmdSuite) TestNoChangesForFictitiousTenant() { "--tenant", s.Env.UnknownTenantId, "watch", "--latest", }) - assert.Equal(err, veracity.ErrNoChanges) + 
assert.Equal(veracity.ErrNoChanges, err) } // Test that the watch command returns no error when the horizon is set longer than the age of the company diff --git a/v3toeventresponse.go b/v3toeventresponse.go deleted file mode 100644 index e8c060b..0000000 --- a/v3toeventresponse.go +++ /dev/null @@ -1,96 +0,0 @@ -package veracity - -import ( - "fmt" - "time" - - v2assets "github.com/datatrails/go-datatrails-common-api-gen/assets/v2/assets" - "github.com/datatrails/go-datatrails-common-api-gen/attribute/v2/attribute" - "github.com/datatrails/go-datatrails-simplehash/simplehash" - "google.golang.org/protobuf/types/known/timestamppb" -) - -func NewAttribute(value any) (*attribute.Attribute, error) { - switch v := value.(type) { - case string: - return attribute.NewStringAttribute(v), nil - case map[string]string: - return attribute.NewDictAttribute(v), nil - case []map[string]string: - return attribute.NewListAttribute(v), nil - // case []map[string]interface{}: - case []interface{}: - lv := []map[string]string{} - for _, it := range v { - mIface, ok := it.(map[string]interface{}) - if !ok { - continue - } - mString := map[string]string{} - for k, i := range mIface { - s, ok := i.(string) - if !ok { - continue - } - mString[k] = s - } - lv = append(lv, mString) - } - return attribute.NewListAttribute(lv), nil - default: - return nil, fmt.Errorf("value not string, map or list") - } -} -func newEventResponseFromV3(v3 simplehash.V3Event) (*v2assets.EventResponse, error) { - - var err error - event := &v2assets.EventResponse{ - EventAttributes: map[string]*attribute.Attribute{}, - AssetAttributes: map[string]*attribute.Attribute{}, - } - - event.Identity = v3.Identity - - for k, v := range v3.EventAttributes { - if event.EventAttributes[k], err = NewAttribute(v); err != nil { - - return nil, err - } - } - for k, v := range v3.AssetAttributes { - if event.AssetAttributes[k], err = NewAttribute(v); err != nil { - return nil, err - } - } - - event.Operation = v3.Operation - event.Behaviour = v3.Behaviour - - var t time.Time - - if t, err = time.Parse(time.RFC3339Nano, v3.TimestampDeclared); err != nil { - return nil, err - } - event.TimestampDeclared = timestamppb.New(t) - - if t, err = time.Parse(time.RFC3339Nano, v3.TimestampAccepted); err != nil { - return nil, err - } - event.TimestampAccepted = timestamppb.New(t) - - if t, err = time.Parse(time.RFC3339Nano, v3.TimestampCommitted); err != nil { - return nil, err - } - event.TimestampCommitted = timestamppb.New(t) - - if event.PrincipalDeclared, err = newPrincipalFromJson(v3.PrincipalDeclared); err != nil { - return nil, err - } - if event.PrincipalAccepted, err = newPrincipalFromJson(v3.PrincipalAccepted); err != nil { - return nil, err - } - - event.TenantIdentity = v3.TenantIdentity - - return event, nil -} diff --git a/veracitytesting/testcontext.go b/veracitytesting/testcontext.go deleted file mode 100644 index 63f3351..0000000 --- a/veracitytesting/testcontext.go +++ /dev/null @@ -1,118 +0,0 @@ -package veracitytesting - -import ( - "context" - "crypto/sha256" - "errors" - - "github.com/datatrails/go-datatrails-common-api-gen/assets/v2/assets" - "github.com/datatrails/go-datatrails-merklelog/massifs" - "github.com/datatrails/go-datatrails-merklelog/mmrtesting" - "github.com/datatrails/go-datatrails-simplehash/simplehash" - "github.com/stretchr/testify/require" -) - -type LeafHasher struct { - simplehash.HasherV3 -} - -func NewLeafHasher() LeafHasher { - h := LeafHasher{ - HasherV3: simplehash.NewHasherV3(), - } - return h -} - -// 
GenerateTenantLog populates the tenant's blob storage with deterministically
-// generated datatrails merklelog events as leaf nodes, and populates the rest
-// of the mmr from these leaf nodes.
-//
-// Returns the list of generated events with the correct merklelog data.
-//
-// NOTE: deletes all pre-existing blobs for the given tenant first.
-// NOTE: Will only populate the first massif.
-// NOTE: No range checks are performed if you go beyond the first massif.
-func GenerateTenantLog(tc *mmrtesting.TestContext, g EventTestGenerator, eventTotal int, tenantID string, deleteBlobs bool, massifHeight uint8, leafType uint8) []*assets.EventResponse {
-
-	if deleteBlobs {
-		// first delete any blobs already in the massif
-		tc.DeleteBlobsByPrefix(massifs.TenantMassifPrefix(tenantID))
-	}
-
-	c := massifs.NewMassifCommitter(
-		massifs.MassifCommitterConfig{
-			CommitmentEpoch: 1, /* good until 2038 for real. irrelevant for tests as long as everyone uses the same value */
-		},
-		tc.GetLog(),
-		tc.GetStorer(),
-	)
-
-	mc, err := c.GetCurrentContext(context.Background(), tenantID, massifHeight)
-	if err != nil {
-		tc.T.Fatalf("unexpected err: %v", err)
-	}
-
-	g.LeafHasher.Reset()
-
-	batch := g.GenerateEventBatch(eventTotal)
-
-	events := []*assets.EventResponse{}
-	for _, ev := range batch {
-
-		// get the next timestamp id
-		idTimestamp, err1 := g.NextId()
-		require.Nil(tc.T, err1)
-
-		// create a fresh hasher for the generated event
-		hasher := simplehash.NewHasherV3()
-
-		// hash the generated event
-		err1 = hasher.HashEvent(
-			ev,
-			simplehash.WithPrefix([]byte{leafType}),
-			simplehash.WithIDCommitted(idTimestamp))
-		require.Nil(tc.T, err1)
-
-		// get the leaf value (hash of event)
-		leafValue := hasher.Sum(nil)
-
-		// mmrIndex is equal to the count of all nodes
-		mmrIndex := mc.RangeCount()
-
-		// add the generated event to the mmr
-		_, err1 = mc.AddHashedLeaf(sha256.New(), idTimestamp, nil, []byte(ev.TenantIdentity), []byte(ev.GetIdentity()), leafValue)
-		if err1 != nil {
-			if errors.Is(err1, massifs.ErrMassifFull) {
-				var err2 error
-				_, err2 = c.CommitContext(context.Background(), mc)
-				require.Nil(tc.T, err2)
-
-				// We've filled the current massif. GetCurrentContext handles creating new massifs.
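-				// The retried AddHashedLeaf below targets the fresh massif, so
-				// a second ErrMassifFull would be a genuine failure rather
-				// than a boundary crossing.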
-				mc, err2 = c.GetCurrentContext(context.Background(), tenantID, massifHeight)
-				if err2 != nil {
-					tc.T.Fatalf("unexpected err: %v", err2)
-				}
-
-				_, err1 = mc.AddHashedLeaf(sha256.New(), idTimestamp, nil, []byte(ev.TenantIdentity), []byte(ev.GetIdentity()), leafValue)
-			}
-
-			require.Nil(tc.T, err1)
-		}
-
-		// set the event's merklelog entry correctly
-		ev.MerklelogEntry = &assets.MerkleLogEntry{
-			Commit: &assets.MerkleLogCommit{
-				Index:       mmrIndex,
-				Idtimestamp: massifs.IDTimestampToHex(idTimestamp, uint8(c.Cfg.CommitmentEpoch)),
-			},
-		}
-
-		events = append(events, ev)
-	}
-
-	_, err = c.CommitContext(context.Background(), mc)
-	require.Nil(tc.T, err)
-
-	return events
-}
diff --git a/veracitytesting/testeventgenerator.go b/veracitytesting/testeventgenerator.go
deleted file mode 100644
index 847c2a8..0000000
--- a/veracitytesting/testeventgenerator.go
+++ /dev/null
@@ -1,239 +0,0 @@
-package veracitytesting
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"strconv"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/datatrails/go-datatrails-common-api-gen/assets/v2/assets"
-	"github.com/datatrails/go-datatrails-common-api-gen/attribute/v2/attribute"
-	"github.com/datatrails/go-datatrails-merklelog/massifs/snowflakeid"
-	"github.com/datatrails/go-datatrails-merklelog/mmrtesting"
-	"github.com/datatrails/go-datatrails-simplehash/simplehash"
-	"github.com/stretchr/testify/require"
-	"google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
-	resourceChangedProperty            = "resource_changed"
-	resourceChangeMerkleLogStoredEvent = "assetsv2merklelogeventstored"
-	millisecondMultiplier              = int64(1000)
-	names                              = 2
-	assetAttributeWords                = 4
-	eventAttributeWords                = 6
-)
-
-type leafHasher interface {
-	Reset()
-	Sum(b []byte) []byte
-	HashEvent(event *assets.EventResponse, opts ...simplehash.HashOption) error
-}
-
-// EventTestGenerator creates random values of various sorts for testing. It is
-// seeded so that from run to run the values are the same. Intended for white
-// box tests that benefit from a large volume of synthetic data.
-type EventTestGenerator struct {
-	mmrtesting.TestGenerator
-	numEventsGenerated int
-	LeafHasher         leafHasher
-	IdState            *snowflakeid.IDState
-}
-
-func NewAzuriteTestContext(
-	t *testing.T,
-	testLabelPrefix string,
-) (mmrtesting.TestContext, EventTestGenerator, mmrtesting.TestConfig) {
-
-	eventRate := 500
-
-	cfg := mmrtesting.TestConfig{
-		StartTimeMS: (1698342521) * millisecondMultiplier, EventRate: eventRate,
-		TestLabelPrefix: testLabelPrefix,
-		TenantIdentity:  "",
-		Container:       strings.ReplaceAll(strings.ToLower(testLabelPrefix), "_", "")}
-	leafHasher := NewLeafHasher()
-	tc := mmrtesting.NewTestContext(t, cfg)
-	g := NewEventTestGenerator(
-		t, cfg.StartTimeMS/millisecondMultiplier,
-		&leafHasher,
-		mmrtesting.TestGeneratorConfig{
-			StartTimeMS:     cfg.StartTimeMS,
-			EventRate:       cfg.EventRate,
-			TenantIdentity:  cfg.TenantIdentity,
-			TestLabelPrefix: cfg.TestLabelPrefix,
-		},
-	)
-	return tc, g, cfg
-}
-
-// NewEventTestGenerator creates a deterministic, but random looking, test data generator.
-// Given the same seed, the series of data generated on different runs is identical.
-// This means that we generate valid values for things like uuid based
-// identities and simulated time stamps, but the log telemetry from successive runs will
-// be usefully stable.
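-//
-// A minimal usage sketch, mirroring NewAzuriteTestContext above (generatorCfg
-// stands for the mmrtesting.TestGeneratorConfig literal shown there):
-//
-//	leafHasher := NewLeafHasher()
-//	g := NewEventTestGenerator(t, cfg.StartTimeMS/millisecondMultiplier, &leafHasher, generatorCfg)
-//	ev := g.GenerateNextEvent(cfg.TenantIdentity)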
-func NewEventTestGenerator(
-	t *testing.T, seed int64,
-	leafHasher leafHasher,
-	cfg mmrtesting.TestGeneratorConfig) EventTestGenerator {
-
-	g := EventTestGenerator{
-		LeafHasher: leafHasher,
-	}
-	g.TestGenerator = mmrtesting.NewTestGenerator(t, seed, cfg, func(tenantIdentity string, base, i uint64) mmrtesting.AddLeafArgs {
-		return g.GenerateLeaf(tenantIdentity, base, i)
-	})
-
-	var err error
-	g.IdState, err = snowflakeid.NewIDState(snowflakeid.Config{
-		CommitmentEpoch: 1,
-		WorkerCIDR:      "0.0.0.0/16",
-		PodIP:           "10.0.0.1",
-	})
-	require.NoError(t, err)
-
-	return g
-}
-
-func (g *EventTestGenerator) NextId() (uint64, error) {
-	var err error
-	var id uint64
-
-	var attempts = 2
-	var sleep = time.Millisecond * 2
-
-	for range attempts {
-		id, err = g.IdState.NextID()
-		if err == nil {
-			// return on success; otherwise a second id would be generated needlessly
-			return id, nil
-		}
-		if !errors.Is(err, snowflakeid.ErrOverloaded) {
-			return 0, err
-		}
-		time.Sleep(sleep)
-	}
-	return id, err
-}
-
-func (g *EventTestGenerator) GenerateLeaf(tenantIdentity string, base, i uint64) mmrtesting.AddLeafArgs {
-	ev := g.GenerateNextEvent(tenantIdentity)
-
-	id, err := g.NextId()
-	require.NoError(g.T, err)
-	g.LeafHasher.Reset()
-	err = g.LeafHasher.HashEvent(ev)
-	require.Nil(g.T, err)
-
-	return mmrtesting.AddLeafArgs{
-		Id:    id,
-		AppId: []byte(ev.GetIdentity()),
-		Value: g.LeafHasher.Sum(nil),
-	}
-}
-
-func (g *EventTestGenerator) GenerateEventBatch(count int) []*assets.EventResponse {
-	events := make([]*assets.EventResponse, 0, count)
-	for range count {
-		events = append(events, g.GenerateNextEvent(mmrtesting.DefaultGeneratorTenantIdentity))
-	}
-	return events
-}
-
-func (g *EventTestGenerator) GenerateNextEvent(tenantIdentity string) *assets.EventResponse {
-
-	assetIdentity := g.NewAssetIdentity()
-	assetUUID := strings.Split(assetIdentity, "/")[1]
-
-	name := strings.Join(g.WordList(names), "")
-	email := fmt.Sprintf("%s@datatrails.com", name)
-	subject := strconv.Itoa(g.Intn(math.MaxInt))
-
-	// Use the desired event rate as the upper bound, and generate a time stamp
-	// at lastTime + rand(0, upper-bound * 2), so the generated event stream
-	// will be around the target rate.
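-	// (A sketch of the arithmetic: at an EventRate of 500 events/s the mean
-	// gap is 2ms, so the draw is from roughly [0, 4ms); the draw itself is
-	// implemented by mmrtesting's SinceLastJitter.)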
-	ts := g.SinceLastJitter()
-
-	event := &assets.EventResponse{
-		Identity:      g.NewEventIdentity(assetUUID),
-		AssetIdentity: assetIdentity,
-		EventAttributes: map[string]*attribute.Attribute{
-			"forestrie.testGenerator-sequence-number": {
-				Value: &attribute.Attribute_StrVal{
-					StrVal: strconv.Itoa(g.numEventsGenerated),
-				},
-			},
-			"forestrie.testGenerator-label": {
-				Value: &attribute.Attribute_StrVal{
-					StrVal: fmt.Sprintf("%s%s", g.Cfg.TestLabelPrefix, "GenerateNextEvent"),
-				},
-			},
-			"event-attribute-0": {
-				Value: &attribute.Attribute_StrVal{
-					StrVal: g.MultiWordString(eventAttributeWords),
-				},
-			},
-		},
-		AssetAttributes: map[string]*attribute.Attribute{
-			"asset-attribute-0": {
-				Value: &attribute.Attribute_StrVal{
-					StrVal: g.MultiWordString(assetAttributeWords),
-				},
-			},
-		},
-		Operation:          "Record",
-		Behaviour:          "RecordEvidence",
-		TimestampDeclared:  timestamppb.New(ts),
-		TimestampAccepted:  timestamppb.New(ts),
-		TimestampCommitted: nil,
-		PrincipalDeclared: &assets.Principal{
-			Issuer:      "https://rkvt.com",
-			Subject:     subject,
-			DisplayName: name,
-			Email:       email,
-		},
-		PrincipalAccepted: &assets.Principal{
-			Issuer:      "https://rkvt.com",
-			Subject:     subject,
-			DisplayName: name,
-			Email:       email,
-		},
-		ConfirmationStatus: assets.ConfirmationStatus_PENDING,
-		From:               "0xf8dfc073650503aeD429E414bE7e972f8F095e70",
-		TenantIdentity:     tenantIdentity,
-	}
-	g.LastTime = ts
-	g.numEventsGenerated++
-
-	return event
-}
-
-func (g *EventTestGenerator) NewEventIdentity(assetUUID string) string {
-	return assets.EventIdentityFromUuid(assetUUID, g.NewRandomUUIDString(g.T))
-}
-
-func (g *EventTestGenerator) NewAssetIdentity() string {
-	return assets.AssetIdentityFromUuid(g.NewRandomUUIDString(g.T))
-}
-
-// PadWithLeafEntries pads the given mmr (data) with the given number of leaves (n).
-//
-// Each leaf is a hash of a deterministically generated event.
-func (g *EventTestGenerator) PadWithLeafEntries(data []byte, n int) []byte {
-	if n == 0 {
-		return data
-	}
-	g.LeafHasher.Reset()
-
-	batch := g.GenerateEventBatch(n)
-	for _, ev := range batch {
-		err := g.LeafHasher.HashEvent(ev)
-		require.NoError(g.T, err)
-		v := g.LeafHasher.Sum(nil)
-		data = append(data, v...)
-	}
-	return data
-}
diff --git a/verifyincluded.go b/verifyincluded.go
index beba1c8..de88bd7 100644
--- a/verifyincluded.go
+++ b/verifyincluded.go
@@ -7,12 +7,13 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/datatrails/go-datatrails-logverification/logverification/app"
 	"github.com/datatrails/go-datatrails-merklelog/massifs"
 	"github.com/datatrails/go-datatrails-merklelog/mmr"
+	"github.com/forestrie/go-merklelog-datatrails/appentry"
+	"github.com/forestrie/go-merklelog-datatrails/datatrails"
 	"github.com/urfave/cli/v2"
 
-	veracityapp "github.com/datatrails/veracity/app"
+	appdata "github.com/forestrie/go-merklelog-datatrails/appdata"
 )
 
 var (
@@ -34,7 +35,8 @@ func proofPath(proof [][]byte) string {
 
 // verifyEvent is an example of how to verify the inclusion of a datatrails event using the mmr and massifs modules
 func verifyEvent(
-	event *app.AppEntry, logTenant string, mmrEntry []byte, massifHeight uint8, massifGetter MassifGetter,
+	reader massifs.ObjectReader,
+	event *appentry.AppEntry, logTenant string, mmrEntry []byte, massifHeight uint8,
 ) ([][]byte, error) {
 
 	// Get the mmrIndex from the request and then compute the massif
@@ -44,7 +46,7 @@
 	massifIndex := massifs.MassifIndexFromMMRIndex(massifHeight, mmrIndex)
 
 	// read the massif blob
-	massif, err := massifGetter.GetMassif(context.Background(), logTenant, massifIndex)
+	massif, err := massifs.GetMassifContext(context.Background(), reader, uint32(massifIndex))
 	if err != nil {
 		return nil, err
 	}
@@ -97,7 +99,7 @@ Note: for publicly attested events, or shared protected events, you must use --t
 	}
 
 	log := func(m string, args ...any) {
-		cmd.log.Infof(m, args...)
+		cmd.Log.Infof(m, args...)
 	}
 
 	tenantIdentity := cCtx.String("tenant")
@@ -117,24 +119,29 @@
 		tenantLogPath = tenantIdentity
 	}
 
-	appData, err := veracityapp.ReadAppData(cCtx.Args().Len() == 0, cCtx.Args().Get(0))
+	if err = cfgMassifFmt(cmd, cCtx); err != nil {
+		return err
+	}
+
+	appData, err := appdata.ReadAppData(cCtx.Args().Len() == 0, cCtx.Args().Get(0))
 	if err != nil {
 		return err
 	}
 
-	verifiableLogEntries, err := veracityapp.AppDataToVerifiableLogEntries(appData, tenantIdentity)
+	verifiableLogEntries, err := appdata.AppDataToVerifiableLogEntries(appData, tenantIdentity)
 	if err != nil {
 		return err
 	}
 
-	if err = cfgMassifReader(cmd, cCtx); err != nil {
+	reader, err := newMassifReader(cmd, cCtx)
+	if err != nil {
 		return err
 	}
 
	var countNotCommitted int
 	var countVerifyFailed int
 
-	previousMassifIndex := uint64(0)
+	previousMassifIndex := uint32(0)
 
 	var massifContext *massifs.MassifContext = nil
 
 	for _, event := range verifiableLogEntries {
@@ -142,7 +149,7 @@
 		leafIndex := mmr.LeafIndex(event.MMRIndex())
 		// get the massif index for the event
-		massifIndex := massifs.MassifIndexFromMMRIndex(cmd.massifHeight, event.MMRIndex())
+		massifIndex := uint32(massifs.MassifIndexFromMMRIndex(cmd.MassifFmt.MassifHeight, event.MMRIndex()))
 
 		// find the log tenant path if not provided
 		if tenantLogPath == "" {
@@ -152,14 +159,19 @@
 			if err != nil {
 				return err
 			}
+		}
+
+		logId := datatrails.TenantID2LogID(tenantLogPath)
+		if err := reader.SelectLog(cCtx.Context, logId); err != nil {
+			return fmt.Errorf("failed to select log %s: %w", tenantLogPath, err)
 		}
 
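 		// (For scale, a sketch of the index arithmetic: a massif of height h
 		// holds 2^(h-1) leaves, so e.g. the height 8 used in the azurite tests
 		// gives 128 leaves per massif.)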
 		// check if this event is part of a different massif than the previous
 		// event
 		//
 		// if it is, we get the new massif
 		if massifContext == nil || massifIndex != previousMassifIndex {
-			massif, err := cmd.massifReader.GetMassif(cCtx.Context, tenantLogPath, massifIndex)
+			massif, err := massifs.GetMassifContext(context.Background(), reader, massifIndex)
 			if err != nil {
 				return err
 			}
diff --git a/verifyincluded_test.go b/verifyincluded_test.go
deleted file mode 100644
index ba87bea..0000000
--- a/verifyincluded_test.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package veracity
-
-import (
-	"context"
-	"crypto/sha256"
-	"encoding/binary"
-	"fmt"
-	"testing"
-
-	"github.com/datatrails/go-datatrails-common/logger"
-	"github.com/datatrails/go-datatrails-logverification/logverification/app"
-	"github.com/datatrails/go-datatrails-merklelog/massifs"
-	veracityapp "github.com/datatrails/veracity/app"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-// testMassifContext generates a massif context with 2 entries
-//
-// the first entry is a known assetsv2 event
-// the second entry is a known eventsv1 event
-func testMassifContext(t *testing.T) *massifs.MassifContext {
-
-	start := massifs.MassifStart{
-		MassifHeight: 3,
-	}
-
-	testMassifContext := &massifs.MassifContext{
-		Start: start,
-		LogBlobContext: massifs.LogBlobContext{
-			BlobPath: "test",
-			Tags:     map[string]string{},
-		},
-	}
-
-	data, err := start.MarshalBinary()
-	require.NoError(t, err)
-
-	testMassifContext.Data = append(data, testMassifContext.InitIndexData()...)
-
-	testMassifContext.Tags["firstindex"] = fmt.Sprintf("%016x", testMassifContext.Start.FirstIndex)
-
-	hasher := sha256.New()
-
-	// KAT Data taken from an actual merklelog.
-
-	// AssetsV2
-	_, err = testMassifContext.AddHashedLeaf(
-		hasher,
-		binary.BigEndian.Uint64([]byte{148, 111, 227, 95, 198, 1, 121, 0}),
-		[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-		[]byte("tenant/112758ce-a8cb-4924-8df8-fcba1e31f8b0"),
-		[]byte("assets/899e00a2-29bc-4316-bf70-121ce2044472/events/450dce94-065e-4f6a-bf69-7b59f28716b6"),
-		[]byte{97, 231, 1, 42, 127, 20, 181, 70, 122, 134, 84, 231, 174, 117, 200, 148, 171, 205, 57, 146, 174, 48, 34, 30, 152, 215, 77, 3, 204, 14, 202, 57},
-	)
-	require.NoError(t, err)
-
-	// EventsV1
-	_, err = testMassifContext.AddHashedLeaf(
-		hasher,
-		binary.BigEndian.Uint64([]byte{148, 112, 0, 54, 17, 1, 121, 0}),
-		[]byte{1, 17, 39, 88, 206, 168, 203, 73, 36, 141, 248, 252, 186, 30, 49, 248, 176, 0, 0, 0, 0, 0, 0, 0},
-		[]byte("tenant/112758ce-a8cb-4924-8df8-fcba1e31f8b0"),
-		[]byte("events/01947000-3456-780f-bfa9-29881e3bac88"),
-		[]byte{215, 191, 107, 210, 134, 10, 40, 56, 226, 71, 136, 164, 9, 118, 166, 159, 86, 31, 175, 135, 202, 115, 37, 151, 174, 118, 115, 113, 25, 16, 144, 250},
-	)
-	require.NoError(t, err)
-
-	// Intermediate Node Skipped
-
-	return testMassifContext
-}
-
-type fakeMassifGetter struct {
-	t             *testing.T
-	massifContext *massifs.MassifContext
-}
-
-// NewFakeMassifGetter creates a new massif getter that has 2 entries in the massif it gets
-//
-// one assetsv2 event entry and one eventsv1 entry
-func NewFakeMassifGetter(t *testing.T) *fakeMassifGetter {
-
-	massifContext := testMassifContext(t)
-
-	return &fakeMassifGetter{
-		t:             t,
-		massifContext: massifContext,
-	}
-}
-
-// NewFakeMassifGetterInvalidRoot creates a new massif getter that has an incorrect massif root
-func NewFakeMassifGetterInvalidRoot(t *testing.T) *fakeMassifGetter {
-
-	massifContext := testMassifContext(t)
-
-	// a massif context with 2 entries has its root at index 2
-	//
-	//	  2
-	//	 / \
-	//	0   1
-	rootMMRIndex := 2
-
-	rootDataStart := (massifContext.LogStart() + uint64(rootMMRIndex*massifs.LogEntryBytes)) - 1
-	rootDataEnd := (rootDataStart + massifs.ValueBytes)
-
-	// set the start and end of the root entry to 0
-	// to make the root entry invalid
-	massifContext.Data[rootDataStart] = 0x0
-	massifContext.Data[rootDataEnd] = 0x0
-
-	return &fakeMassifGetter{
-		t:             t,
-		massifContext: massifContext,
-	}
-}
-
-// GetMassif always returns the test massif
-func (tmg *fakeMassifGetter) GetMassif(
-	ctx context.Context,
-	tenantIdentity string,
-	massifIndex uint64,
-	opts ...massifs.ReaderOption,
-) (massifs.MassifContext, error) {
-	return *tmg.massifContext, nil
-}
-
-func TestVerifyAssetsV2Event(t *testing.T) {
-	logger.New("TestVerifyList")
-	defer logger.OnExit()
-
-	events, _ := veracityapp.NewAssetsV2AppEntries(assetsV2SingleEventList)
-	require.NotZero(t, len(events))
-
-	event := events[0]
-
-	tests := []struct {
-		name          string
-		event         *app.AppEntry
-		massifGetter  MassifGetter
-		expectedProof [][]byte
-		expectedError bool
-	}{
-		{
-			name:          "simple OK",
-			event:         &event,
-			massifGetter:  NewFakeMassifGetter(t),
-			expectedError: false,
-			expectedProof: [][]byte{
-				{
-					0xd7, 0xbf, 0x6b, 0xd2, 0x86, 0xa, 0x28, 0x38,
-					0xe2, 0x47, 0x88, 0xa4, 0x9, 0x76, 0xa6, 0x9f,
-					0x56, 0x1f, 0xaf, 0x87, 0xca, 0x73, 0x25, 0x97,
-					0xae, 0x76, 0x73, 0x71, 0x19, 0x10, 0x90, 0xfa,
-				},
-			},
-		},
-		/**{
-			name:          "No mmr log",
-			event:         &event,
-			massifGetter:  &fakeMassifGetter{t, nil},
-			expectedError: true,
-		},*/
-		{
-			name:          "Not valid proof",
-			event:         &event,
-			massifGetter:  NewFakeMassifGetterInvalidRoot(t),
-			expectedError: true,
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-
-			logTenant, err := test.event.LogTenant()
-			require.Nil(t, err)
-
-			massifIndex := massifs.MassifIndexFromMMRIndex(defaultMassifHeight, test.event.MMRIndex())
-
-			ctx := context.Background()
-			massif, err := test.massifGetter.GetMassif(ctx, logTenant, massifIndex)
-			require.NoError(t, err)
-
-			mmrEntry, err := test.event.MMREntry(&massif)
-			require.NoError(t, err)
-
-			proof, err := verifyEvent(test.event, logTenant, mmrEntry, defaultMassifHeight, test.massifGetter)
-
-			if test.expectedError {
-				assert.NotNil(t, err, "expected error got nil")
-			} else {
-				assert.Nil(t, err, "unexpected error")
-				assert.Equal(t, test.expectedProof, proof)
-			}
-		})
-	}
-}
diff --git a/watch.go b/watch.go
index 4462be6..5b80a08 100644
--- a/watch.go
+++ b/watch.go
@@ -5,18 +5,15 @@ package veracity
 
 import (
 	"context"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"strings"
 	"time"
 
-	"github.com/datatrails/go-datatrails-common/azblob"
 	"github.com/datatrails/go-datatrails-common/logger"
-	"github.com/datatrails/go-datatrails-merklelog/massifs"
-	"github.com/datatrails/go-datatrails-merklelog/massifs/snowflakeid"
-	"github.com/datatrails/go-datatrails-merklelog/massifs/watcher"
+	"github.com/datatrails/go-datatrails-merklelog/massifs/storage"
+	azwatcher "github.com/robinbryce/go-merklelog-azure/watcher"
 
 	// "github.com/datatrails/go-datatrails-common/azblob"
 	"github.com/urfave/cli/v2"
@@ -49,11 +46,7 @@ var (
 )
 
 type WatchConfig struct {
-	watcher.WatchConfig
-	WatchTenants map[string]bool
-	WatchCount   int
-	ReaderURL    string
-	Latest       bool
+	azwatcher.WatchConfig
 }
 
 // watchReporter abstracts the output interface for WatchForChanges to facilitate unit testing.
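 //
 // For reference, the shape implied by its implementations (defaultReporter in
 // this file, mockReporter in watch_test.go); a sketch, not the verbatim
 // definition:
 //
 //	type watchReporter interface {
 //		Logf(message string, args ...any)
 //		Outf(message string, args ...any)
 //	}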
@@ -88,7 +81,7 @@ func NewLogWatcherCmd() *cli.Command { Flags: []cli.Flag{ &cli.BoolFlag{ Name: flagLatest, - Usage: `find the latest changes for each requested tenant (no matter how long ago they occured). This is mutualy exclusive with --since, --idsince and --horizon.`, + Usage: `find the latest changes for each requested tenant (no matter how long ago they occurred). This is mutually exclusive with --since, --idsince and --horizon.`, Value: false, }, @@ -127,21 +120,40 @@ func NewLogWatcherCmd() *cli.Command { if err = cfgLogging(cmd, cCtx); err != nil { return err } - reporter := &defaultReporter{log: cmd.log} + reporter := &defaultReporter{log: cmd.Log} cfg, err := NewWatchConfig(cCtx, cmd) if err != nil { return err } - forceProdUrl := cCtx.String("data-url") == "" + dataUrl := cCtx.String("data-url") + if dataUrl == "" && !IsStorageEmulatorEnabled(cCtx) { + dataUrl = DefaultRemoteMassifURL + } + + + reader, err := cfgReader(cmd, cCtx, dataUrl) + if err != nil { + return err + } - reader, err := cfgReader(cmd, cCtx, forceProdUrl) + collator := azwatcher.NewLogTailCollator( + func(storagePath string) storage.LogID { + return storage.ParsePrefixedLogID("tenant/", storagePath) + }, + storage.ObjectIndexFromPath, + ) + watcher, err := azwatcher.NewWatcher(cfg.WatchConfig) if err != nil { return err } + wc := &WatcherCollator{ + Watcher: watcher, + LogTailCollator: collator, + } - return WatchForChanges(ctx, cfg, reader, reporter) + return azwatcher.WatchForChanges(ctx, cfg.WatchConfig, wc, reader, reporter) }, } } @@ -155,7 +167,7 @@ func checkCompatibleFlags(cCtx cliContext) error { for _, excluded := range latestExcludes { if cCtx.IsSet(excluded) { - return fmt.Errorf("the %s flag is mutualy exclusive with %s", flagLatest, strings.Join(latestExcludes, ", ")) + return fmt.Errorf("the %s flag is mutually exclusive with %s", flagLatest, strings.Join(latestExcludes, ", ")) } } return nil @@ -212,9 +224,14 @@ func NewWatchConfig(cCtx cliContext, cmd *CmdCtx) (WatchConfig, error) { } cfg := WatchConfig{ - Latest: cCtx.Bool(flagLatest), + WatchConfig: azwatcher.WatchConfig{ + Latest: cCtx.Bool(flagLatest), + Interval: cCtx.Duration(flagInterval), + }, + } + if cfg.Interval == 0 { + cfg.Interval = threeSeconds } - cfg.Interval = cCtx.Duration(flagInterval) if cCtx.IsSet(flagHorizon) { cfg.Horizon, err = parseHorizon(cCtx.String(flagHorizon)) @@ -230,229 +247,31 @@ func NewWatchConfig(cCtx cliContext, cmd *CmdCtx) (WatchConfig, error) { cfg.IDSince = cCtx.String(flagIDSince) } - if !cCtx.IsSet(flagLatest) { - err = watcher.ConfigDefaults(&cfg.WatchConfig) - if err != nil { - return WatchConfig{}, err - } - if cfg.Interval < time.Second { - return WatchConfig{}, fmt.Errorf("polling more than once per second is not currently supported") - } + err = azwatcher.ConfigDefaults(&cfg.WatchConfig) + if err != nil { + return WatchConfig{}, err + } + if cfg.Interval < time.Second { + return WatchConfig{}, fmt.Errorf("polling more than once per second is not currently supported") } cfg.WatchCount = min(max(1, cCtx.Int(flagCount)), maxPollCount) - cfg.ReaderURL = cmd.readerURL + cfg.ObjectPrefixURL = cmd.RemoteURL - tenants := CtxGetTenantOptions(cCtx) - if len(tenants) == 0 { + logs := CtxGetLogOptions(cCtx) + if len(logs) == 0 { return cfg, nil } - cfg.WatchTenants = make(map[string]bool) - for _, t := range tenants { - cfg.WatchTenants[strings.TrimPrefix(t, tenantPrefix)] = true + cfg.WatchLogs = make(map[string]bool) + for _, lid := range logs { + cfg.WatchLogs[string(lid)] = true } return cfg, nil } 
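+
+// A sketch of the resulting defaults, matching the expectations in
+// watch_test.go: `veracity watch --horizon 1h` yields Interval=3s (the
+// threeSeconds default above), a Since of roughly an hour ago, and an
+// IDSince derived from Since.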
-type Watcher struct {
-	watcher.Watcher
-	cfg      WatchConfig
-	reader   azblob.Reader
-	reporter watchReporter
-	collator watcher.LogTailCollator
-}
-
-// FirstFilter accounts for the --latest flag but otherwise falls through to the base implementation
-func (w *Watcher) FirstFilter() string {
-	if !w.cfg.Latest {
-		return w.Watcher.FirstFilter()
-	}
-	// The first idtimestamp of the first epoch
-	idSince := massifs.IDTimestampToHex(0, 0)
-	return fmt.Sprintf(`"lastid">='%s'`, idSince)
-}
-
-// NextFilter accounts for the --latest flag but otherwise falls through to the base implementation
-func (w *Watcher) NextFilter() string {
-	if !w.cfg.Latest {
-		return w.Watcher.NextFilter()
-	}
-	return w.FirstFilter()
-}
-
-func normalizeTenantIdentity(tenant string) string {
-	if strings.HasPrefix(tenant, tenantPrefix) {
-		return tenant
-	}
-	return fmt.Sprintf("%s%s", tenantPrefix, tenant)
-}
-
-// WatchForChanges watches for tenant log changes according to the provided config
-func WatchForChanges(
-	ctx context.Context,
-	cfg WatchConfig, reader azblob.Reader, reporter watchReporter,
-) error {
-
-	w := &Watcher{
-		Watcher:  watcher.Watcher{Cfg: cfg.WatchConfig},
-		cfg:      cfg,
-		reader:   reader,
-		reporter: reporter,
-		collator: watcher.NewLogTailCollator(),
-	}
-	tagsFilter := w.FirstFilter()
-
-	count := w.cfg.WatchCount
-
-	for {
-
-		// For each count, collate all the pages
-		err := collectPages(ctx, w, tagsFilter)
-		if err != nil {
-			return err
-		}
-
-		var activity []TenantActivity
-		for _, tenant := range w.collator.SortedMassifTenants() {
-			if w.cfg.WatchTenants != nil && !w.cfg.WatchTenants[tenant] {
-				continue
-			}
-
-			lt := w.collator.Massifs[tenant]
-			sealLastID := lastSealID(w.collator, tenant)
-			// This is console mode output
-
-			a := TenantActivity{
-				Tenant:      normalizeTenantIdentity(tenant),
-				Massif:      int(lt.Number),
-				IDCommitted: lt.LastID, IDConfirmed: sealLastID,
-				LastModified: lastActivityRFC3339(lt.LastID, sealLastID),
-				MassifURL:    fmt.Sprintf("%s%s", w.cfg.ReaderURL, lt.Path),
-			}
-
-			if sealLastID != sealIDNotFound {
-				a.SealURL = fmt.Sprintf("%s%s", w.cfg.ReaderURL, w.collator.Seals[tenant].Path)
-			}
-
-			activity = append(activity, a)
-		}
-
-		if activity != nil {
-			reporter.Logf(
-				"%d active logs since %v (%s).",
-				len(w.collator.Massifs),
-				w.LastSince.Format(time.RFC3339),
-				w.LastIDSince,
-			)
-			reporter.Logf(
-				"%d tenants sealed since %v (%s).",
-				len(w.collator.Seals),
-				w.LastSince.Format(time.RFC3339),
-				w.LastIDSince,
-			)
-
-			marshaledJson, err := json.MarshalIndent(activity, "", " ")
-			if err != nil {
-				return err
-			}
-			reporter.Outf(string(marshaledJson))
-
-			// Terminate immediately once we have results
-			return nil
-		}
-
-		// Note we don't allow a zero interval
-		if count <= 1 || w.Cfg.Interval == 0 {
-			// exit non zero if nothing is found
-			return ErrNoChanges
-		}
-		count--
-
-		tagsFilter = w.NextFilter()
-		time.Sleep(w.Cfg.Interval)
-	}
-}
-
-// collectPages collects all pages of a single filterList invocation,
-// keeping the happy path left aligned
-func collectPages(
-	ctx context.Context,
-	w *Watcher,
-	tagsFilter string,
-	filterOpts ...azblob.Option,
-) error {
-
-	var lastMarker azblob.ListMarker
-
-	for {
-		filtered, err := filteredList(ctx, w.reader, tagsFilter, lastMarker, filterOpts...)
-		if err != nil {
-			return err
-		}
-
-		err = w.collator.CollatePage(filtered.Items)
-		if err != nil {
-			return err
-		}
-		lastMarker = filtered.Marker
-		if lastMarker == nil || *lastMarker == "" {
-			break
-		}
-	}
-	return nil
-}
-
-// filteredList folds the lastMarker option into the FilteredList call, keeping
-// the call sites left aligned
-func filteredList(
-	ctx context.Context,
-	reader azblob.Reader,
-	tagsFilter string,
-	marker azblob.ListMarker,
-	filterOpts ...azblob.Option,
-) (*azblob.FilterResponse, error) {
-
-	if marker == nil || *marker == "" {
-		return reader.FilteredList(ctx, tagsFilter)
-	}
-	return reader.FilteredList(ctx, tagsFilter, append(filterOpts, azblob.WithListMarker(marker))...)
-}
-
-func lastSealID(c watcher.LogTailCollator, tenant string) string {
-	if _, ok := c.Seals[tenant]; ok {
-		return c.Seals[tenant].LastID
-	}
-	return sealIDNotFound
-}
-
-func lastActivityRFC3339(idmassif, idseal string) string {
-	tmassif, err := lastActivity(idmassif)
-	if err != nil {
-		return ""
-	}
-	if idseal == sealIDNotFound {
-		return tmassif.UTC().Format(time.RFC3339)
-	}
-	tseal, err := lastActivity(idseal)
-	if err != nil {
-		return tmassif.UTC().Format(time.RFC3339)
-	}
-	if tmassif.After(tseal) {
-		return tmassif.UTC().Format(time.RFC3339)
-	}
-	return tseal.UTC().Format(time.RFC3339)
-}
-
-func lastActivity(idTimestamp string) (time.Time, error) {
-	id, epoch, err := massifs.SplitIDTimestampHex(idTimestamp)
-	if err != nil {
-		return time.Time{}, err
-	}
-	ms, err := snowflakeid.IDUnixMilli(id, epoch)
-	if err != nil {
-		return time.Time{}, err
-	}
-	return time.UnixMilli(ms), nil
+type WatcherCollator struct {
+	azwatcher.Watcher
+	azwatcher.LogTailCollator
 }
diff --git a/watch_test.go b/watch_test.go
index 826f622..cb38739 100644
--- a/watch_test.go
+++ b/watch_test.go
@@ -1,56 +1,14 @@
 package veracity
 
 import (
-	"context"
-	"encoding/json"
-	"errors"
 	"reflect"
 	"strings"
 	"testing"
 	"time"
 
-	azStorageBlob "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
-	"github.com/datatrails/go-datatrails-common/azblob"
-	"github.com/datatrails/go-datatrails-common/logger"
-	"github.com/datatrails/go-datatrails-merklelog/massifs"
-	"github.com/datatrails/go-datatrails-merklelog/massifs/snowflakeid"
 	"github.com/stretchr/testify/assert"
 )
 
-func Test_lastActivityRFC3339(t *testing.T) {
-	type args struct {
-		idmassif string
-		idseal   string
-	}
-	tests := []struct {
-		name string
-		args args
-		want string
-	}{
-		{
-			args: args{
-				idmassif: "019107fb65391e3e00",
-				idseal:   "0191048b865a073f00",
-			},
-			want: "2024-07-31T08:50:01Z",
-		},
-		{
-			args: args{
-				idmassif: "0191048b865a073f00",
-				idseal:   "019107fb65391e3e00",
-			},
-			want: "2024-07-31T08:50:01Z",
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := lastActivityRFC3339(tt.args.idmassif, tt.args.idseal); got != tt.want {
-				t.Errorf("lastActivityRFC3339() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
-
 type checkWatchConfig func(t *testing.T, cfg WatchConfig)
 
 func TestNewWatchConfig(t *testing.T) {
@@ -98,7 +56,7 @@ func TestNewWatchConfig(t *testing.T) {
 			},
 				cmd: new(CmdCtx),
 			},
-			errPrefix: "the latest flag is mutualy exclusive",
+			errPrefix: "the latest flag is mutually exclusive",
 		},
 
 		{
@@ -120,7 +78,7 @@
 				cCtx: &mockContext{},
 				cmd:  new(CmdCtx),
 			},
-			errPrefix: "provide horizon on its own or either of the since",
+			errPrefix: "provide the latest flag, horizon on its own or either of the since parameters",
 		},
 
 		{
@@ -159,7 +117,7 @@
 			},
 			check: func(t *testing.T, cfg WatchConfig) {
 				assert.Equal(t, hourSince, cfg.Since)
-				assert.Equal(t, time.Second, cfg.Interval)
+				assert.Equal(t, time.Second*3, cfg.Interval)
 				assert.NotEqual(t, "", cfg.IDSince) // should be set to IDTimeHex
 			},
 		},
@@ -199,338 +157,6 @@
 	}
 }
 
-const (
-	Unix20231215T1344120000 = uint64(1702647852)
-)
-
-func watchMakeId(ms uint64) string {
-	seqBits := 8
-	idt := (ms - uint64(snowflakeid.EpochMS(1))) << snowflakeid.TimeShift
-	return massifs.IDTimestampToHex(idt|uint64(7)<= len(reporter.outf) {
-				t.Errorf("wanted %d outputs, got %d", len(tt.wantOutputs), len(reporter.outf))
-				break
-			}
-			assert.Equal(t, tt.wantOutputs[i], reporter.outf[i])
-		}
-	}
-		})
-	}
-}
-
-func marshalActivity(t *testing.T, activity ...TenantActivity) []byte {
-	marshaledJson, err := json.MarshalIndent(activity, "", " ")
-	assert.NoError(t, err)
-	return marshaledJson
-}
-
-func newFilterBlobItem(name string, lastid string) *azStorageBlob.FilterBlobItem {
-	it := &azStorageBlob.FilterBlobItem{}
-	it.Name = &name
-	it.Tags = &azStorageBlob.BlobTags{}
-	it.Tags.BlobTagSet = make([]*azStorageBlob.BlobTag, 1)
-	key := "lastid"
-
-	it.Tags.BlobTagSet[0] = &azStorageBlob.BlobTag{Key: &key, Value: &lastid}
-	return it
-}
-
-func newFilterBlobItems(nameAndLastIdPairs ...string) []*azStorageBlob.FilterBlobItem {
-	// just ignore an odd length
-	var items []*azStorageBlob.FilterBlobItem
-	pairs := len(nameAndLastIdPairs) >> 1
-	for i := range pairs {
-		name := nameAndLastIdPairs[i*2]
-		lastid := nameAndLastIdPairs[i*2+1]
-		items = append(items, newFilterBlobItem(name, lastid))
-	}
-	return items
-}
-
-type mockReader struct {
-	resultIndex int
-	pageTokens  []azblob.ListMarker
-	results     []*azblob.FilterResponse
-}
-
-func (r *mockReader) Reader(
-	ctx context.Context,
-	identity string,
-	opts ...azblob.Option,
-) (*azblob.ReaderResponse, error) {
-	return nil, nil
-}
-
-func (r *mockReader) FilteredList(ctx context.Context, tagsFilter string, opts ...azblob.Option) (*azblob.FilterResponse, error) {
-
-	i := r.resultIndex
-	if i >= len(r.results) {
-		return &azblob.FilterResponse{}, nil
-	}
-
-	// Note: when paging, because the values on StorerOptions are needlessly
-	// private, we can't check that we got the expected option back
-
-	r.resultIndex++
-
-	res := *r.results[i]
-	if i < len(r.pageTokens) {
-		res.Marker = r.pageTokens[i]
-	}
-
-	return &res, nil
-}
-
-func (r *mockReader) List(ctx context.Context, opts ...azblob.Option) (*azblob.ListerResponse, error) {
-	return nil, nil
-}
-
-type mockReporter struct {
-	logf     []string
-	logfargs [][]any
-	outf     []string
-	outfargs [][]any
-}
-
-func (r *mockReporter) Logf(message string, args ...any) {
-
-	r.logf = append(r.logf, message)
-	r.logfargs = append(r.logfargs, args)
-}
-
-func (r *mockReporter) Outf(message string, args ...any) {
-	r.outf = append(r.outf, message)
-	r.outfargs = append(r.outfargs, args)
-}
-
 type mockContext struct {
 	since  *time.Time
 	latest bool