Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion .github/actions/setup-ci/action.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,10 @@ runs:
steps:
- name: Setup etc/hosts
shell: bash
run: sudo echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
run: |
  # Only tee needs elevated rights to append to /etc/hosts; a sudo on echo
  # is useless (the pipe itself still runs unprivileged).
  echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
  echo "127.0.0.1 pykmip.local" | sudo tee -a /etc/hosts
  # 50 loopback aliases so per-client pykmip hosts resolve locally.
  for i in $(seq 1 50); do echo "127.0.0.$i $i.pykmip.local" | sudo tee -a /etc/hosts ; done
- name: Setup Credentials
shell: bash
run: bash .github/scripts/credentials.bash
Expand Down
4 changes: 4 additions & 0 deletions .github/docker/admin.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"accessKey": "D4IT2AWSB588GO5J9T00",
"secretKeyValue": "UEEu8tYlsOGGrgf4DAiSZD6apVNPUWqRiPG0nTB6"
}
1 change: 1 addition & 0 deletions .github/docker/creds.env
Original file line number Diff line number Diff line change
Expand Up @@ -35,3 +35,4 @@ gcpbackend_GCP_SERVICE_KEYFILE
gcpbackendmismatch_GCP_SERVICE_KEYFILE
gcpbackendnoproxy_GCP_SERVICE_KEYFILE
gcpbackendproxy_GCP_SERVICE_KEYFILE
AWS_SDK_JS_SUPPRESS_MAINTENANCE_MODE_MESSAGE=1
50 changes: 50 additions & 0 deletions .github/docker/docker-compose.sse.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# Compose overlay for the SSE/KMS migration tests: an "old image" stack
# (vault/cloudserver *-before-migration) that seeds data, then a "new image"
# stack (*-migration) that reuses the same volumes and writes separate logs.
services:
  # Old-version Vault used to create pre-migration SSE state.
  vault-sse-before-migration:
    image: ${VAULT_IMAGE}
    # NOTE: profiles quoting normalized to 'sse-migration' file-wide
    # (was mixed quoted/unquoted; same value either way).
    profiles: ['sse-migration']
    # root because S3C images needs ownership permission on files and mounted paths
    user: root
    command: sh -c "chmod 400 tests/utils/keyfile && yarn start > /artifacts/vault.log 2> /artifacts/vault-stderr.log"
    network_mode: "host"
    volumes:
      - /tmp/artifacts/${JOB_NAME}:/artifacts
      - ./vault-config.json:/conf/config.json:ro
      - ./vault-db:/data
    environment:
      - VAULT_DB_BACKEND=LEVELDB
      - CI=true
      - ENABLE_LOCAL_CACHE=true
      - REDIS_HOST=0.0.0.0
      - REDIS_PORT=6379
    depends_on:
      - redis
  # New-version Vault: same service definition, separate migration logs.
  vault-sse-migration:
    extends: vault-sse-before-migration
    profiles: ['sse-migration']
    command: sh -c "chmod 400 tests/utils/keyfile && yarn start > /artifacts/vault.migration.log 2> /artifacts/vault-stderr.migration.log"
    environment:
      - KMS_BACKEND
  # Old-version cloudserver, extending the base compose definition.
  cloudserver-sse-before-migration:
    extends:
      file: docker-compose.yaml
      service: cloudserver
    profiles: ['sse-migration']
    volumes:
      # using artesca container
      - ../../localData:/usr/src/app/localData
      - ../../localMetadata:/usr/src/app/localMetadata
      - ../../tests/functional/sse-kms-migration/config.json:/conf/config.json
    environment:
      - S3_CONFIG_FILE=/conf/config.json
      - S3VAULT=scality
    depends_on:
      - vault-sse-before-migration
  # New-version cloudserver; !override replaces (not merges) the inherited
  # depends_on so it waits on the migration vault instead of the old one.
  cloudserver-sse-migration:
    extends: cloudserver-sse-before-migration
    profiles: ['sse-migration']
    command: sh -c "yarn start > /artifacts/s3.migration.log 2> /artifacts/s3-stderr.migration.log"
    environment:
      - S3KMS
    depends_on: !override
      - redis
      - vault-sse-migration
6 changes: 5 additions & 1 deletion .github/docker/docker-compose.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
services:
cloudserver:
image: ${CLOUDSERVER_IMAGE}
command: sh -c "yarn start > /artifacts/s3.log"
command: sh -c "yarn start > /artifacts/s3.log 2> /artifacts/s3-stderr.log"
network_mode: "host"
volumes:
- /tmp/ssl:/ssl
Expand Down Expand Up @@ -69,3 +69,7 @@ services:
- ../pykmip/certs:/ssl
- ../pykmip/policy.json:/etc/pykmip/policies/policy.json
- ../pykmip/server.conf:/etc/pykmip/server.conf
localkms:
network_mode: "host"
profiles: ['localkms']
image: ${KMS_IMAGE:-nsmithuk/local-kms:3.11.7}
17 changes: 17 additions & 0 deletions .github/docker/ensure-version.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#!/bin/bash

# Make sure the good container version runs before running sse migration tests
#
# Usage: ensure-version.sh <compose-service> <expected-version>
# Exits 1 (with a message on stderr) when the version found in the service's
# package.json does not match the expected one.

set -o pipefail;

CONTAINER=$1
EXPECTED_VERSION=$2

# run jq outside container as some container might not have it
# (use the named $CONTAINER variable — the original referenced bare $1 —
# and quote it so an empty argument fails loudly instead of silently)
VERSION=$(docker compose exec "$CONTAINER" cat package.json | jq -r .version)
if [[ "$VERSION" != "$EXPECTED_VERSION" ]]; then
    echo "bad version of container $CONTAINER. Should be $EXPECTED_VERSION. Was $VERSION" >&2
    exit 1
else
    echo "OK $VERSION"
fi
131 changes: 131 additions & 0 deletions .github/docker/local.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
#!/bin/bash
set -e -o pipefail

# run kms migration tests locally
# in .github/docker

export S3BACKEND=file
export S3METADATA=file
export S3VAULT=scality
export MPU_TESTING="yes"

export CLOUDSERVER_IMAGE_BEFORE_SSE_MIGRATION=ghcr.io/scality/cloudserver:7.70.66
export CLOUDSERVER_IMAGE_ORIGINAL=ghcr.io/scality/cloudserver:7.70.70
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are we going to be updating the after migration images to the latest ones?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We can but don't need to for those tests.


export VAULT_IMAGE_BEFORE_SSE_MIGRATION=ghcr.io/scality/vault:7.70.31
export VAULT_IMAGE_ORIGINAL=ghcr.io/scality/vault:7.70.32
export KMS_IMAGE=nsmithuk/local-kms:3.11.7

export S3_CONFIG_FILE=config.json

export kmsContainer=localkms
export kmsProvider=aws
export kmsPort=8080

# export kmsContainer=pykmip
# export kmsProvider=kmip
# export kmsPort=5696

export kmsHideScalityArn=true
export globalEncryptionEnabled=true

export JOB_NAME=sse-kms-migration-tests-$kmsHideScalityArn-$kmsProvider

mkdir -p /tmp/artifacts/$JOB_NAME
mkdir -p /tmp/ssl-kmip

export CLOUDSERVER_IMAGE=$CLOUDSERVER_IMAGE_BEFORE_SSE_MIGRATION
export VAULT_IMAGE=$VAULT_IMAGE_BEFORE_SSE_MIGRATION

export KMS_AWS_SECRET_ACCESS_KEY=123
export KMS_AWS_ACCESS_KEY_ID=456

export COMPOSE_FILE=docker-compose.yaml:docker-compose.sse.yaml

# Tear down all services of the "docker" compose project (previous runs
# included), so each invocation of this script starts from a clean stack.
function stop_all() {
docker compose -p docker down
}

function rm_all() {
    # Wipe state persisted by previous runs: vault's leveldb directory plus
    # cloudserver's local data/metadata (root-owned by the containers, hence sudo).
    sudo rm -rf ./vault-db/ ../../localData/* ../../localMetadata/*
}

function start_all_before_migration() {
    # Seed the pre-migration cloudserver config, bring up the old-version
    # stack, and (re)create the "test" account with fixed credentials.
    # Assumes the caller is in .github/docker — TODO confirm against driver.
    cd ../../tests/functional/sse-kms-migration
    pwd
    cp configs/base.json config.json
    cd ../../../.github/docker

    docker compose up -d redis vault-sse-before-migration cloudserver-sse-before-migration
    bash ../../wait_for_local_port.bash 8500 40
    bash ../../wait_for_local_port.bash 8000 40
    # HAVE vaultclient bin in your PATH or an alias
    # alias vaultclient="~/scality/vaultclient/bin/vaultclient"
    # FIX: tilde is NOT expanded inside double quotes, so the previous
    # "$PATH:~/scality/..." appended a literal "~" entry that PATH lookup
    # never resolves; use $HOME instead.
    export PATH="$PATH:$HOME/scality/vaultclient/bin/"
    vaultclient --config admin.json delete-account --name test || true
    vaultclient --config admin.json create-account --name test --email test@scality.com
    vaultclient --config admin.json generate-account-access-key --name test --accesskey TESTAK00000000000000 --secretkey TESTSK0000000000000000000000000000000000
    vaultclient --config admin.json get-account --account-name test
}

# Run the "before migration" test suite from the repo root against the
# old-version stack; output is duplicated to the job's artifacts dir.
function run_before_migration() {
echo ===== RUN BEFORE MIGRATION =====
# move from .github/docker back to the repo root (yarn targets live there)
cd ../..

# smoke-check S3 + credentials before launching the suite
# ("0" resolves to 0.0.0.0 — the host-networked cloudserver on port 8000)
AWS_ENDPOINT_URL=http://0:8000 AWS_DEFAULT_REGION=us-east-1 AWS_ACCESS_KEY_ID=TESTAK00000000000000 AWS_SECRET_ACCESS_KEY=TESTSK0000000000000000000000000000000000 aws s3 ls
# set -o pipefail at the top of the script lets a yarn failure propagate
# through tee and abort the script (set -e)
yarn run ft_sse_before_migration | tee /tmp/artifacts/$JOB_NAME/beforeMigration.log
}

# Swap the stack to the latest images: build the migration config with jq,
# stop the old-version services and start the migration ones (plus the KMS
# container). Expects to be called from the repo root — TODO confirm.
function run_latest_images() {
export CLOUDSERVER_IMAGE=$CLOUDSERVER_IMAGE_ORIGINAL
export VAULT_IMAGE=$VAULT_IMAGE_ORIGINAL
export S3KMS=$kmsProvider # S3
export KMS_BACKEND=$([[ "$kmsProvider" == "aws" ]] && echo "aws") # vault only aws is supported

cd tests/functional/sse-kms-migration
# merge base + provider + migration configs, then force the two test
# toggles; $kmsHideScalityArn / $globalEncryptionEnabled interpolate as
# jq booleans (they hold true/false)
jq -s "
.[0] * .[1] * .[2] *
{ kmsHideScalityArn: $kmsHideScalityArn } *
{ globalEncryptionEnabled: $globalEncryptionEnabled }
" \
configs/base.json \
configs/$kmsProvider.json \
configs/sseMigration.json \
> config.json
cd ../../../
cd .github/docker
# copy kmip certs
sudo cp -r ../pykmip/certs/* /tmp/ssl-kmip

# stop only the old-version services; volumes (vault-db, localData/Metadata)
# are kept so the migration stack sees the seeded state
docker compose down cloudserver-sse-before-migration vault-sse-before-migration
docker compose up -d $kmsContainer vault-sse-migration cloudserver-sse-migration

bash ../../wait_for_local_port.bash $kmsPort 40
bash ../../wait_for_local_port.bash 8500 40
bash ../../wait_for_local_port.bash 8000 40
}

# Run the migration test suite from the repo root against the new-version
# stack started by run_latest_images; log is teed to the artifacts dir.
function run_migration() {
echo ==== RUN MIGRATION ====
# back to repo root (run_latest_images left us in .github/docker)
cd ../..
export S3KMS=$kmsProvider # S3
export KMS_BACKEND=$([[ "$kmsProvider" == "aws" ]] && echo "aws") # vault only aws is supported
yarn run ft_sse_migration | tee /tmp/artifacts/$JOB_NAME/migration.log
}

# Run the post-migration ARN-prefix suite (cwd already at repo root after
# run_migration); same KMS env vars as the migration step.
function run_after_migration() {
sleep 1
export S3KMS=$kmsProvider # S3
export KMS_BACKEND=$([[ "$kmsProvider" == "aws" ]] && echo "aws") # vault only aws is supported
yarn run ft_sse_arn | tee /tmp/artifacts/$JOB_NAME/arnPrefix.log
}

# Driver: clean slate, seed data on old images, then migrate to the latest
# images and verify. Any failing step aborts the script (set -e).
stop_all
rm_all
start_all_before_migration
run_before_migration
run_latest_images
run_migration
run_after_migration
76 changes: 76 additions & 0 deletions .github/docker/vault-config.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
{
"clusters": 2,
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"interfaces": {
"S3": {
"address": "0.0.0.0",
"port": 8500,
"allowFrom": ["0.0.0.0/8", "::1"]
},
"administration": {
"address": "0.0.0.0",
"port": 8600
},
"sts": {
"address": "127.0.0.1",
"port": 8800
}
},
"map": ["127.0.0.1:4300", "127.0.0.2:4301", "127.0.0.3:4302", "127.0.0.4:4303", "127.0.0.5:4304"],
"keyFilePath": "./tests/utils/keyfile",
"adminCredentialsFilePath": "./tests/utils/admincredentials.json.encrypted",
"log": {
"level": "info",
"dump": "error"
},
"accountSeeds": [
{
"role": {
"roleName": "scality-role1",
"trustPolicy": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "AWS": "arn:aws:iam::000000000000:user/root" },
"Action": "sts:AssumeRole",
"Condition": {}
}
]
}
},
"permissionPolicy": {
"policyName": "scality-policy1",
"policyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "FullAccess",
"Effect": "Allow",
"Action": ["s3:*"],
"Resource": ["*"]
}
]
}
}
}
],
"utapi": {
"host": "127.0.0.1",
"port": 8100
},
"scuba": {
"host": "127.0.0.1",
"port": 8100
},
"kmsAWS": {
"noAwsArn": true,
"providerName": "local",
"region": "us-east-1",
"endpoint": "http://0:8080",
"ak": "456",
"sk": "123"
}
}
Loading
Loading