Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/actions/setup-ci/action.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ runs:
shell: bash
run: |
sudo echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
sudo echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.scality.com" | sudo tee -a /etc/hosts
sudo echo "127.0.0.1 pykmip.local" | sudo tee -a /etc/hosts
for i in `seq 1 50`; do sudo echo "127.0.0.$i $i.pykmip.local" | sudo tee -a /etc/hosts ; done
- name: Setup Credentials
Expand Down
63 changes: 63 additions & 0 deletions .github/docker/config.s3c.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
{
"port": 8000,
"maxScannedLifecycleListingEntries": 10000,
"metricsPort": 8001,
"internalPort": 8004,
"replicationGroupId": "RG001",
"restEndpoints": {
"s3.amazonaws.com": "dc-1",
"localhost": "dc-1",
"scality.scality.com": "dc-1",
"127.0.0.1": "dc-1",
"node1.scality.com": "dc-1"
},
"websiteEndpoints": ["s3-website-us-east-1.scality.com"],
"bucketd": {
"bootstrap": ["0.0.0.0:9000"]
},
"vaultd": {
"host": "127.0.0.1",
"port": 8500
},
"clusters": 5,
"kmsHideScalityArn": true,
"healthChecks": {
"allowFrom": ["127.0.0.1", "::1"]
},
"localCache": {
"host": "localhost",
"port": 6379,
"password": ""
},
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"replicationEndpoints": [
{"site": "zenko", "servers": ["127.0.0.1:9080"], "echo": false, "default": true},
{"site": "us-east-2", "servers": ["127.0.0.1:9080"]}
],
"requests": {
"extractClientIPFromHeader": "x-forwarded-for",
"extractProtocolFromHeader": "x-forwarded-proto",
"trustedProxyCIDRs": [
"127.0.0.1/32",
"::ffff:127.0.0.1/128",
"127.0.0.1"
],
"viaProxy": true
},
"multiObjectDeleteEnableOptimizations": false,
"supportedLifecycleRules": [
"Expiration",
"NoncurrentVersionExpiration",
"AbortIncompleteMultipartUpload"
],
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}
9 changes: 9 additions & 0 deletions .github/docker/docker-compose.sse.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,9 +34,18 @@ services:
- ../../localData:/usr/src/app/localData
- ../../localMetadata:/usr/src/app/localMetadata
- ../../tests/functional/sse-kms-migration/config.json:/conf/config.json
- ../../tests/locationConfig/locationConfigS3C.json:/conf/locationConfig.json
environment:
- S3_CONFIG_FILE=/conf/config.json
- S3_LOCATION_FILE=/conf/locationConfig.json
cloudserver-sse-migration:
extends: cloudserver-sse-before-migration
profiles: [sse-migration]
command: sh -c "yarn start > /artifacts/s3.migration.log 2> /artifacts/s3-stderr.migration.log"
metadata-standalone:
image: ghcr.io/scality/metadata:8.11.0-standalone
profiles: ['metadata-standalone']
network_mode: 'host'
volumes:
- ./md-config-v0.json:/mnt/standalone_workdir/config.json:ro
- /tmp/artifacts/${JOB_NAME}/md:/mnt/standalone_workdir/log
29 changes: 29 additions & 0 deletions .github/docker/md-config-v0.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
{
"raftSessions": 1,
"raftMembers": 5,
"bucketdCount": 1,
"bucketdWorkers": 1,
"basePorts": {
"bucketd": 9000,
"repd": 4200,
"repdAdmin": 4250
},
"logLevel": "info",
"env": {
"METADATA_NEW_BUCKETS_VFORMAT": "v0",
"S3_VERSION_ID_ENCODING_TYPE":"hex"
},
"migration": {
"deploy": false,
"raftSessions": 0,
"raftMembers": 5,
"bucketdCount": 1,
"bucketdWorkers": 1,
"basePorts": {
"bucketd": 9001,
"repd": 4700,
"repdAdmin": 4750
},
"logLevel": "info"
}
}
62 changes: 62 additions & 0 deletions .github/docker/setup-s3c.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
#!/bin/bash

# setup S3C environment just like
# https://github.com/scality/Integration/blob/development/9.5/tests/setup-environment/index.js

set -e -o pipefail

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

VAULTCLIENT=$SCRIPT_DIR/../../node_modules/vaultclient/bin/vaultclient
CONFIG=$SCRIPT_DIR/admin.json

echo "Deleting and setting up S3C accounts like Integration (follow conf/authdata.json)"

$VAULTCLIENT --config $CONFIG delete-account --name Bart || true
$VAULTCLIENT --config $CONFIG create-account \
--name Bart \
--email sampleaccount1@sampling.com \
--accountid 123456789012 \
--canonicalid 79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be
$VAULTCLIENT --config $CONFIG generate-account-access-key \
--name Bart \
--accesskey ACC1AK00000000000000 \
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since we are specifying those credentials in s3c_credentials.json should we read from it?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not necessarily, I've hardcoded here so we can easily rerun manually and quickly troubleshoot without having to look for creds. Same for Integration, it will be hardcoded

--secretkey ACC1SK0000000000000000000000000000000000

# Recreate the "Lisa" account with fixed, well-known credentials.
# Delete first so the script is idempotent; `|| true` tolerates the
# account not existing on a fresh environment.
$VAULTCLIENT --config $CONFIG delete-account --name Lisa || true
$VAULTCLIENT --config $CONFIG create-account \
--name Lisa \
--email sampleaccount2@sampling.com \
--accountid 123456789013 \
--canonicalid 79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf
$VAULTCLIENT --config $CONFIG generate-account-access-key \
--name Lisa \
--accesskey ACC2AK00000000000000 \
--secretkey ACC2SK0000000000000000000000000000000000

# Replication account for backbeat replication tests
$VAULTCLIENT --config $CONFIG delete-account --name Replication || true
# Cannot use a URL as the canonicalId for a service account
$VAULTCLIENT --config $CONFIG create-account \
--name Replication \
--email inspector@replication.info \
--accountid 123456789015 \
--canonicalid 79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2ba
$VAULTCLIENT --config $CONFIG generate-account-access-key \
--name Replication \
--accesskey ACCREPAK000000000000 \
--secretkey ACCREPSK00000000000000000000000000000000

# Reuse the S3C (Vault) credentials created above for the in-memory-backend
# test suites, so both backends run with the same access/secret keys.
echo "Copying s3c credentials to mem credentials"
cp \
$SCRIPT_DIR/../../tests/functional/aws-node-sdk/lib/json/s3c_credentials.json \
$SCRIPT_DIR/../../tests/functional/aws-node-sdk/lib/json/mem_credentials.json
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this file supposed to be added in the .gitignore?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No this is executed in the CI


echo "Update conf/authdata.json account Replication canonicalID"
REP_CANONICAL_ID=$(
$VAULTCLIENT --config $CONFIG get-account --account-name Replication | jq -r .canonicalId
)

# might need to undo changes if script was already ran before (manuallyoutside CI)
git checkout -- $SCRIPT_DIR/../../conf/authdata.json || true
sed -i "s/http:\/\/acs.zenko.io\/accounts\/service\/replication/$REP_CANONICAL_ID/g" $SCRIPT_DIR/../../conf/authdata.json
116 changes: 116 additions & 0 deletions .github/workflows/tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -451,6 +451,122 @@ jobs:
source: /tmp/artifacts
if: always()

# Configure and run as Integration run S3C tests
s3c-ft-tests:
# This job ignores errors for now, until all unification and backbeat tests are fixed
runs-on: ubuntu-24.04
needs: build
continue-on-error: true
env:
S3BACKEND: file
S3DATA: scality
S3METADATA: scality
S3VAULT: scality
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MPU_TESTING: "yes"
ENABLE_NULL_VERSION_COMPAT_MODE: true
COMPOSE_FILE: docker-compose.yaml:docker-compose.sse.yaml
S3_VERSION_ID_ENCODING_TYPE: hex
JOB_NAME: ${{ github.job }}
VAULT_IMAGE: ghcr.io/scality/vault:7.76.0
S3_END_TO_END: true
S3_TESTVAL_OWNERCANONICALID: 79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Copy S3C config
run: cp .github/docker/config.s3c.json tests/functional/sse-kms-migration/config.json
- name: Setup CI services
run: docker compose up -d --quiet-pull redis sproxyd metadata-standalone vault-sse-before-migration cloudserver-sse-before-migration
working-directory: .github/docker
- name: Wait for services to be ready
run: |-
set -o pipefail;
bash wait_for_local_port.bash 81 40
bash wait_for_local_port.bash 9000 40
bash wait_for_local_port.bash 8000 40
- name: Setup vault credentials like S3C Integration
run: ./setup-s3c.sh
working-directory: .github/docker
- name: Run cloudserver-object tests
continue-on-error: true
env:
S3_CONFIG_FILE: ${{ github.workspace }}/.github/docker/config.s3c.json
S3_LOCATION_FILE: ${{ github.workspace }}/tests/locationConfig/locationConfigS3C.json
run: |-
set -o pipefail;
yarn run ft_awssdk_objects_misc | tee /tmp/artifacts/${{ github.job }}/ft_awssdk_objects_misc.log
- name: Run cloudserver-version tests
continue-on-error: true
env:
S3_CONFIG_FILE: ${{ github.workspace }}/.github/docker/config.s3c.json
S3_LOCATION_FILE: ${{ github.workspace }}/tests/locationConfig/locationConfigS3C.json
run: |-
set -o pipefail;
yarn run ft_awssdk_versioning | tee /tmp/artifacts/${{ github.job }}/ft_awssdk_versioning.log
- name: Run cloudserver-bucket tests
continue-on-error: true
env:
S3_CONFIG_FILE: ${{ github.workspace }}/.github/docker/config.s3c.json
S3_LOCATION_FILE: ${{ github.workspace }}/tests/locationConfig/locationConfigS3C.json
run: |-
set -o pipefail;
yarn run ft_awssdk_buckets | tee /tmp/artifacts/${{ github.job }}/ft_awssdk_buckets.log
- name: Run cloudserver-routes (metadata) tests
continue-on-error: true
env:
S3_CONFIG_FILE: ${{ github.workspace }}/.github/docker/config.s3c.json
S3_LOCATION_FILE: ${{ github.workspace }}/tests/locationConfig/locationConfigS3C.json
run: |-
set -o pipefail;
yarn run ft_node_routes | tee /tmp/artifacts/${{ github.job }}/ft_node_routes.log
- name: Run backbeat route tests
continue-on-error: true
env:
S3_CONFIG_FILE: ${{ github.workspace }}/.github/docker/config.s3c.json
S3_LOCATION_FILE: ${{ github.workspace }}/tests/locationConfig/locationConfigS3C.json
run: |-
set -o pipefail;
yarn run ft_route_backbeat | tee /tmp/artifacts/${{ github.job }}/ft_route_backbeat.log
- name: Run backbeat tests
continue-on-error: true
env:
S3_CONFIG_FILE: ${{ github.workspace }}/.github/docker/config.s3c.json
S3_LOCATION_FILE: ${{ github.workspace }}/tests/locationConfig/locationConfigS3C.json
run: |-
set -o pipefail;
yarn run ft_backbeat | tee /tmp/artifacts/${{ github.job }}/ft_backbeat.log
- name: Cleanup and upload coverage
uses: ./.github/actions/cleanup-and-coverage
with:
codecov-token: ${{ secrets.CODECOV_TOKEN }}
flags: s3c-ft-tests
if: always()
- name: Upload test results to Codecov
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: '**/junit/*junit*.xml'
flags: s3c-ft-tests
if: always() && !cancelled()
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()

utapi-v2-tests:
runs-on: ubuntu-24.04
needs: build
Expand Down
14 changes: 14 additions & 0 deletions tests/functional/aws-node-sdk/lib/json/s3c_credentials.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
{
"default": {
"accessKey": "ACC1AK00000000000000",
"secretKey": "ACC1SK0000000000000000000000000000000000"
},
"lisa": {
"accessKey": "ACC2AK00000000000000",
"secretKey": "ACC2SK0000000000000000000000000000000000"
},
"replication": {
"accessKey": "ACCREPAK000000000000",
"secretKey": "ACCREPSK00000000000000000000000000000000"
}
}
Loading
Loading