Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions jobs/blobstore_benchmark/monit
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Empty because it's an errand
47 changes: 47 additions & 0 deletions jobs/blobstore_benchmark/spec
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
---
name: blobstore_benchmark
description: "Cloud Controller blobstore benchmark runner (errand)"

# Templates rendered into the job directory at the paths on the right.
templates:
bpm.yml.erb: config/bpm.yml
bin/run.erb: bin/run
cloud_controller_ng.yml.erb: config/cloud_controller_ng.yml
ruby_version.sh.erb: bin/ruby_version.sh
db_ca.crt.erb: config/certs/db_ca.crt
storage_cli_config_droplets.json.erb: config/storage_cli_config_droplets.json
storage_cli_config_packages.json.erb: config/storage_cli_config_packages.json
storage_cli_config_buildpacks.json.erb: config/storage_cli_config_buildpacks.json
storage_cli_config_resource_pool.json.erb: config/storage_cli_config_resource_pool.json

packages:
- capi_utils
- cloud_controller_ng
- ruby-3.2
- jemalloc
- storage-cli


# Links consumed by the templates: the cc config subtree, the CC database
# connection details, and (optionally) the database instance group used to
# resolve the DB address when ccdb.address is not set.
consumes:
- name: cloud_controller_internal
type: cloud_controller_internal
optional: false
- name: cloud_controller_db
type: cloud_controller_db
- name: database
type: database
optional: true

properties:
blobstore_benchmark.mode:
description: "Which blobstore backend to benchmark ('storage-cli' or 'fog')."
default: "storage-cli"

blobstore_benchmark.cc_overrides:
description: "Hash merged into cc config for this errand."
default: {}
cc:
description: "Full Cloud Controller 'cc' config subtree."
default: {}
cc.stdout_logging_enabled:
default: false
description: "Enable logging to stdout"
29 changes: 29 additions & 0 deletions jobs/blobstore_benchmark/templates/bin/run.erb
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Errand entrypoint for the blobstore benchmark job.
# Invoked with no args by BOSH (delegates to bpm), and with "run" by bpm
# (executes the benchmark rake task directly).
set -eu

JOB_DIR="/var/vcap/jobs/blobstore_benchmark"
PKG_DIR="/var/vcap/packages/cloud_controller_ng"

MODE="<%= p('blobstore_benchmark.mode') %>"
echo "Performing blobstore benchmarks (mode: ${MODE})"

# Preload jemalloc for the Ruby process (same allocator setup as the other
# cloud_controller_ng jobs).
export LD_PRELOAD=/var/vcap/packages/jemalloc/lib/libjemalloc.so

perform_blobstore_benchmarks() {
  # Reuse JOB_DIR/PKG_DIR instead of re-hardcoding the paths (PKG_DIR was
  # previously defined but unused).
  export CLOUD_CONTROLLER_NG_CONFIG="${JOB_DIR}/config/cloud_controller_ng.yml"
  source "${JOB_DIR}/bin/ruby_version.sh"
  cd "${PKG_DIR}/cloud_controller_ng"

  # exec so the rake task replaces this shell and signals reach it directly.
  exec bundle exec rake benchmarks:perform_blobstore_benchmark
}

case ${1:-} in
  run)
    perform_blobstore_benchmarks
    ;;

  *)
    # No argument: hand off to bpm, which re-invokes this script with "run"
    # (see bpm.yml args).
    /var/vcap/jobs/bpm/bin/bpm run blobstore_benchmark -p blobstore_benchmark
    ;;

esac
6 changes: 6 additions & 0 deletions jobs/blobstore_benchmark/templates/bpm.yml.erb
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# bpm process definition: runs the errand entrypoint with the "run" argument,
# which makes bin/run execute the benchmark directly (see bin/run.erb).
processes:
- name: blobstore_benchmark
executable: /var/vcap/jobs/blobstore_benchmark/bin/run
args:
- run

105 changes: 105 additions & 0 deletions jobs/blobstore_benchmark/templates/cloud_controller_ng.yml.erb
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
<%
require 'cgi'

# Escapes a string for safe embedding in YAML: control characters, ':', '"'
# and backslash become "\xNN" sequences, and the whole value is wrapped in
# double quotes when it contains any character that YAML would misparse bare.
# NOTE(review): not referenced in the visible portion of this template
# (passwords rely on to_yaml instead) — confirm before removing.
def yaml_escape(input_string)
  escape_re = /[:\\"\x00-\x1f\x7f]/
  quote_re  = /[ !#'&%*,:>@\[\]\\`{|}]/

  needs_quotes = quote_re.match(input_string) || escape_re.match(input_string)
  quote = needs_quotes ? '"' : ''

  escaped = input_string.gsub(escape_re) { |ch| format('\\x%x', ch.ord) }
  quote + escaped + quote
end

# Recursively folds +extra+ into +base+ without ever replacing a value that
# +base+ already has: missing keys are copied over, and nested hashes present
# on both sides are merged the same way. Mutates and returns +base+; if
# +extra+ is not a Hash, +base+ is returned untouched.
def deep_merge_without_overwrite(base, extra)
  return base unless extra.is_a?(Hash)

  extra.each_pair do |key, value|
    if base.key?(key)
      # Existing scalar/array values win; only hash-vs-hash recurses.
      deep_merge_without_overwrite(base[key], value) if base[key].is_a?(Hash) && value.is_a?(Hash)
    else
      base[key] = value
    end
  end

  base
end

# Pull the full 'cc' config subtree from the cloud_controller_internal link;
# fall back to an empty hash when the link/property is unavailable.
cc_cfg = (link("cloud_controller_internal").p("cc") rescue {})

unless cc_cfg.is_a?(Hash)
raise "cc link did not return a Hash, got: #{cc_cfg.class}"
end

# Declared-but-unset hash properties arrive as nil from the link; normalize
# the blobstore connection sections to {} so later lookups/merges don't fail.
%w[resource_pool buildpacks packages droplets].each do |k|
section = cc_cfg[k]
next unless section.is_a?(Hash)

%w[fog_connection connection_config fog_aws_storage_options fog_gcp_storage_options webdav_config].each do |hk|
section[hk] = {} if section.key?(hk) && section[hk].nil?
end
end

# Find the CC database entry and the admin role in the db link's lists.
db = link("cloud_controller_db").p("ccdb.databases").find { |d| d["tag"] == "cc" }
db_role = link("cloud_controller_db").p("ccdb.roles").find { |r| r["tag"] == "admin" }

# Prefer an explicitly configured DB address; otherwise fall back to the
# first instance of the optional 'database' link.
database_address = nil
link('cloud_controller_db').if_p('ccdb.address') { |host| database_address = host }
.else { database_address = link('database').instances[0].address }

db_hash = {
'database' => {
# db_scheme 'mysql' maps to the mysql2 adapter; other schemes pass through.
'adapter' => (link("cloud_controller_db").p("ccdb.db_scheme") == "mysql" ? "mysql2" : link("cloud_controller_db").p("ccdb.db_scheme")),
'host' => database_address,
'port' => link("cloud_controller_db").p("ccdb.port"),
'user' => db_role["name"],
# Let YAML handle quoting/escaping correctly
'password' => db_role["password"].to_s,
'database' => db["name"],
},
'max_connections' => link("cloud_controller_db").p("ccdb.max_connections"),
'pool_timeout' => link("cloud_controller_db").p("ccdb.pool_timeout"),
# keep DB noise down
'log_level' => (cc_cfg['db_logging_level'] || 'error'),
'log_db_queries' => (cc_cfg.key?('log_db_queries') ? cc_cfg['log_db_queries'] : false),
'ssl_verify_hostname' => link("cloud_controller_db").p("ccdb.ssl_verify_hostname"),
'connection_validation_timeout' => link("cloud_controller_db").p("ccdb.connection_validation_timeout"),
}

# Only set ca_cert_path when a cert was provided; db_ca.crt.erb renders an
# empty file otherwise.
if link("cloud_controller_db").p("ccdb.ca_cert", nil)
db_hash['ca_cert_path'] = '/var/vcap/jobs/blobstore_benchmark/config/certs/db_ca.crt'
end

# Precedence: job property, then the linked cc config (either flat or under
# logging.level), then 'error'.
logging_level = p("cc.logging_level", cc_cfg["logging_level"] || cc_cfg.dig("logging", "level") || "error")

final = {
'pid_filename' => '/var/vcap/sys/run/blobstore_benchmark/blobstore_benchmark.pid',
'index' => spec.index,
'name' => name,

'logging' => {
'file' => '/var/vcap/sys/log/blobstore_benchmark/blobstore_benchmark.log',
'syslog' => 'vcap.cloud_controller_ng',
'level' => logging_level.to_s,
'max_retries' => p("cc.logging_max_retries", cc_cfg["logging_max_retries"] || 0),
'format' => {
'timestamp' => (cc_cfg.dig("logging", "format", "timestamp") ||
link("cloud_controller_internal").p("cc.logging.format.timestamp", "rfc3339"))
},
'stdout_sink_enabled' => p('cc.stdout_logging_enabled', false)
},

'db' => db_hash,

# Storage-cli config files rendered by the sibling *.json.erb templates.
'storage_cli_config_file_resource_pool' => '/var/vcap/jobs/blobstore_benchmark/config/storage_cli_config_resource_pool.json',
'storage_cli_config_file_buildpacks' => '/var/vcap/jobs/blobstore_benchmark/config/storage_cli_config_buildpacks.json',
'storage_cli_config_file_packages' => '/var/vcap/jobs/blobstore_benchmark/config/storage_cli_config_packages.json',
'storage_cli_config_file_droplets' => '/var/vcap/jobs/blobstore_benchmark/config/storage_cli_config_droplets.json',
}

# Overlay every remaining cc key from the link, but never overwrite the
# errand-specific values assembled above.
deep_merge_without_overwrite(final, cc_cfg)
%>
---
<%= final.to_yaml.sub(/\A---\s*\n/, '') %>
1 change: 1 addition & 0 deletions jobs/blobstore_benchmark/templates/db_ca.crt.erb
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
<%= link('cloud_controller_db').p('ccdb.ca_cert', '') %>
1 change: 1 addition & 0 deletions jobs/blobstore_benchmark/templates/ruby_version.sh.erb
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
<%
require "json"

# Ensure Azure CLI connection_config has a default timeout if none is set
# Returns a copy of +connection_cfg+ (nil is treated as {}) with
# 'put_timeout_in_seconds' defaulted — as a string — when the blobstore type
# is 'storage_cli' and no usable value is present. Never mutates its input.
def cli_cfg_with_default_timeout(connection_cfg, blobstore_type, default_seconds: 41)
  result = (connection_cfg || {}).dup
  return result unless blobstore_type == 'storage_cli'

  # A missing key and a blank value are treated the same way.
  current = result['put_timeout_in_seconds']
  result['put_timeout_in_seconds'] = default_seconds.to_s if current.to_s.empty?
  result
end

# helper: add key only when value is present
# Stores +key+ => +val+ in hash +h+ only when +val+ is present: nil values
# and empty strings/collections are silently skipped.
def add_optional(h, key, val)
  blank = val.nil? || (val.respond_to?(:empty?) && val.empty?)
  h[key] = val unless blank
end

# Build the storage-cli JSON config for the buildpacks blobstore from the
# cc link, keyed on the configured blobstore provider. Unknown/absent
# providers yield an empty JSON object.
l = link("cloud_controller_internal")

scope = "cc.buildpacks.connection_config"
provider = l.p("cc.buildpacks.blobstore_provider", nil)
options = {}

if provider == "AzureRM"
  options["provider"] = provider
  options["account_name"] = l.p("#{scope}.azure_storage_account_name")
  options["container_name"] = l.p("#{scope}.container_name")
  options["account_key"] = l.p("#{scope}.azure_storage_access_key")
  add_optional(options, "environment", l.p("#{scope}.environment", "AzureCloud"))
  add_optional(options, "put_timeout_in_seconds", l.p("#{scope}.put_timeout_in_seconds", nil))
  # Azure is driven through storage-cli, which needs a put timeout; default it.
  options = cli_cfg_with_default_timeout(options, 'storage_cli')
end

if provider == "Google"
  options["provider"] = provider
  options["credentials_source"] = "static"
  options["json_key"] = l.p("#{scope}.google_json_key_string")
  options["bucket_name"] = l.p("#{scope}.bucket_name")
  add_optional(options, "storage_class", l.p("#{scope}.storage_class", nil))
  add_optional(options, "encryption_key", l.p("#{scope}.encryption_key", nil))
end

if provider == "AWS"
  options["provider"] = provider
  options["bucket_name"] = l.p("#{scope}.bucket_name")
  options["credentials_source"] = "static"
  options["access_key_id"] = l.p("#{scope}.aws_access_key_id")
  options["secret_access_key"] = l.p("#{scope}.aws_secret_access_key")
  add_optional(options, "region", l.p("#{scope}.region", nil))
  add_optional(options, "host", l.p("#{scope}.host", nil))
  add_optional(options, "port", l.p("#{scope}.port", nil))
  add_optional(options, "ssl_verify_peer", l.p("#{scope}.ssl_verify_peer", nil))
  add_optional(options, "use_ssl", l.p("#{scope}.use_ssl", nil))
  add_optional(options, "signature_version", l.p("#{scope}.signature_version", nil))
  add_optional(options, "server_side_encryption", l.p("#{scope}.encryption", nil))
  add_optional(options, "sse_kms_key_id", l.p("#{scope}.x-amz-server-side-encryption-aws-kms-key-id", nil))
  add_optional(options, "multipart_upload", l.p("#{scope}.multipart_upload", nil))
end

if provider == "aliyun"
  options["provider"] = provider
  options["access_key_id"] = l.p("#{scope}.aliyun_accesskey_id")
  options["access_key_secret"] = l.p("#{scope}.aliyun_accesskey_secret")
  options["endpoint"] = l.p("#{scope}.aliyun_oss_endpoint")
  options["bucket_name"] = l.p("#{scope}.aliyun_oss_bucket")
end

if provider == "webdav"
  options["provider"] = provider
  options["user"] = l.p("#{scope}.username")
  options["password"] = l.p("#{scope}.password")
  options["endpoint"] = l.p("#{scope}.public_endpoint")
  add_optional(options, "secret", l.p("#{scope}.secret", nil))
  add_optional(options, "retry_attempts", l.p("#{scope}.retry_attempts", nil))

  # Optional TLS block carrying the CA cert. ca_cert defaults to nil, so
  # guard before calling #empty? (calling ca_cert.empty? on nil raised
  # NoMethodError whenever the property was unset).
  ca_cert = l.p("#{scope}.ca_cert", nil)
  options["tls"] = { "cert" => ca_cert } if ca_cert && !ca_cert.empty?
end

-%>
<%= JSON.pretty_generate(options) %>
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
<%
require "json"

# Ensure Azure CLI connection_config has a default timeout if none is set
# Returns a copy of +connection_cfg+ (nil is treated as {}) with
# 'put_timeout_in_seconds' defaulted — as a string — when the blobstore type
# is 'storage_cli' and no usable value is present. Never mutates its input.
def cli_cfg_with_default_timeout(connection_cfg, blobstore_type, default_seconds: 41)
  result = (connection_cfg || {}).dup
  return result unless blobstore_type == 'storage_cli'

  # A missing key and a blank value are treated the same way.
  current = result['put_timeout_in_seconds']
  result['put_timeout_in_seconds'] = default_seconds.to_s if current.to_s.empty?
  result
end

# helper: add key only when value is present
# Stores +key+ => +val+ in hash +h+ only when +val+ is present: nil values
# and empty strings/collections are silently skipped.
def add_optional(h, key, val)
  blank = val.nil? || (val.respond_to?(:empty?) && val.empty?)
  h[key] = val unless blank
end

# Build the storage-cli JSON config for the droplets blobstore from the
# cc link, keyed on the configured blobstore provider. Unknown/absent
# providers yield an empty JSON object.
l = link("cloud_controller_internal")

scope = "cc.droplets.connection_config"
provider = l.p("cc.droplets.blobstore_provider", nil)
options = {}

if provider == "AzureRM"
  options["provider"] = provider
  options["account_name"] = l.p("#{scope}.azure_storage_account_name")
  options["container_name"] = l.p("#{scope}.container_name")
  options["account_key"] = l.p("#{scope}.azure_storage_access_key")
  add_optional(options, "environment", l.p("#{scope}.environment", "AzureCloud"))
  add_optional(options, "put_timeout_in_seconds", l.p("#{scope}.put_timeout_in_seconds", nil))
  # Azure is driven through storage-cli, which needs a put timeout; default it.
  options = cli_cfg_with_default_timeout(options, 'storage_cli')
end

if provider == "Google"
  options["provider"] = provider
  options["credentials_source"] = "static"
  options["json_key"] = l.p("#{scope}.google_json_key_string")
  options["bucket_name"] = l.p("#{scope}.bucket_name")
  add_optional(options, "storage_class", l.p("#{scope}.storage_class", nil))
  add_optional(options, "encryption_key", l.p("#{scope}.encryption_key", nil))
end

if provider == "AWS"
  options["provider"] = provider
  options["bucket_name"] = l.p("#{scope}.bucket_name")
  options["credentials_source"] = "static"
  options["access_key_id"] = l.p("#{scope}.aws_access_key_id")
  options["secret_access_key"] = l.p("#{scope}.aws_secret_access_key")
  add_optional(options, "region", l.p("#{scope}.region", nil))
  add_optional(options, "host", l.p("#{scope}.host", nil))
  add_optional(options, "port", l.p("#{scope}.port", nil))
  add_optional(options, "ssl_verify_peer", l.p("#{scope}.ssl_verify_peer", nil))
  add_optional(options, "use_ssl", l.p("#{scope}.use_ssl", nil))
  add_optional(options, "signature_version", l.p("#{scope}.signature_version", nil))
  add_optional(options, "server_side_encryption", l.p("#{scope}.encryption", nil))
  add_optional(options, "sse_kms_key_id", l.p("#{scope}.x-amz-server-side-encryption-aws-kms-key-id", nil))
  add_optional(options, "multipart_upload", l.p("#{scope}.multipart_upload", nil))
end

if provider == "aliyun"
  options["provider"] = provider
  options["access_key_id"] = l.p("#{scope}.aliyun_accesskey_id")
  options["access_key_secret"] = l.p("#{scope}.aliyun_accesskey_secret")
  options["endpoint"] = l.p("#{scope}.aliyun_oss_endpoint")
  options["bucket_name"] = l.p("#{scope}.aliyun_oss_bucket")
end

if provider == "webdav"
  options["provider"] = provider
  options["user"] = l.p("#{scope}.username")
  options["password"] = l.p("#{scope}.password")
  options["endpoint"] = l.p("#{scope}.public_endpoint")
  add_optional(options, "secret", l.p("#{scope}.secret", nil))
  add_optional(options, "retry_attempts", l.p("#{scope}.retry_attempts", nil))

  # Optional TLS block carrying the CA cert. ca_cert defaults to nil, so
  # guard before calling #empty? (calling ca_cert.empty? on nil raised
  # NoMethodError whenever the property was unset).
  ca_cert = l.p("#{scope}.ca_cert", nil)
  options["tls"] = { "cert" => ca_cert } if ca_cert && !ca_cert.empty?
end

-%>
<%= JSON.pretty_generate(options) %>
Loading