2 changes: 1 addition & 1 deletion Makefile
@@ -3,7 +3,7 @@
 # - Docker image name
 # - Kubernetes service, rc, pod, secret, volume names
 SHORT_NAME := postgres
-DEIS_REGISTY ?= ${DEV_REGISTRY}/
+DEIS_REGISTRY ?= ${DEV_REGISTRY}/
 IMAGE_PREFIX ?= deis
 
 include versioning.mk
4 changes: 4 additions & 0 deletions charts/database/templates/database-deployment.yaml
@@ -41,6 +41,10 @@ spec:
           value: "{{.Values.global.storage}}"
         - name: PGCTLTIMEOUT
           value: "{{.Values.postgres.timeout}}"
+        {{- if eq .Values.global.storage "s3" }}
+        - name: S3_SSE
+          value: "{{.Values.global.s3.use_sse}}"
+        {{- end}}
         lifecycle:
           preStop:
             exec:
34 changes: 16 additions & 18 deletions rootfs/bin/create_bucket
@@ -12,6 +12,7 @@ from oauth2client.service_account import ServiceAccountCredentials
 from gcloud.storage.client import Client
 from gcloud import exceptions
 from azure.storage.blob import BlobService
+from urllib.parse import urlparse
 
 def bucket_exists(conn, name):
     bucket = conn.lookup(name)
@@ -23,25 +24,22 @@ bucket_name = os.getenv('BUCKET_NAME')
 region = os.getenv('S3_REGION')
 
 if os.getenv('DATABASE_STORAGE') == "s3":
-    conn = boto.s3.connect_to_region(region)
+    if os.getenv('S3_ENDPOINT'):
+        endpoint = urlparse(os.getenv('S3_ENDPOINT'))
+        conn = boto.s3.connect_to_region(region,
+            host=endpoint.hostname,
+            port=endpoint.port,
+            path=endpoint.path,
+            calling_format=boto.s3.connection.OrdinaryCallingFormat())
+    else:
+        conn = boto.s3.connect_to_region(region)
 
     if not bucket_exists(conn, bucket_name):
-        try:
-            if region == "us-east-1":
-                # use "US Standard" region. workaround for https://github.com/boto/boto3/issues/125
-                conn.create_bucket(bucket_name)
-            else:
-                conn.create_bucket(bucket_name, location=region)
-        # NOTE(bacongobbler): for versions prior to v2.9.0, the bucket is created in the default region.
-        # if we got here, we need to propagate "us-east-1" into WALE_S3_ENDPOINT because the bucket
-        # exists in a different region and we cannot find it.
-        # TODO(bacongobbler): deprecate this once we drop support for v2.8.0 and lower
-        except S3CreateError as err:
-            if region != 'us-east-1':
-                print('Failed to create bucket in {}. We are now assuming that the bucket was created in us-east-1.'.format(region))
-                with open(os.path.join(os.environ['WALE_ENVDIR'], "WALE_S3_ENDPOINT"), "w+") as file:
-                    file.write('https+path://s3.amazonaws.com:443')
-            else:
-                raise
+        if region == "us-east-1":
+            # use "US Standard" region. workaround for https://github.com/boto/boto3/issues/125
+            conn.create_bucket(bucket_name)
+        else:
+            conn.create_bucket(bucket_name, location=region)
 
 elif os.getenv('DATABASE_STORAGE') == "gcs":
     scopes = ['https://www.googleapis.com/auth/devstorage.full_control']
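
The interesting part of this hunk is the endpoint handling: when `S3_ENDPOINT` is set, the connection is built from a parsed URL and switched to `OrdinaryCallingFormat`, which makes boto issue path-style requests (`host/bucket/key`) instead of the virtual-host style (`bucket.host/key`) that most non-AWS, S3-compatible stores cannot serve. A minimal sketch of what `urlparse` contributes here, using a made-up endpoint URL (not from this repo):

```python
# Sketch only: how urlparse() decomposes a custom S3 endpoint before it is
# handed to boto.s3.connect_to_region(). The URL below is hypothetical.
from urllib.parse import urlparse

endpoint = urlparse("https://minio.example.com:9000")
print(endpoint.hostname)  # minio.example.com
print(endpoint.port)      # 9000
print(endpoint.path)      # '' (empty unless the endpoint URL carries a prefix)
```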
25 changes: 18 additions & 7 deletions rootfs/docker-entrypoint-initdb.d/001_setup_envdir.sh
@@ -7,15 +7,26 @@ if [[ "$DATABASE_STORAGE" == "s3" || "$DATABASE_STORAGE" == "minio" ]]; then
     AWS_SECRET_ACCESS_KEY=$(cat /var/run/secrets/deis/objectstore/creds/secretkey)
     if [[ "$DATABASE_STORAGE" == "s3" ]]; then
         AWS_REGION=$(cat /var/run/secrets/deis/objectstore/creds/region)
+        S3_ENDPOINT=$(cat /var/run/secrets/deis/objectstore/creds/endpoint)
         BUCKET_NAME=$(cat /var/run/secrets/deis/objectstore/creds/database-bucket)
-        # Convert $AWS_REGION into $WALE_S3_ENDPOINT to avoid "Connection reset by peer" from
-        # regions other than us-standard.
-        # See https://github.com/wal-e/wal-e/issues/167
-        # See https://github.com/boto/boto/issues/2207
-        if [[ "$AWS_REGION" == "us-east-1" ]]; then
-            echo "https+path://s3.amazonaws.com:443" > WALE_S3_ENDPOINT
+        if [[ "$S3_ENDPOINT" == "" ]]; then
+            # Convert $AWS_REGION into $WALE_S3_ENDPOINT to avoid "Connection reset by peer" from
+            # regions other than us-standard.
+            # See https://github.com/wal-e/wal-e/issues/167
+            # See https://github.com/boto/boto/issues/2207
+            if [[ "$AWS_REGION" == "us-east-1" ]]; then
+                echo "https+path://s3.amazonaws.com:443" > WALE_S3_ENDPOINT
+            else
+                echo "https+path://s3-${AWS_REGION}.amazonaws.com:443" > WALE_S3_ENDPOINT
+            fi
         else
-            echo "https+path://s3-${AWS_REGION}.amazonaws.com:443" > WALE_S3_ENDPOINT
+            echo "$S3_ENDPOINT" > S3_ENDPOINT
+            echo "$S3_ENDPOINT" | sed -E -e 's!http(s?)://!http\1+path://!' -e 's!/$!!' > WALE_S3_ENDPOINT
         fi
+        if [[ $S3_SSE ]]; then
+            echo $S3_SSE > WALE_S3_SSE
+        else
+            echo "false" > WALE_S3_SSE
+        fi
     else
         AWS_REGION="us-east-1"
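
Two details are easy to miss here. The script writes plain files in the envdir style WAL-E uses, so each file name becomes an environment variable when WAL-E runs: the raw `S3_ENDPOINT` is kept for `create_bucket`, while a rewritten copy goes into `WALE_S3_ENDPOINT`, because WAL-E (via boto) wants its endpoint as `scheme+path://host[:port]` with no trailing slash. The `sed` pipeline does exactly that rewrite; a Python rendering of the same transformation, with a hypothetical endpoint value:

```python
# Sketch mirroring the sed pipeline above; the endpoint value is made up.
import re

def to_wal_e_endpoint(s3_endpoint: str) -> str:
    # 's!http(s?)://!http\1+path://!' -- inject "+path" into the scheme
    rewritten = re.sub(r"http(s?)://", r"http\1+path://", s3_endpoint)
    # 's!/$!!' -- drop a single trailing slash, if present
    return re.sub(r"/$", "", rewritten)

assert to_wal_e_endpoint("https://minio.example.com:9000/") == "https+path://minio.example.com:9000"
```

Note also that `WALE_S3_SSE` always gets an explicit value, defaulting to "false", so downstream consumers see a definite setting rather than an absent file.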
6 changes: 3 additions & 3 deletions rootfs/patcher-script.d/patch_wal_e_s3.py
@@ -7,10 +7,10 @@ def wrap_uri_put_file(creds, uri, fp, content_type=None, conn=None):
     k = s3_util._uri_to_key(creds, uri, conn=conn)
     if content_type is not None:
         k.content_type = content_type
-    if os.getenv('DATABASE_STORAGE') == 's3':
-        encrypt_key=True
-    else:
-        encrypt_key=False
+    encrypt_key = False
+    if os.getenv('WALE_S3_SSE', 'false') == 'true':
+        encrypt_key = True
 k.set_contents_from_file(fp, encrypt_key=encrypt_key)
 return k
 s3.uri_put_file = wrap_uri_put_file
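
Taken together, the chain is: the chart value `.Values.global.s3.use_sse` becomes the pod's `S3_SSE` env var (only when `global.storage` is `s3`), the entrypoint copies it into the `WALE_S3_SSE` envdir file, and this monkey-patched wrapper turns it into boto's `encrypt_key` flag, which adds the `x-amz-server-side-encryption: AES256` header on upload. A standalone sketch of the new decision in isolation:

```python
# Sketch of the decision logic only, not the patched module itself.
import os

def should_encrypt() -> bool:
    # Only the literal string "true" (as written to the WALE_S3_SSE envdir
    # file and exported into the environment) enables server-side encryption.
    return os.getenv("WALE_S3_SSE", "false") == "true"

os.environ["WALE_S3_SSE"] = "true"
assert should_encrypt() is True
os.environ["WALE_S3_SSE"] = "false"
assert should_encrypt() is False
```

The practical effect: server-side encryption used to be forced on for all S3 storage; it is now opt-in via `use_sse`, which presumably matters for S3-compatible stores that reject the encryption header.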