import run
from log import log
import os
import re
import sys
import urlparse
from datetime import datetime
from ConfigParser import ConfigParser
s3buckets = {}
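
# Return the S3 key prefix (without the leading slash) for a given s3:// archive
# path and sha1, e.g. s3://bucket/archive + <sha1> -> archive/<sha1>.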
def get_archive_prefix(archive_path, sha1):
    u = urlparse.urlparse(archive_path)
    assert (u.scheme == 's3')
    prefix = os.path.join(u.path, sha1)[1:]
    return prefix
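
# Return a boto bucket object for the given bucket name. Connections are cached
# in s3buckets. Credentials are read from ~/.s3cfg if it exists, otherwise from
# the S3_ACCESS_KEY and S3_SECRET_KEY environment variables.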
def get_bucket(bucket_name):
    if bucket_name in s3buckets:
        return s3buckets[bucket_name]

    configpath = os.path.expanduser("~/.s3cfg")
    if os.path.exists(configpath):
        config = ConfigParser()
        config.read(configpath)
        key = config.get('default', 'access_key')
        secret = config.get('default', 'secret_key')
    else:
        key = os.getenv("S3_ACCESS_KEY")
        secret = os.getenv("S3_SECRET_KEY")

    log("get_bucket key %s" % (key))

    if not (key and secret):
        log('S3 key and/or secret not found in .s3cfg or environment variables')
        sys.exit(5)

    from boto.s3.connection import S3Connection
    from boto.s3.connection import OrdinaryCallingFormat
    from boto.s3.key import Key

    # NOTE: We hard-code the host (region) here. It should not be required,
    # but we had problems with certain buckets with period characters in the name.
    # Probably related to the following issue: https://github.com/boto/boto/issues/621
    conn = S3Connection(key, secret, host='s3-eu-west-1.amazonaws.com', calling_format=OrdinaryCallingFormat())
    bucket = conn.get_bucket(bucket_name)
    s3buckets[bucket_name] = bucket
    return bucket
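
# List the keys under <archive root>/<sha1>/<path> in the bucket and return
# those whose full key name matches the given regular expression.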
def find_files_in_bucket(archive_path, bucket, sha1, path, pattern):
    root = urlparse.urlparse(archive_path).path[1:]
    base_prefix = os.path.join(root, sha1)
    prefix = os.path.join(base_prefix, path)
    files = []
    for x in bucket.list(prefix = prefix):
        if x.name[-1] != '/':
            # Skip directory "keys". When an empty directory is created
            # a pseudo-key is added; directories aren't first-class objects on S3.
            if re.match(pattern, x.name):
                name = os.path.relpath(x.name, base_prefix)
                files.append({'name': name, 'path': '/' + x.name})
    return files

# Get archive files for a single release/sha1
def get_files(archive_path, bucket, sha1):
    files = []
    files = files + find_files_in_bucket(archive_path, bucket, sha1, "engine", '.*(/dmengine.*|builtins.zip|classes.dex|android-resources.zip|android.jar|gdc.*|defoldsdk.zip|ref-doc.zip)$')
    files = files + find_files_in_bucket(archive_path, bucket, sha1, "bob", '.*(/bob.jar)$')
    files = files + find_files_in_bucket(archive_path, bucket, sha1, "editor", '.*(/Defold-.*)$')
    files = files + find_files_in_bucket(archive_path, bucket, sha1, "dev", '.*(/Defold-.*)$')
    files = files + find_files_in_bucket(archive_path, bucket, sha1, "alpha", '.*(/Defold-.*)$')
    files = files + find_files_in_bucket(archive_path, bucket, sha1, "beta", '.*(/Defold-.*)$')
    files = files + find_files_in_bucket(archive_path, bucket, sha1, "stable", '.*(/Defold-.*)$')
    files = files + find_files_in_bucket(archive_path, bucket, sha1, "editor-alpha", '.*(/Defold-.*)$')
    return files
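
# Build a list of releases from git tags matching the given pattern (all tags if
# no pattern is given), newest first, limited to the ten most recent matching
# tags. Tags with no files in the archive are skipped. Each entry contains the
# tag, sha1, abbreviated sha1, commit date and the list of archive files.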
def get_tagged_releases(archive_path, pattern=None):
    u = urlparse.urlparse(archive_path)
    bucket = get_bucket(u.hostname)

    if pattern is None:
        pattern = "(.*?)$" # captures all tags

    tags = run.shell_command("git for-each-ref --sort=taggerdate --format '%(*objectname) %(refname)' refs/tags").split('\n')
    tags.reverse()
    releases = []

    matches = []
    for line in tags:
        line = line.strip()
        if not line:
            continue
        p = '(.*?) refs/tags/%s' % pattern
        m = re.match(p, line)
        if not m:
            continue
        sha1, tag = m.groups()
        matches.append((sha1, tag))

    for sha1, tag in matches[:10]: # Only the 10 most recent tags
        epoch = run.shell_command('git log -n1 --pretty=%%ct %s' % sha1.strip())
        date = datetime.fromtimestamp(float(epoch))
        files = get_files(archive_path, bucket, sha1)
        if len(files) > 0:
            releases.append({'tag': tag,
                             'sha1': sha1,
                             'abbrevsha1': sha1[:7],
                             'date': str(date),
                             'files': files})

    return releases
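
# Build the release dictionary (tag, sha1, abbreviated sha1 and archive files)
# for a single version tag and sha1.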
def get_single_release(archive_path, version_tag, sha1):
    u = urlparse.urlparse(archive_path)
    bucket = get_bucket(u.hostname)
    files = get_files(archive_path, bucket, sha1)

    return {'tag': version_tag,
            'sha1': sha1,
            'abbrevsha1': sha1[:7],
            'files': files}
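
# Promote a release to a new channel: copy the files that the redirect keys
# under archive/<sha1>/ point to into archive/<channel>/<sha1>/ and update the
# redirects to point to the new locations.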
def move_release(archive_path, sha1, channel):
    u = urlparse.urlparse(archive_path)
    # get archive root and bucket name
    # archive root: s3://d.defold.com/archive -> archive
    # bucket name: s3://d.defold.com/archive -> d.defold.com
    archive_root = u.path[1:]
    bucket_name = u.hostname
    bucket = get_bucket(bucket_name)

    # the search prefix we use when listing keys
    # we only want the keys associated with the specified sha1
    prefix = "%s/%s/" % (archive_root, sha1)
    keys = bucket.get_all_keys(prefix = prefix)
    for key in keys:
        # get the name of the file this key points to
        # archive/sha1/engine/arm64-android/android.jar -> engine/arm64-android/android.jar
        name = key.name.replace(prefix, "")

        # destination key for the new channel
        new_key = "archive/%s/%s/%s" % (channel, sha1, name)

        # the keys in archive/sha1/* are all redirects to files in archive/channel/sha1/*
        # get the actual file from the redirect
        redirect_path = key.get_redirect()
        if not redirect_path:
            # the key is an actual file and not a redirect
            # this shouldn't really happen, but it's better to check
            print("The file %s has no redirect. The file will not be moved" % name)
            continue

        # resolve the redirect and get a key to the file
        redirect_name = urlparse.urlparse(redirect_path).path[1:]
        redirect_key = bucket.get_key(redirect_name)
        if not redirect_key:
            print("Invalid redirect for %s. The file will not be moved" % redirect_path)
            continue

        # copy the file to the new location
        print("Copying %s to %s" % (redirect_key.name, new_key))
        bucket.copy_key(new_key, bucket_name, redirect_key.name)

        # update the redirect to point to the new location
        new_redirect = "http://%s/%s" % (bucket_name, new_key)
        key.set_redirect(new_redirect)
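
# Example usage (a sketch, assuming valid S3 credentials and the archive layout
# described in the comments above; the archive path is illustrative only):
#
#   releases = get_tagged_releases('s3://d.defold.com/archive')
#   if releases:
#       move_release('s3://d.defold.com/archive', releases[0]['sha1'], 'stable')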