91
# Checks whether the credentials the user has given us are valid for the
92
# datastore they want to utilize. No credentials are needed to utilize
93
# AppScale's datastores (referred to as AppDB).
94
# TODO(cgb): Change this to use the AWS RubyGem just like we use for
96
# TODO(cgb): Since this returns a boolean, the name of the method should
97
# be self.are_storage_credentials_valid?(storage, creds)
98
# NOTE(review): this chunk is a corrupted extraction -- the bare integers
# below ("101", "102", ...) are original-file line numbers fused into the
# text, and the jumps in that numbering show several source lines are
# missing (the "appdb" if-arm at orig 98-100, the begin frame, the `valid`
# assignments at orig 106-111/113-115, and the closing end). Code tokens
# are left byte-identical; comments only.
#
# For "s3", validity is probed by listing the caller's buckets: RightAws
# raises RightAws::AwsError on bad keys, which the rescue below catches.
# The method evidently computes a boolean `valid` (logged at the bottom) --
# TODO(review): confirm the return value against the full file.
def self.valid_storage_creds(storage, creds)
101
elsif storage == "s3"
102
conn = self.get_s3_conn(creds)
104
# Listing buckets doubles as the credential check; no result is needed,
# only whether the call raises.
all_buckets = conn.list_all_my_buckets
105
Djinn.log_debug("this user owns these buckets: [#{all_buckets.join(', ')}]")
107
rescue RightAws::AwsError
112
Djinn.log_debug("did user provide valid storage creds? #{valid}")
116
# Stores a string or a file's contents in a remote datastore, utilizing the
118
# Stores `output` (a string, or a local file/directory when is_file is true)
# under `path` in the chosen datastore by delegating to self.set with the
# :output type tag.
# NOTE(review): the closing `end` (orig line ~120) is missing from this
# extracted view; the bare "119" below is a fused original line number.
def self.set_output(path, output, storage="appdb", creds={}, is_file=false)
119
return self.set(path, output, :output, storage, creds, is_file)
123
# Gets a file from the remote datastore, returning either the location of
124
# the file on the local filesystem or the contents of the file as a string.
125
# Fetches the :output item stored under `path`, delegating to self.get.
# Returns the file contents as a string, or (when is_file is given) writes
# to the local filesystem -- see self.get for the is_file convention.
# NOTE(review): the closing `end` is missing from this extracted view; the
# bare "126" below is a fused original line number.
def self.get_output(path, storage="appdb", creds={}, is_file=false)
126
return self.get(path, :output, storage, creds, is_file)
130
# Sets the access policy for the named file, in the appropriate datastore.
131
# Sets the access policy for the named file by delegating to self.set with
# the :acl type tag. Note self.set's :acl arm is marked "TODO: implement
# me!" further down, so this is currently a stub path -- verify before use.
# NOTE(review): the closing `end` is missing from this extracted view; the
# bare "132" below is a fused original line number.
def self.set_acl(path, new_acl, storage="appdb", creds={})
132
return self.set(path, new_acl, :acl, storage, creds)
136
# Returns the access policy for the named file, in the appropriate datastore.
137
# Returns the access policy for the named file by delegating to self.get
# with the :acl type tag. Note self.get's :acl arm is marked "TODO:
# implement me!" further down, so this is currently a stub path.
# NOTE(review): the closing `end` is missing from this extracted view; the
# bare "138" below is a fused original line number.
def self.get_acl(path, storage="appdb", creds={})
138
return self.get(path, :acl, storage, creds)
142
# Checks whether the given file exists in the specified datastore.
143
# Checks whether `path` exists in the specified datastore. Both branches
# are expected to leave `result` holding the string "true" on success; the
# final comparison is string equality, not a boolean test.
# NOTE(review): this extracted view is missing several original lines
# (149, 152, 155, 157-159, 161-163, 165-168) -- presumably the begin/
# result-assignment scaffolding in the S3 branch, the else arms, and the
# closing ends. Bare integers below are fused original line numbers.
# Code tokens untouched; comments only.
def self.does_file_exist?(path, storage="appdb", creds={})
144
if storage == "appdb"
145
# FIXME(review): shelling out to curl with interpolated @@secret and
# `path` is injection-prone if `path` can contain shell metacharacters;
# the other appdb calls in this file use Net::HTTP.post_form instead.
result = `curl http://#{@@ip}:8079/doesexist -X POST -d 'SECRET=#{@@secret}' -d 'KEY=#{path}'`
146
elsif storage == "s3"
147
conn = self.get_s3_conn(creds)
148
bucket, file = self.parse_s3_key(path)
150
if self.does_s3_bucket_exist?(conn, bucket)
151
Djinn.log_debug("[does file exist] bucket [#{bucket}] exists")
153
Djinn.log_debug("[does file exist] getting acl for bucket [#{bucket}] and file [#{file}] ")
154
# get_acl on a missing key raises RightAws::AwsError; the rescue below
# evidently maps that to "does not exist" (the result assignments are
# among the missing lines -- TODO confirm against the full file).
conn.get_acl(bucket, file)
156
rescue RightAws::AwsError
160
Djinn.log_debug("[does file exist] bucket [#{bucket}] does not exist")
164
msg = "ERROR - unrecognized storage for does_file_exist via repo - you requested #{storage}"
169
Djinn.log_debug("does key=#{path} exist? #{result}")
170
return result == "true"
177
# Gets an item (a file or ACL) from the remote datastore, with the credentials
179
# Fetches an item (:output file contents or :acl) from the given datastore.
#
# The `is_file` parameter is overloaded: besides acting as a flag, the S3
# branch concatenates it into local paths (is_file + File::Separator + ...),
# and the appdb branch passes it to HelperFunctions.write_file as the
# destination -- so when truthy it is evidently a local directory/path
# string, not just a boolean. TODO(review): confirm against callers.
#
# NOTE(review): this extracted view is missing many original lines
# (186-187, 189, 194-196, 209, 213, 216-220, 222-223, 225-226, 228-235) --
# the is_file branching, the streaming-write body of the conn.get block,
# the else/end scaffolding, and the return. Bare integers below are fused
# original line numbers. Code tokens untouched; comments only.
def self.get(key, type, storage, creds, is_file=false)
180
if storage == "appdb"
181
Djinn.log_debug("performing a get on key [#{key}], type [#{type}]")
182
get_url = "http://#{@@ip}:8079/get"
183
params = {'SECRET' => @@secret, 'KEY' => key, 'TYPE' => type}
184
# The repo service returns the value base64-encoded in the response body.
data = Net::HTTP.post_form(URI.parse(get_url), params).body
185
decoded_data = Base64.decode64(data)
188
# Here is_file is used as the destination path for the decoded payload.
HelperFunctions.write_file(is_file, decoded_data)
190
result = decoded_data
191
elsif storage == "s3"
192
conn = self.get_s3_conn(creds)
193
bucket, file = self.parse_s3_key(key)
197
# If fetching a directory, fetch all files via the prefix parameter
198
# TODO(cgb): presumably list_bucket only lists the first 1000 and
199
# returns a cursor for more, so change this accordingly
200
Djinn.log_debug("doing a list bucket on #{bucket}, with prefix #{file}")
201
files_to_write = conn.list_bucket(bucket, {'prefix'=> file})
202
Djinn.log_debug("list bucket returned [#{files_to_write.join(', ')}]")
203
files_to_write.each { |s3_file_data|
204
s3_filename = s3_file_data[:key]
205
# is_file is treated as the local destination directory here.
local_path = is_file + File::Separator + s3_filename
206
#suffix = s3_filename.scan(/\A.+?\/(.*)\Z/).flatten.to_s
207
#local_path = is_file + File::Separator + suffix
208
Djinn.log_debug("writing local file #{local_path} from is_file #{is_file} and keyname #{s3_filename}")
210
# if the user gives us a file to fetch that's several directories
211
# deep, we need to make all the directories first
212
FileUtils.mkdir_p(File.dirname(local_path))
214
# FIXME(review): `f` is opened here but no f.close is visible in this
# view -- verify the full file closes it (prefer File.open with a block).
f = File.new(local_path, File::CREAT|File::RDWR)
215
# Streaming get: chunks are yielded to the block (block body is among
# the missing lines; presumably f.write(chunk)).
result = conn.get(bucket, s3_filename) { |chunk|
221
# Non-file path: fetch the object body directly as a string.
result = conn.get(bucket, file)[:object]
224
# TODO: implement me!
227
msg = "type not supported for get operation - #{type} was used"
231
msg = "ERROR - unrecognized storage for get via repo - you requested #{storage}"
236
Djinn.log_debug("get key=#{key} type=#{type}")
241
# Writes an item (file or ACL) to the remote datastore, with the specified
243
# Writes an item (:output file/string or :acl) to the given datastore.
# When `val` is a directory, both branches recurse over its entries,
# building "key/entry" remote keys; `result` is presumably initialized and
# returned by lines missing from this view.
#
# NOTE(review): this extracted view is missing many original lines
# (245, 247, 253-255, 257-260, 263-265, 275-277, 286, 290-291, 296,
# 299, 301, 303-305, 307, 311, 313, 320-322, 325-326, 328-329, 331,
# 333-334, 336-338, 340-343, 345+) -- the begin/rescue frames, else/end
# scaffolding, the bucket-creation wait loop, and the return. Bare
# integers below are fused original line numbers. Code tokens untouched.
def self.set(key, val, type, storage, creds, is_file=false)
244
if storage == "appdb"
246
if File.directory?(val)
248
# FIXME(review): shelling out to `ls` breaks on filenames with spaces
# and skips dotfiles; Dir.entries/Dir.glob would be safer. Same issue
# in the S3 branch below.
`ls #{val}`.split.each { |file|
249
fullkey = key + "/" + file
250
fullval = val + "/" + file
251
Djinn.log_debug("recursive dive - now saving remote [#{fullkey}], local [#{fullval}]")
252
temp = self.set(fullkey, fullval, type, storage, creds, is_file)
256
Djinn.log_debug("setting remote [#{fullkey}] failed - reported [#{temp}]")
261
Djinn.log_debug("attempting to put local file #{val} into file #{key}")
262
# NOTE(review): `chomp=false` is a plain local assignment used as a
# positional argument, not a keyword argument -- it just passes false.
val = HelperFunctions.read_file(val, chomp=false)
266
encoded_val = Base64.encode64(val)
267
set_url = "http://#{@@ip}:8079/set"
268
params = {'SECRET' => @@secret, 'KEY' => key,
269
'VALUE' => encoded_val, 'TYPE' => type}
270
res = Net::HTTP.post_form(URI.parse(set_url), params)
271
Djinn.log_debug("set key=#{key} type=#{type} returned #{res.body}")
272
result = true if res.body == "success"
273
# FIXME(review): rescuing Exception also swallows SignalException /
# SystemExit; rescue StandardError (bare `rescue`) instead.
rescue Exception => e
274
Djinn.log_debug("saw exception #{e.class} when posting userdata to repo at #{key}")
278
# This arm duplicates the encode-and-post sequence above -- a candidate
# for extraction into a shared private helper once the full file is in
# view.
Djinn.log_debug("attempting to put local file into location #{key}")
279
encoded_val = Base64.encode64(val)
280
set_url = "http://#{@@ip}:8079/set"
281
params = {'SECRET' => @@secret, 'KEY' => key,
282
'VALUE' => encoded_val, 'TYPE' => type}
283
result = Net::HTTP.post_form(URI.parse(set_url), params).body
284
Djinn.log_debug("set key=#{key} type=#{type} returned #{result}")
285
result = true if result == "success"
287
elsif storage == "s3"
288
conn = self.get_s3_conn(creds)
289
bucket, file = self.parse_s3_key(key)
292
# TODO: for now we assume the bucket exists
293
#if !self.does_s3_bucket_exist?(conn, bucket)
294
# Djinn.log_debug("bucket #{bucket} does not exist - creating it now")
295
# conn.create_bucket(bucket)
297
# bucket creation takes a few moments - wait for it to exist
298
# before we start putting things in it
300
# Djinn.log_debug("waiting for s3 bucket #{bucket} to exist")
302
# break if self.does_s3_bucket_exist?(conn, bucket)
306
Djinn.log_debug("s3 bucket #{bucket} exists, now storing file #{file}")
308
# this throws an exception that gets automatically caught and logged
309
# looks like "undefined method `pos' for <String...>"
310
# the put operation still succeeds
312
if File.directory?(val)
314
# FIXME(review): the |file| block parameter shadows the `file` local
# extracted from parse_s3_key above -- rename one of them.
`ls #{val}`.split.each { |file|
315
fullkey = key + "/" + file
316
fullval = val + "/" + file
317
Djinn.log_debug("recursive dive - now saving remote [#{fullkey}], local [#{fullval}]")
318
temp = self.set(fullkey, fullval, type, storage, creds, is_file)
319
result = false unless temp
323
Djinn.log_debug("attempting to put local file #{val} into bucket #{bucket}, location #{file}")
324
result = conn.put(bucket, file, File.open(val)) #headers={"Content-Length" => val.length})
327
# String-payload variant (non-file). NOTE(review): `headers=` here is a
# local assignment used positionally, not a keyword argument.
result = conn.put(bucket, file, val, headers={"Content-Length" => val.length})
330
Djinn.log_debug("done putting file #{file} to s3!")
332
# TODO: implement me!
335
# FIXME(review): message says "get operation" but this is the set
# method -- looks like a copy/paste slip (cannot change the literal in
# a doc-only edit).
msg = "type not supported for get operation - #{type} was used"
339
msg = "ERROR - unrecognized storage for set via repo - you requested #{storage}"
344
Djinn.log_debug("set operation returned #{result}")
349
# Returns a connection that can be used to access Amazon S3, or anything that
350
# is API-compatible (e.g., Google Storage, Eucalyptus Walrus).
351
# Builds a RightAws::S3Interface from the EC2_ACCESS_KEY / EC2_SECRET_KEY /
# S3_URL entries in `creds`. The endpoint is passed by temporarily swapping
# ENV['S3_URL'] around construction -- presumably because RightAws reads
# its endpoint from that environment variable (TODO confirm against the
# right_aws gem docs).
# FIXME(review): mutating process-global ENV is not thread-safe; if two
# threads build connections concurrently they can race on S3_URL.
# NOTE(review): `return conn` and the closing `end` (orig 366+) are missing
# from this extracted view; bare integers below are fused line numbers.
def self.get_s3_conn(creds)
352
access_key = creds['EC2_ACCESS_KEY']
353
secret_key = creds['EC2_SECRET_KEY']
355
s3_url = creds['S3_URL']
357
# Keys are obscured before logging so secrets don't land in the debug log.
obscured_a_key = HelperFunctions.obscure_string(access_key)
358
obscured_s_key = HelperFunctions.obscure_string(secret_key)
360
Djinn.log_debug("creating S3 connection with access key [#{obscured_a_key}], secret key [#{obscured_s_key}], and S3 url [#{s3_url}]")
362
old_s3_url = ENV['S3_URL']
363
ENV['S3_URL'] = s3_url
364
conn = RightAws::S3Interface.new(access_key, secret_key)
365
ENV['S3_URL'] = old_s3_url
371
# Given a full S3 path, returns the bucket and filename.
372
# Splits a full S3 path into [bucket, filename]. Keys evidently look like
# "/bucket/path/to/file": the leading slash makes paths[0] the empty
# string, so the filename is everything from paths[2] onward rejoined.
# NOTE(review): the bucket assignment (orig line 374 -- presumably
# `bucket = paths[1]`) and the return/`end` are missing from this extracted
# view; bare integers below are fused original line numbers. The length
# argument to Array#[] may exceed the remainder, which Ruby clamps safely.
def self.parse_s3_key(key)
373
paths = key.split("/")
375
file = paths[2, paths.length - 1].join("/")
380
# Queries Amazon S3 with the given connection to see if the user owns the
382
# Queries S3 via `conn` to see whether the caller owns a bucket named
# `bucket`. list_all_my_buckets returns hashes with a :name entry (as the
# map below shows); membership of `bucket` in those names is the answer.
# NOTE(review): the return (presumably `bucket_exists`) and closing `end`
# run past the visible range of this extracted view; bare integers below
# are fused original line numbers.
def self.does_s3_bucket_exist?(conn, bucket)
383
all_buckets = conn.list_all_my_buckets
384
bucket_names = all_buckets.map { |b| b[:name] }
385
bucket_exists = bucket_names.include?(bucket)
386
Djinn.log_debug("the user owns buckets [#{bucket_names.join(', ')}] - do they own [#{bucket}]? #{bucket_exists}")