mirror of https://github.com/fog/fog.git synced 2022-11-09 13:51:43 -05:00

Merge branch 'master' of github.com:redzebra/fog into auto_scaling_20100801

Nick Osborn 2011-06-25 12:57:17 +01:00
commit a4f417f419
39 changed files with 588 additions and 175 deletions

View file

@ -53,8 +53,7 @@ task :examples do
end
task :test do # => :examples do
Rake::Task[:mock_tests].invoke
Rake::Task[:real_tests].invoke
Rake::Task[:mock_tests].invoke && Rake::Task[:examples].invoke && Rake::Task[:real_tests].invoke
end
def tests(mocked)
@ -210,6 +209,8 @@ task :changelog do
if [
'Aaron Suggs',
'geemus',
'Lincoln Stoll',
'Luqman Amjad',
'nightshade427',
'Wesley Beary'
].include?(committer)
@ -243,6 +244,10 @@ task :docs do
Rake::Task[:upload_fog_io].invoke
Rake::Task[:upload_rdoc].invoke
# connect to storage provider
Fog.credential = :geemus
storage = Fog::Storage.new(:provider => 'AWS')
directory = storage.directories.new(:key => 'fog.io')
# write base index with redirect to new version
directory.files.create(
:body => redirecter('latest'),

View file

@ -1,3 +1,190 @@
0.9.0 06/24/2011 32960d165a65f12d41785f924e6b6b6d8442516a
=========================================================
MVPs! Lincoln Stoll and Luqman Amjad
[aws]
use AWS.escape instead of CGI.escape. thanks geemus
[aws|compute]
Use #public_ip_address instead of deprecated #ip_address in Server#setup. thanks Martin Emde
mock: make address detach others before associating. thanks geemus
[aws|elb]
Fix failing created_at test caused by Ruby 1.9 changes to Range#include?. Use simpler test that doesn't care about the exact created_at time. thanks Blake Gentry
Update ELB API to version 2011-04-05. thanks Blake Gentry
Fix typo in usage documentation and add 'ap-northeast-1' to regions list. thanks Blake Gentry
Rearrange DescribeLoadBalancersResult contents to alphabetical order to match the official AWS docs and make it easier to update the list. thanks Blake Gentry
Add new attributes for 2011-04-05 API. thanks Blake Gentry
[aws|rds]
Add parameter group tests. thanks Aaron Suggs
Add server model & collection tests. thanks Aaron Suggs
Add security_groups collection and model tests. thanks Aaron Suggs
Server#destroy argument is optional. thanks Aaron Suggs
Refactor RDS model & collection tests. thanks Aaron Suggs
[aws|simpledb]
recognize :region option in SimpleDB.new(). thanks Nick Osborn
[aws|storage]
Add get/put bucket policy support. thanks Michael Linderman
Add options argument to delete_object to set headers. thanks Michael Linderman
Add delete bucket policy. thanks Michael Linderman
discern between no file and no directory for files.get. thanks geemus
fix error type for non-directories in files.get. thanks geemus
[brightbox|compute]
Added missing Image#compatibility_mode attribute. thanks Paul Thornthwaite
Fixed Format of Account representation. thanks Paul Thornthwaite
Fixed Format of nested CloudIP's server attribute. thanks Paul Thornthwaite
New account limits exposed in API, updating format test. thanks Paul Thornthwaite
ApiClient revoked time exposed in API. Updated format test. thanks Paul Thornthwaite
[cdn]
refactor provider/service namespacing. thanks geemus
fix top level class/module mismatch. thanks geemus
[compute]
first pass at examples. thanks geemus
refactor provider/service namespacing. thanks geemus
fixes/skips to get examples working. thanks geemus
[compute|aws]
fix helpers to use Fog::AWS. thanks geemus
simplify describe_instances parser. thanks geemus
fix deprecated compute service accessor usage. thanks geemus
improve consistency of waiting for ssh to be ready. thanks geemus
remove debug output from last commit. thanks geemus
[compute|bluebox]
fix format and template id in tests. thanks geemus
[compute|brightbox]
Fixed missed lookup in broken tests caused by namespace rename. thanks Paul Thornthwaite
[compute|ecloud]
fix namespace leftovers. thanks geemus
[compute|ninefold]
test cleanup. thanks geemus
[compute|rackspace]
fix nil check for auth token. thanks geemus
[compute|stormondemand]
fix namespace issue. thanks geemus
[compute|voxel]
fix flavor tests to properly skip voxel. thanks geemus
fix namespace issue. thanks geemus
[core]
add namespaced errors for better messaging. thanks geemus
making collection.new error more idiomatic. thanks geemus
fix mock reset to work with new namespaces. thanks geemus
[dns]
rename ip to value for record. thanks geemus
refactor provider/service namespacing. thanks geemus
[dns|dnsmadeeasy]
skip model/collection tests for now (timing issue). thanks geemus
[dns|examples]
fix deprecated record#ip= usage. thanks geemus
[dns|zerigo]
fixes for namespacing. thanks geemus
namespace related fixes. thanks geemus
[docs]
main index redirects to /latest. thanks geemus
fix rdoc link on index. thanks geemus
update to match refactorings. thanks geemus
[examples]
fix descriptions. thanks geemus
[linode|compute]
mark format test for stackscripts pending due to inconsistency of string/float for a value. thanks geemus
[misc]
add braces for new into the documents. thanks Chris Mague
use correct variable name in test description. thanks Dr Nic Williams
Not sure if I'm missing something here, but rake did not work. thanks Dylan Egan
You only need either the size or the snapshot_id. thanks Dylan Egan
Provide mocked console output if server has been up for over the delay time. thanks Dylan Egan
LIES!. thanks Dylan Egan
Add support for specifying a CDN CNAME when getting a Rackspace Cloud Files directory. thanks H. Wade Minter
add missing comma. thanks Joseph Anthony Pasquale Holsten
skip rackspace get_object test when mocking. thanks Joseph Anthony Pasquale Holsten
give a more useful error if someone tries to say connection.directories.create('dir'). thanks Joseph Anthony Pasquale Holsten
Added my blog post. thanks Larry Wright
Add recursive argument to server scp methods. Set to false by default. thanks Luke Robins
Add an options hash to scp. Set to {} by default. thanks Luke Robins
Added new DNS provider => DNS Made Easy. thanks Luqman Amjad
Removed sandbox url for DNS Made Easy. thanks Luqman Amjad
Added missing method "delete all domains". thanks Luqman Amjad
(DNSMadeEasy) added support for updating records via PUT. thanks Luqman Amjad
Added missing reference to delete_all_domains. thanks Luqman Amjad
Rescue 404 when fetching zone. thanks Luqman Amjad
Added new blog posting about fog and Carrierwave. thanks Mike Gehard
Edited docs/about/press.markdown via GitHub. thanks Mike Gehard
Typo fix. thanks Oge Nnadi
Fixed Fog::AWS::SimpleDB#delete_attributes. thanks Pan Thomakos
add Net::SCP options parameter to Fog::SCP proxy. thanks Phil Cohen
use a hash not nil for default scp_options. thanks Phil Cohen
rackspace auth url only prepend protocol as needed. thanks Todd Willey
Allow auth tokens to be shared among connections to rackspace api. thanks Todd Willey
OpenStack responds 200 when creating servers. thanks Todd Willey
added 0.8.2 changelog contents. thanks geemus
separate fog.io and rdoc tasks. thanks geemus
remove provider attribute from shared services. thanks geemus
[storage][aws] fix leftover namespace mismatch. thanks geemus
[storage][google] fix leftover namespace mismatch. thanks geemus
Edited lib/fog/storage/rackspace.rb via GitHub. thanks kbockmanrs
[ninefold|compute]
Boilerplate for ninefold. thanks Lincoln Stoll
Ninefold List Functionality. thanks Lincoln Stoll
VM Operations + dependencies. thanks Lincoln Stoll
Fix data formats for virtual machines. thanks Lincoln Stoll
IP Address requests. thanks Lincoln Stoll
Refactor out job waiting functionality, test correct data. thanks Lincoln Stoll
NAT functionality. thanks Lincoln Stoll
Core model functionality. thanks Lincoln Stoll
Public IPs and Rules. thanks Lincoln Stoll
use lowest network ID as default, correct assignment. thanks Lincoln Stoll
No mocks, simplify code. thanks Lincoln Stoll
Save operations not supported. thanks Lincoln Stoll
[rake]
add examples back into default rake task. thanks geemus
[release]
update changelog during release process. thanks geemus
[storage]
refactor provider/service namespacing. thanks geemus
[storage|aws]
more robust query handling for signed url. thanks geemus
make url a bit more robust. thanks geemus
fix url to check for query. thanks geemus
fix whitespace errors. thanks geemus
more precise mocked get_object. thanks geemus
[storage|rackspace]
fix files#get_url. thanks geemus
[tests]
add dnsmadeeasy and ninefold to mock credentials. thanks geemus
nuke rake task for test related cleanup. thanks geemus
make collection gsub to find nils a bit more resilient/unique. thanks geemus
trying again to make collection gsub to get nils more resilient/unique. thanks geemus
0.8.2 05/26/2011 9e6ebb6f7316273eb489f8cb60eb1642e6df357b
=========================================================

View file

@ -17,10 +17,10 @@ With a rapidly expanding community and codebase the advantages of fog just keep
Now type 'fog' to try stuff, confident that fog will let you know what to do. Here is an example of wading through server creation for Amazon Elastic Compute Cloud:
>> server = AWS.servers.create
>> server = Compute[:aws].servers.create
ArgumentError: image_id is required for this operation
>> server = AWS.servers.create(:image_id => 'ami-5ee70037')
>> server = Compute[:aws].servers.create(:image_id => 'ami-5ee70037')
<Fog::AWS::EC2::Server [...]>
>> server.destroy # cleanup after yourself or regret it, trust me
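The same namespaced accessor pattern applies to the other services; a minimal sketch outside the interactive shell (bucket name is hypothetical, assumes AWS credentials are configured):
directory = Fog::Storage[:aws].directories.create(:key => 'fog-example-bucket')   # hypothetical bucket name
directory.destroy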

View file

@ -9,9 +9,12 @@ Shindo.tests('compute examples', 'compute') do
# iterate over all the providers
Fog.providers.each do |provider|
# FIXME: implement expected shared compute stuff for these providers as well
next if ['Bluebox', 'Brightbox', 'Ecloud', 'GoGrid', 'Linode', 'NewServers', 'Ninefold', 'Slicehost', 'StormOnDemand', 'VirtualBox', 'Voxel'].include?(provider)
provider = eval(provider) # convert from string to object
# skip if provider does not have storage
# skip if provider does not have compute
next unless provider.respond_to?(:services) && provider.services.include?(:compute)
tests(provider, provider.to_s.downcase) do
@ -46,10 +49,15 @@ Shindo.tests('compute examples', 'compute') do
end
# scp a directory to a server
Dir.mkdir('/tmp/lorem')
file = ::File.new('/tmp/lorem/lorem.txt', 'w')
file.write(File.read(lorem_path))
lorem_dir = File.join([File.dirname(__FILE__), '..', 'tests'])
tests("@server.scp('#{lorem_dir}', '/tmp/lorem', :recursive => true)").succeeds do
@server.scp(lorem_dir, '/tmp/lorem', :recursive => true)
end
File.delete('/tmp/lorem/lorem.txt')
Dir.rmdir('/tmp/lorem')
# destroy the server
tests('@server.destroy').succeeds do

View file

@ -35,7 +35,7 @@ Shindo.tests('dns examples', 'dns') do
# type is the type of record to create
tests('@record = @zone.records.create').succeeds do
@record = @zone.records.create(
:ip => '1.2.3.4',
:value => '1.2.3.4',
:name => 'www.fogdnsexamples.com',
:type => 'A'
)

View file

@ -6,8 +6,8 @@ Gem::Specification.new do |s|
## If your rubyforge_project name is different, then edit it and comment out
## the sub! line in the Rakefile
s.name = 'fog'
s.version = '0.8.2'
s.date = '2011-05-26'
s.version = '0.9.0'
s.date = '2011-06-24'
s.rubyforge_project = 'fog'
## Make sure your summary is short. The description may be as long
@ -41,7 +41,8 @@ Gem::Specification.new do |s|
s.add_dependency('formatador', '>=0.1.3')
s.add_dependency('json')
s.add_dependency('mime-types')
s.add_dependency('net-ssh', '>=2.1.3')
s.add_dependency('net-scp', '>=1.0.4')
s.add_dependency('net-ssh', '>=2.1.4')
s.add_dependency('nokogiri', '>=1.4.4')
s.add_dependency('ruby-hmac')

View file

@ -3,7 +3,7 @@ require File.join(File.dirname(__FILE__), 'fog', 'core')
module Fog
unless const_defined?(:VERSION)
VERSION = '0.8.2'
VERSION = '0.9.0'
end
end

View file

@ -32,12 +32,14 @@ module Fog
request :authorize_security_group_ingress
request :create_image
request :create_key_pair
request :create_placement_group
request :create_security_group
request :create_snapshot
request :create_tags
request :create_volume
request :delete_key_pair
request :delete_security_group
request :delete_placement_group
request :delete_snapshot
request :delete_tags
request :delete_volume
@ -48,6 +50,7 @@ module Fog
request :describe_instances
request :describe_reserved_instances
request :describe_key_pairs
request :describe_placement_groups
request :describe_regions
request :describe_reserved_instances_offerings
request :describe_security_groups
@ -266,7 +269,7 @@ module Fog
rescue Excon::Errors::HTTPStatusError => error
if match = error.message.match(/<Code>(.*)<\/Code><Message>(.*)<\/Message>/)
raise case match[1].split('.').last
when 'NotFound'
when 'NotFound', 'Unknown'
Fog::Compute::AWS::NotFound.slurp(error, match[2])
else
Fog::Compute::AWS::Error.slurp(error, "#{match[1]} => #{match[2]}")

View file

@ -12,7 +12,7 @@ module Fog
attr_accessor :architecture
attribute :ami_launch_index, :aliases => 'amiLaunchIndex'
attribute :availability_zone, :aliases => ['availabilityZone', 'placement'], :squash => 'availabilityZone'
attribute :availability_zone, :aliases => 'availabilityZone'
attribute :block_device_mapping, :aliases => 'blockDeviceMapping'
attribute :client_token, :aliases => 'clientToken'
attribute :dns_name, :aliases => 'dnsName'
@ -24,6 +24,7 @@ module Fog
attribute :key_name, :aliases => 'keyName'
attribute :created_at, :aliases => 'launchTime'
attribute :monitoring, :squash => 'state'
attribute :placement_group, :aliases => 'groupName'
attribute :product_codes, :aliases => 'productCodes'
attribute :private_dns_name, :aliases => 'privateDnsName'
attribute :private_ip_address, :aliases => 'privateIpAddress'
@ -35,6 +36,7 @@ module Fog
attribute :state, :aliases => 'instanceState', :squash => 'name'
attribute :state_reason, :aliases => 'stateReason'
attribute :subnet_id, :aliases => 'subnetId'
attribute :tenancy
attribute :tags, :aliases => 'tagSet'
attribute :user_data
@ -45,6 +47,7 @@ module Fog
self.groups ||= ["default"] unless attributes[:subnet_id]
self.flavor_id ||= 't1.micro'
self.image_id ||= begin
self.username = 'ubuntu'
case attributes[:connection].instance_variable_get(:@region) # Ubuntu 10.04 LTS 64bit (EBS)
when 'ap-northeast-1'
'ami-5e0fa45f'
@ -144,6 +147,8 @@ module Fog
'KeyName' => key_name,
'Monitoring.Enabled' => monitoring,
'Placement.AvailabilityZone' => availability_zone,
'Placement.GroupName' => placement_group,
'Placement.Tenancy' => tenancy,
'RamdiskId' => ramdisk_id,
'SecurityGroup' => groups,
'SubnetId' => subnet_id,
@ -177,19 +182,21 @@ module Fog
if public_key
commands << %{echo "#{public_key}" >> ~/.ssh/authorized_keys}
end
# allow some retries over the first 120 seconds because aws is weird
Timeout::timeout(120) do
# wait for aws to be ready
Timeout::timeout(360) do
begin
Timeout::timeout(4) do
Fog::SSH.new(public_ip_address, username, credentials).run(commands)
Timeout::timeout(8) do
Fog::SSH.new(public_ip_address, username, credentials.merge(:timeout => 4)).run('pwd')
end
rescue Errno::ECONNREFUSED
sleep(2)
retry
rescue Net::SSH::AuthenticationFailed, Timeout::Error
retry
end
end
rescue Errno::ECONNREFUSED => e
sleep(1)
retry
Fog::SSH.new(public_ip_address, username, credentials).run(commands)
end
def ssh(commands)
@ -246,6 +253,16 @@ module Fog
self.monitoring = new_monitor
end
private
def placement=(new_placement)
if new_placement.is_a?(Hash)
merge_attributes(new_placement)
else
self.attributes[:placement] = new_placement
end
end
end
end
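For example, a minimal sketch of launching into a placement group via the new attributes (assumes AWS credentials are configured and the group already exists; the AMI id is hypothetical, and cluster placement requires a cluster instance type such as 'cc1.4xlarge'):
server = Fog::Compute[:aws].servers.create(
  :image_id        => 'ami-12345678',        # hypothetical HVM image id
  :flavor_id       => 'cc1.4xlarge',
  :placement_group => 'fog_placement_group', # sent as 'Placement.GroupName'
  :tenancy         => 'default'              # sent as 'Placement.Tenancy'
)
server.wait_for { ready? }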

View file

@ -109,7 +109,7 @@ module Fog
def setup(credentials = {})
requires :identity, :ips, :public_key, :username
Fog::SSH.new(ips.first['address'], username, credentials).run([
Fog::SSH.new(public_ip_address, username, credentials).run([
%{mkdir .ssh},
%{echo "#{public_key}" >> ~/.ssh/authorized_keys},
%{passwd -l #{username}},
@ -125,7 +125,7 @@ module Fog
options = {}
options[:key_data] = [private_key] if private_key
Fog::SSH.new(ips.first['address'], username, options).run(commands)
Fog::SSH.new(public_ip_address, username, options).run(commands)
end
def scp(local_path, remote_path, upload_options = {})
@ -133,7 +133,7 @@ module Fog
scp_options = {}
scp_options[:key_data] = [private_key] if private_key
Fog::SCP.new(ips.first['address'], username, scp_options).upload(local_path, remote_path, upload_options)
Fog::SCP.new(public_ip_address, username, scp_options).upload(local_path, remote_path, upload_options)
end
def username

View file

@ -14,7 +14,7 @@ module Fog
attribute :name
attribute :image_id # id or name
attribute :public_ip_address, :aliases => 'ip'
attribute :public_ip_address, :aliases => 'ip', :squash => 'ip'
attribute :memory # server.ram
attribute :state
attribute :description # Optional
@ -59,36 +59,36 @@ module Fog
def save
raise Fog::Errors::Error.new('Resaving an existing object may create a duplicate') if identity
requires :name, :image_id, :ip, :memory
requires :name, :image_id, :memory, :public_ip_address
options = {
'isSandbox' => sandbox,
'image' => image_id
}
options = options.reject {|key, value| value.nil?}
data = connection.grid_server_add(image, ip, name, memory, options)
data = connection.grid_server_add(image, public_ip_address, name, memory, options)
merge_attributes(data.body)
true
end
def ssh(commands)
requires :ip, :identity, :username
requires :identity, :public_ip_address, :username
options = {}
options[:key_data] = [private_key] if private_key
Fog::SSH.new(ip['ip'], username, options).run(commands)
Fog::SSH.new(public_ip_address, username, options).run(commands)
end
def scp(local_path, remote_path, upload_options = {})
requires :ip, :username
requires :public_ip_address, :username
scp_options = {}
scp_options[:key_data] = [private_key] if private_key
Fog::SCP.new(ip['ip'], username, scp_options).upload(local_path, remote_path, upload_options)
Fog::SCP.new(public_ip_address, username, scp_options).upload(local_path, remote_path, upload_options)
end
def setup(credentials = {})
requires :ip, :identity, :public_key, :username
Fog::SSH.new(ip['ip'], username, credentials).run([
requires :identity, :public_ip_address, :public_key, :username
Fog::SSH.new(public_ip_address, username, credentials).run([
%{mkdir .ssh},
%{echo "#{public_key}" >> ~/.ssh/authorized_keys},
%{passwd -l root},

View file

@ -17,6 +17,7 @@ module Fog
def bootstrap(new_attributes = {})
server = create(new_attributes)
server.wait_for { ready? }
server.setup
server
end

View file

@ -7,6 +7,8 @@ module Fog
def reset
@block_device_mapping = {}
@context = []
@contexts = ['blockDeviceMapping', 'groupSet', 'instancesSet', 'instanceState', 'placement', 'productCodes', 'stateReason', 'tagSet']
@instance = { 'blockDeviceMapping' => [], 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [], 'stateReason' => {}, 'tagSet' => {} }
@reservation = { 'groupSet' => [], 'instancesSet' => [] }
@response = { 'reservationSet' => [] }
@ -15,19 +17,8 @@ module Fog
def start_element(name, attrs = [])
super
case name
when 'blockDeviceMapping'
@in_block_device_mapping = true
when'groupSet', 'productCodes'
@in_subset = true
when 'instancesSet'
@in_instances_set = true
when 'instanceState'
@in_instance_state = true
when 'stateReason'
@in_state_reason = true
when 'tagSet'
@in_tag_set = true
if @contexts.include?(name)
@context.push(name)
end
end
@ -35,7 +26,7 @@ module Fog
case name
when 'amiLaunchIndex'
@instance[name] = value.to_i
when 'availabilityZone'
when 'availabilityZone', 'tenancy'
@instance['placement'][name] = value
when 'architecture', 'clientToken', 'dnsName', 'imageId',
'instanceId', 'instanceType', 'ipAddress', 'kernelId',
@ -44,41 +35,33 @@ module Fog
@instance[name] = value
when 'attachTime'
@block_device_mapping[name] = Time.parse(value)
when 'blockDeviceMapping'
@in_block_device_mapping = false
when *@contexts
@context.pop
when 'code'
if @in_instance_state
@instance['instanceState'][name] = value.to_i
elsif @in_state_reason
@instance['stateReason'][name] = value.to_i
end
@instance[@context.last][name] = value.to_i
when 'deleteOnTermination'
if value == 'true'
@block_device_mapping[name] = true
else
@block_device_mapping[name] = false
end
@block_device_mapping[name] = (value == 'true')
when 'deviceName', 'status', 'volumeId'
@block_device_mapping[name] = value
when 'groupId'
when 'groupName'
case @context.last
when 'groupSet'
@reservation['groupSet'] << value
when 'groupSet', 'productCodes'
@in_subset = false
when 'instancesSet'
@in_instances_set = false
when 'instanceState'
@in_instance_state = false
when 'placement'
@instance['placement'][name] = value
end
when 'item'
if @in_block_device_mapping
case @context.last
when 'blockDeviceMapping'
@instance['blockDeviceMapping'] << @block_device_mapping
@block_device_mapping = {}
elsif @in_tag_set
@instance['tagSet'][@tag['key']] = @tag['value']
@tag = {}
elsif @in_instances_set
when 'instancesSet'
@reservation['instancesSet'] << @instance
@instance = { 'blockDeviceMapping' => [], 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [], 'stateReason' => {}, 'tagSet' => {} }
elsif !@in_subset
when 'tagSet'
@instance['tagSet'][@tag['key']] = @tag['value']
@tag = {}
when nil
@response['reservationSet'] << @reservation
@reservation = { 'groupSet' => [], 'instancesSet' => [] }
end
@ -87,11 +70,7 @@ module Fog
when 'launchTime'
@instance[name] = Time.parse(value)
when 'name'
if @in_instance_state
@instance['instanceState'][name] = value
elsif @in_state_reason
@instance['stateReason'][name] = value
end
@instance[@context.last][name] = value
when 'ownerId', 'reservationId'
@reservation[name] = value
when 'requestId'
@ -99,15 +78,7 @@ module Fog
when 'productCode'
@instance['productCodes'] << value
when 'state'
if value == 'true'
@instance['monitoring'][name] = true
else
@instance['monitoring'][name] = false
end
when 'stateReason'
@in_state_reason = false
when 'tagSet'
@in_tag_set = false
@instance['monitoring'][name] = (value == 'true')
end
end
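The refactor above replaces one boolean flag per nested element with a single context stack; a minimal standalone sketch of the pattern (not fog code, element names chosen for illustration):
class ContextSketch
  CONTEXTS = ['instancesSet', 'tagSet']
  def initialize
    @context = []
  end
  def start_element(name)
    @context.push(name) if CONTEXTS.include?(name)
  end
  def end_element(name)
    case name
    when *CONTEXTS
      @context.pop   # leaving a nested element
    when 'item'
      # branch on the innermost open context instead of a flag per element
      puts "item closed inside #{@context.last || 'the top level'}"
    end
  end
end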

View file

@ -0,0 +1,30 @@
module Fog
module Parsers
module Compute
module AWS
class DescribePlacementGroups < Fog::Parsers::Base
def reset
@placement_group = {}
@response = { 'placementGroupSet' => [] }
end
def end_element(name)
case name
when 'item'
@response['placementGroupSet'] << @placement_group
@placement_group = {}
when 'groupName', 'state', 'strategy'
@placement_group[name] = value
when 'requestId'
@response[name] = value
end
end
end
end
end
end
end

View file

@ -7,19 +7,16 @@ module Fog
def reset
@block_device_mapping = {}
@context = []
@contexts = ['blockDeviceMapping', 'groupSet', 'placement', 'productCodes']
@instance = { 'blockDeviceMapping' => [], 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [] }
@response = { 'groupSet' => [], 'instancesSet' => [] }
end
def start_element(name, attrs = [])
super
case name
when 'blockDeviceMapping'
@in_block_device_mapping = true
when 'groupSet'
@in_group_set = true
when 'productCodes'
@in_product_codes = true
if @contexts.include?(name)
@context.push(name)
end
end
@ -32,12 +29,12 @@ module Fog
'keyName', 'privateDnsName', 'privateIpAddress', 'ramdiskId',
'reason', 'rootDeviceType'
@instance[name] = value
when 'availabilityZone'
when 'availabilityZone', 'tenancy'
@instance['placement'][name] = value
when 'attachTime'
@block_device_mapping[name] = Time.parse(value)
when 'blockDeviceMapping'
@in_block_device_mapping = false
when *@contexts
@context.pop
when 'code'
@instance['instanceState'][name] = value.to_i
when 'deleteOnTermination'
@ -46,13 +43,19 @@ module Fog
@block_device_mapping[name] = value
when 'groupId'
@response['groupSet'] << value
when 'groupName'
case @context.last
when 'groupSet'
@in_group_set = false
@response['groupSet'] << value
when 'placement'
@instance['placement'][name] = value
end
when 'item'
if @in_block_device_mapping
case @context.last
when 'blockDeviceMapping'
@instance['blockDeviceMapping'] << @block_device_mapping
@block_device_mapping = {}
elsif !@in_group_set && !@in_product_codes
when nil
@response['instancesSet'] << @instance
@instance = { 'blockDeviceMapping' => [], 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [] }
end
@ -64,8 +67,6 @@ module Fog
@response[name] = value
when 'product_code'
@instance['productCodes'] << value
when 'productCodes'
@in_product_codes = false
when 'state'
@instance['monitoring'][name] = (value == 'true')
when 'subnetId'

View file

@ -0,0 +1,34 @@
module Fog
module Compute
class AWS
class Real
require 'fog/compute/parsers/aws/basic'
# Create a new placement group
#
# ==== Parameters
# * group_name<~String> - Name of the placement group.
# * strategy<~String> - Placement group strategy. Valid options in ['cluster']
#
# ==== Returns
# * response<~Excon::Response>:
# * body<~Hash>:
# * 'requestId'<~String> - Id of request
# * 'return'<~Boolean> - success?
#
# {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreatePlacementGroup.html]
def create_placement_group(name, strategy)
request(
'Action' => 'CreatePlacementGroup',
'GroupName' => name,
'Strategy' => strategy,
:parser => Fog::Parsers::Compute::AWS::Basic.new
)
end
end
end
end
end
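A minimal usage sketch (assumes AWS credentials are configured; the group name is arbitrary):
Fog::Compute[:aws].create_placement_group('fog_placement_group', 'cluster')   # body contains 'requestId' and 'return'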

View file

@ -0,0 +1,32 @@
module Fog
module Compute
class AWS
class Real
require 'fog/compute/parsers/aws/basic'
# Delete a placement group that you own
#
# ==== Parameters
# * group_name<~String> - Name of the placement group.
#
# ==== Returns
# * response<~Excon::Response>:
# * body<~Hash>:
# * 'requestId'<~String> - Id of request
# * 'return'<~Boolean> - success?
#
# {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeletePlacementGroup.html]
def delete_placement_group(name)
request(
'Action' => 'DeletePlacementGroup',
'GroupName' => name,
:idempotent => true,
:parser => Fog::Parsers::Compute::AWS::Basic.new
)
end
end
end
end
end
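The matching cleanup call, marked idempotent above, is a one-liner (same hypothetical group name as before):
Fog::Compute[:aws].delete_placement_group('fog_placement_group')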

View file

@ -0,0 +1,35 @@
module Fog
module Compute
class AWS
class Real
require 'fog/compute/parsers/aws/describe_placement_groups'
# Describe all or specified placement groups
#
# ==== Parameters
# * filters<~Hash> - List of filters to limit results with
#
# === Returns
# * response<~Excon::Response>:
# * body<~Hash>:
# * 'requestId'<~String> - Id of request
# * 'placementGroupSet'<~Array>:
# * 'groupName'<~String> - Name of placement group
# * 'strategy'<~String> - Strategy of placement group
# * 'state'<~String> - State of placement group
#
# {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribePlacementGroups.html]
def describe_placement_groups(filters = {})
params = Fog::AWS.indexed_filters(filters)
request({
'Action' => 'DescribePlacementGroups',
:idempotent => true,
:parser => Fog::Parsers::Compute::AWS::DescribePlacementGroups.new
}.merge!(params))
end
end
end
end
end
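A minimal sketch of listing placement groups, optionally filtered by name (filter key taken from the tests later in this diff):
all_groups = Fog::Compute[:aws].describe_placement_groups.body['placementGroupSet']
one_group  = Fog::Compute[:aws].describe_placement_groups('group-name' => 'fog_placement_group').body['placementGroupSet'].first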

View file

@ -30,7 +30,7 @@ module Fog
# * 'SecurityGroup'<~Array> or <~String> - Name of security group(s) for instances (you must omit this parameter if using Virtual Private Clouds)
# * 'InstanceInitiatedShutdownBehaviour'<~String> - specifies whether volumes are stopped or terminated when instance is shutdown, in [stop, terminate]
# * 'InstanceType'<~String> - Type of instance to boot. Valid options
# in ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.2xlarge', 'm2.4xlarge']
# in ['t1.micro', 'm1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge', 'cg1.4xlarge']
# default is 'm1.small'
# * 'KernelId'<~String> - Id of kernel with which to launch
# * 'KeyName'<~String> - Name of a keypair to add to booting instances
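A minimal sketch of booting a single micro instance from the expanded type list (AMI id and key pair name are hypothetical):
response    = Fog::Compute[:aws].run_instances('ami-12345678', 1, 1, 'InstanceType' => 't1.micro', 'KeyName' => 'fog-example')
instance_id = response.body['instancesSet'].first['instanceId']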

View file

@ -4,7 +4,6 @@ module Fog
class Real
def list_async_jobs(options = {})
puts "about to perf request.."
request('listAsyncJobs', options, :expects => [200],
:response_prefix => 'listasyncjobsresponse/asyncjobs', :response_type => Array)
end

View file

@ -22,6 +22,7 @@ module Fog
request :head_object
request :put_container
request :put_object
request :put_object_manifest
module Utils

View file

@ -7,6 +7,9 @@ module Fog
#
# ==== Parameters
# * container<~String> - Name for container, should be < 256 bytes and must not contain '/'
# * object<~String> - Name for object
# * data<~String|File> - data to upload
# * options<~Hash> - config headers for object. Defaults to {}.
#
def put_object(container, object, data, options = {})
data = Fog::Storage.parse_data(data)
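A minimal sketch of the documented parameters (container, object name, and header value are hypothetical):
Fog::Storage[:rackspace].put_object('backups', 'notes.txt', 'hello world', 'Content-Type' => 'text/plain')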

View file

@ -0,0 +1,25 @@
module Fog
module Storage
class Rackspace
class Real
# Create a new object
#
# ==== Parameters
# * container<~String> - Name for container, should be < 256 bytes and must not contain '/'
# * object<~String> - Name for object
#
def put_object_manifest(container, object)
path = "#{URI.escape(container)}/#{URI.escape(object)}"
request(
:expects => 201,
:headers => {'X-Object-Manifest' => path},
:method => 'PUT',
:path => path
)
end
end
end
end
end
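A minimal sketch of assembling a large object from segments, mirroring the new tests later in this diff (container and object names are hypothetical):
storage = Fog::Storage[:rackspace]
storage.put_object('backups', 'big_file/1', 'x' * 1024)   # upload each segment under a shared prefix
storage.put_object('backups', 'big_file/2', 'y' * 1024)
storage.put_object_manifest('backups', 'big_file')         # GET 'big_file' now returns the segments concatenated in order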

View file

@ -8,7 +8,7 @@ Shindo.tests("AWS::RDS | security_group", ['aws', 'rds']) do
tests("#description").returns('fog test') { @instance.description }
tests("#authorize_ec2_security_group").succeeds do
@ec2_sec_group = AWS[:compute].security_groups.create(:name => 'fog-test', :description => 'fog test')
@ec2_sec_group = Compute[:aws].security_groups.create(:name => 'fog-test', :description => 'fog test')
@instance.authorize_ec2_security_group(@ec2_sec_group.name)
returns('authorizing') do

View file

@ -85,7 +85,7 @@ Shindo.tests('AWS::CloudFormation | stack requests', ['aws', 'cloudformation'])
unless Fog.mocking?
@stack_name = 'fogstack' << Time.now.to_i.to_s
@keypair = AWS[:compute].key_pairs.create(:name => 'cloudformation')
@keypair = Compute[:aws].key_pairs.create(:name => 'cloudformation')
@template_url = 'https://s3.amazonaws.com/cloudformation-templates-us-east-1/EC2InstanceSample-1.0.0.template'
end

View file

@ -44,7 +44,7 @@ Shindo.tests('AWS::ELB | models', ['aws', 'elb']) do
raises(Fog::AWS::ELB::InvalidInstance) { elb.deregister_instances('i-00000000') }
end
server = AWS[:compute].servers.create
server = Compute[:aws].servers.create
tests('register instance') do
begin
elb.register_instances(server.id)

View file

@ -79,11 +79,11 @@ Shindo.tests('Fog::Compute[:aws] | instance requests', ['aws']) do
@instance_id = nil
# Use a MS Windows AMI to test #get_password_data
@windows_ami = 'ami-ee926087' # Microsoft Windows Server 2008 R2 Base 64-bit
@windows_ami = 'ami-1cbd4475' # Microsoft Windows Server 2008 R2 Base 64-bit
# Create a keypair for decrypting the password
key_name = 'fog-test-key'
key = AWS.key_pairs.create(:name => key_name)
key = Fog::Compute[:aws].key_pairs.create(:name => key_name)
tests("#run_instances").formats(@run_instances_format) do
data = Fog::Compute[:aws].run_instances(@windows_ami, 1, 1, 'InstanceType' => 't1.micro', 'KeyName' => key_name).body

View file

@ -0,0 +1,54 @@
Shindo.tests('Fog::Compute[:aws] | placement group requests', ['aws']) do
@placement_group_format = {
'requestId' => String,
'placementGroupSet' => [{
'groupName' => String,
'state' => String,
'strategy' => String
}]
}
tests('success') do
tests("#create_placement_group('fog_placement_group', 'cluster')").formats(AWS::Compute::Formats::BASIC) do
pending if Fog.mocking?
Fog::Compute[:aws].create_placement_group('fog_placement_group', 'cluster').body
end
tests("#describe_placement_groups").formats(@placement_group_format) do
pending if Fog.mocking?
Fog::Compute[:aws].describe_placement_groups.body
end
tests("#describe_placement_groups('group-name' => 'fog_placement_group)").formats(@placement_group_format) do
pending if Fog.mocking?
Fog::Compute[:aws].describe_placement_groups('group-name' => 'fog_security_group').body
end
tests("#delete_placement_group('fog_placement_group')").formats(AWS::Compute::Formats::BASIC) do
pending if Fog.mocking?
Fog::Compute[:aws].delete_placement_group('fog_placement_group').body
end
end
tests('failure') do
unless Fog.mocking?
Fog::Compute[:aws].create_placement_group('fog_placement_group', 'cluster')
end
tests("duplicate #create_placement_group('fog_placement_group', 'cluster')").raises(Fog::Compute::AWS::Error) do
Fog::Compute[:aws].create_placement_group('fog_placement_group', 'cluster')
end
tests("#delete_placement_group('not_a_group_name')").raises(Fog::Compute::AWS::NotFound) do
Fog::Compute[:aws].delete_placement_group('not_a_group_name')
end
unless Fog.mocking?
Fog::Compute[:aws].delete_placement_group('fog_placement_group')
end
end
end

View file

@ -5,7 +5,7 @@ class Bluebox
module Formats
PRODUCT = {
'cost' => Float,
'cost' => String,
'description' => String,
'id' => String
}

View file

@ -9,7 +9,7 @@ Shindo.tests('Fog::Compute[:bluebox] | template requests', ['bluebox']) do
tests('success') do
@template_id = 'a00baa8f-b5d0-4815-8238-b471c4c4bf72' # Ubuntu 9.10 64bit
@template_id = '03807e08-a13d-44e4-b011-ebec7ef2c928' # Ubuntu LTS 10.04 64bit
tests("get_template('#{@template_id}')").formats(@template_format) do
pending if Fog.mocking?

View file

@ -26,6 +26,7 @@ Shindo.tests('Fog::Compute[:linode] | stack_script requests', ['linode']) do
end
tests('#stackscript_list').formats(@stack_scripts_format) do
pending # TODO: REV_NOTE can be either string or float?
pending if Fog.mocking?
Fog::Compute[:linode].stackscript_list.body
end

View file

@ -1,4 +1,4 @@
Shindo.tests('Fog::Compute[:ninefold] | server requests', ['ninefold']) do
Shindo.tests('Fog::Compute[:ninefold] | address requests', ['ninefold']) do
tests('success') do
@ -10,13 +10,13 @@ Shindo.tests('Fog::Compute[:ninefold] | server requests', ['ninefold']) do
end
result = Fog::Compute[:ninefold].query_async_job_result(:jobid => job['jobid'])['jobresult']['ipaddress']
@newaddressid = result['id']
Ninefold::Compute::Formats::Addresses::fill_address_data(result)
result
end
tests("#list_public_ip_addresses()").formats(Ninefold::Compute::Formats::Addresses::ADDRESSES) do
pending if Fog.mocking?
result = Fog::Compute[:ninefold].list_public_ip_addresses
Ninefold::Compute::Formats::Addresses::fill_address_data(result)
result
end
tests("#disassociate_ip_address()").formats(Ninefold::Compute::Formats::Addresses::DISASSOC_ADDRESS) do

View file

@ -1,6 +1,6 @@
# This will fail until there are jobs in the system.
Shindo.tests('Fog::Compute[:ninefold] | server requests', ['ninefold']) do
Shindo.tests('Fog::Compute[:ninefold] | async job requests', ['ninefold']) do
tests('success') do

View file

@ -153,21 +153,6 @@ class Ninefold
}]
end
module VirtualMachines
# Sometimes a few fields are missing from VM data - this method
# will fill them in if they don't exist, to ensure the format passes
def fill_virtual_machine_data(vms)
if vms.kind_of? Hash
vms['cpuused'] ||= ''
vms['networkkbsread'] ||= 0
vms['networkkbswrite'] ||= 0
elsif vms.kind_of? Array
vms.each {|vm| fill_virtual_machine_data(vm) }
end
vms
end
module_function :fill_virtual_machine_data
VIRTUAL_MACHINE = {
"id"=>Integer,
"name"=>String,
@ -196,17 +181,17 @@ class Ninefold
"nic"=>[{
"id"=>Integer,
"networkid"=>Integer,
"netmask"=>String,
"gateway"=>String,
"ipaddress"=>String,
"netmask"=>Fog::Nullable::String,
"gateway"=>Fog::Nullable::String,
"ipaddress"=>Fog::Nullable::String,
"traffictype"=>String,
"type"=>String,
"isdefault"=>Fog::Boolean,
}],
"hypervisor"=>String,
"cpuused"=>String,
"networkkbsread"=>Integer,
"networkkbswrite"=>Integer
"cpuused"=>Fog::Nullable::String,
"networkkbsread"=>Fog::Nullable::Integer,
"networkkbswrite"=>Fog::Nullable::Integer
}
VIRTUAL_MACHINES = [VIRTUAL_MACHINE]
end
@ -272,7 +257,7 @@ class Ninefold
"issystem"=>Fog::Boolean,
"state"=>String,
"related"=>Integer,
"broadcasturi"=>String,
"broadcasturi"=>Fog::Nullable::String,
"dns1"=>String,
"dns2"=>String,
"type"=>String,
@ -286,17 +271,6 @@ class Ninefold
}]
end
module Addresses
def fill_address_data(data)
if data.kind_of? Hash
data['virtualmachineid'] ||= 0
data['virtualmachinename'] ||= ''
elsif data.kind_of? Array
data.each {|d| fill_address_data(d) }
end
data
end
module_function :fill_address_data
ADDRESS = {
"id"=>Integer,
"ipaddress"=>String,
@ -312,8 +286,8 @@ class Ninefold
"associatednetworkid"=>Integer,
"networkid"=>Integer,
"state"=>String,
"virtualmachineid"=>Integer,
"virtualmachinename"=>String
"virtualmachineid"=>Fog::Nullable::Integer,
"virtualmachinename"=>Fog::Nullable::String
}
ADDRESSES = [ADDRESS]
DISASSOC_ADDRESS = {"jobid"=>Integer}

View file

@ -3,7 +3,7 @@
# can optionally specify VM_ID and IP_ID as environment variables, and we will use those. Note:
# The IP must not already have static nat enabled or any port mappings.
Shindo.tests('Fog::Compute[:ninefold] | server requests', ['ninefold']) do
Shindo.tests('Fog::Compute[:ninefold] | nat requests', ['ninefold']) do
if ENV['VM_ID'] && ENV['IP_ID']
@ipid, @vmid = ENV['IP_ID'], ENV['VM_ID']

View file

@ -1,4 +1,4 @@
Shindo.tests('Fog::Compute[:ninefold] | server requests', ['ninefold']) do
Shindo.tests('Fog::Compute[:ninefold] | template requests', ['ninefold']) do
tests('success') do

View file

@ -17,51 +17,42 @@ Shindo.tests('Fog::Compute[:ninefold] | server requests', ['ninefold']) do
:networkids => networks[0]['id'])
# wait for deployment, stash the job id.
@newvmid = newvm['id']
result = Ninefold::Compute::TestSupport.wait_for_job(newvm['jobid'])['jobresult']['virtualmachine']
Ninefold::Compute::Formats::VirtualMachines::fill_virtual_machine_data(result)
Ninefold::Compute::TestSupport.wait_for_job(newvm['jobid'])['jobresult']['virtualmachine']
end
tests("#list_virtual_machines()").formats(Ninefold::Compute::Formats::VirtualMachines::VIRTUAL_MACHINES) do
pending if Fog.mocking?
vms = Fog::Compute[:ninefold].list_virtual_machines
# This is a hack to work around the changing format - these fields may or may not exist.
Ninefold::Compute::Formats::VirtualMachines::fill_virtual_machine_data(vms)
Fog::Compute[:ninefold].list_virtual_machines
end
tests("#reboot_virtual_machine()").formats(Ninefold::Compute::Formats::VirtualMachines::VIRTUAL_MACHINE) do
pending if Fog.mocking?
job = Fog::Compute[:ninefold].reboot_virtual_machine(:id => @newvmid)
result = Ninefold::Compute::TestSupport.wait_for_job(job)['jobresult']['virtualmachine']
Ninefold::Compute::Formats::VirtualMachines::fill_virtual_machine_data(result)
Ninefold::Compute::TestSupport.wait_for_job(job)['jobresult']['virtualmachine']
end
tests("#stop_virtual_machine()").formats(Ninefold::Compute::Formats::VirtualMachines::VIRTUAL_MACHINE) do
pending if Fog.mocking?
job = Fog::Compute[:ninefold].stop_virtual_machine(:id => @newvmid)
result = Ninefold::Compute::TestSupport.wait_for_job(job)['jobresult']['virtualmachine']
Ninefold::Compute::Formats::VirtualMachines::fill_virtual_machine_data(result)
Ninefold::Compute::TestSupport.wait_for_job(job)['jobresult']['virtualmachine']
end
tests("#change_service_for_virtual_machine()").formats(Ninefold::Compute::Formats::VirtualMachines::VIRTUAL_MACHINE) do
pending if Fog.mocking?
vms = Fog::Compute[:ninefold].change_service_for_virtual_machine(:id => @newvmid,
Fog::Compute[:ninefold].change_service_for_virtual_machine(:id => @newvmid,
:serviceofferingid => Ninefold::Compute::TestSupport::ALT_SERVICE_OFFERING)
Ninefold::Compute::Formats::VirtualMachines::fill_virtual_machine_data(vms)
end
tests("#start_virtual_machine()").formats(Ninefold::Compute::Formats::VirtualMachines::VIRTUAL_MACHINE) do
pending if Fog.mocking?
job = Fog::Compute[:ninefold].start_virtual_machine(:id => @newvmid)
result = Ninefold::Compute::TestSupport.wait_for_job(job)['jobresult']['virtualmachine']
Ninefold::Compute::Formats::VirtualMachines::fill_virtual_machine_data(result)
Ninefold::Compute::TestSupport.wait_for_job(job)['jobresult']['virtualmachine']
end
tests("#destroy_virtual_machine()").formats(Ninefold::Compute::Formats::VirtualMachines::VIRTUAL_MACHINE) do
pending if Fog.mocking?
job = Fog::Compute[:ninefold].destroy_virtual_machine(:id => @newvmid)
result = Ninefold::Compute::TestSupport.wait_for_job(job)['jobresult']['virtualmachine']
Ninefold::Compute::Formats::VirtualMachines::fill_virtual_machine_data(result)
Ninefold::Compute::TestSupport.wait_for_job(job)['jobresult']['virtualmachine']
end
end

View file

@ -1,4 +1,4 @@
Shindo.tests('Fog::Storage[:aws] | object requests', [:aws]) do
Shindo.tests('Fog::Storage[:aws] | multipart upload requests', [:aws]) do
@directory = Fog::Storage[:aws].directories.create(:key => 'fogmultipartuploadtests')
@ -98,10 +98,7 @@ Shindo.tests('Fog::Storage[:aws] | object requests', [:aws]) do
tests("#get_object('#{@directory.identity}', 'fog_multipart_upload').body").succeeds do
pending if Fog.mocking?
data = Fog::Storage[:aws].get_object(@directory.identity, 'fog_multipart_upload').body
unless data == ('x' * 10 * 1024 * 1024)
raise 'content mismatch'
end
Fog::Storage[:aws].get_object(@directory.identity, 'fog_multipart_upload').body == ('x' * 10 * 1024 * 1024)
end
if !Fog.mocking?

View file

@ -0,0 +1,43 @@
Shindo.tests('Fog::Storage[:rackspace] | large object requests', [:rackspace]) do
@directory = Fog::Storage[:rackspace].directories.create(:key => 'foglargeobjecttests')
tests('success') do
tests("#put_object('#{@directory.identity}', 'fog_large_object/1', ('x' * 6 * 1024 * 1024))").succeeds do
pending if Fog.mocking?
Fog::Storage[:rackspace].put_object(@directory.identity, 'fog_large_object/1', ('x' * 6 * 1024 * 1024))
end
tests("#put_object('#{@directory.identity}', 'fog_large_object/2', ('x' * 4 * 1024 * 1024))").succeeds do
pending if Fog.mocking?
Fog::Storage[:rackspace].put_object(@directory.identity, 'fog_large_object/2', ('x' * 4 * 1024 * 1024))
end
tests("#put_object_manifest('#{@directory.identity}', 'fog_large_object')").succeeds do
pending if Fog.mocking?
Fog::Storage[:rackspace].put_object_manifest(@directory.identity, 'fog_large_object')
end
tests("#get_object('#{@directory.identity}', 'fog_large_object').body").succeeds do
pending if Fog.mocking?
Fog::Storage[:rackspace].get_object(@directory.identity, 'fog_large_object').body == ('x' * 10 * 1024 * 1024)
end
if !Fog.mocking?
['fog_large_object', 'fog_large_object/1', 'fog_large_object/2'].each do |key|
@directory.files.new(:key => key).destroy
end
end
end
tests('failure') do
tests("put_object_manifest")
end
@directory.destroy
end