1
0
Fork 0
mirror of https://github.com/fog/fog.git synced 2022-11-09 13:51:43 -05:00

Merge branch 'master' of github.com:fog/fog into file_examples

This commit is contained in:
Kyle Rames 2013-02-27 14:46:30 -06:00
commit b2c8bc27d3
78 changed files with 1742 additions and 362 deletions

View file

@ -13,6 +13,7 @@ module Fog
service(:compute, 'aws/compute', 'Compute')
service(:cloud_formation, 'aws/cloud_formation', 'CloudFormation')
service(:cloud_watch, 'aws/cloud_watch', 'CloudWatch')
service(:data_pipeline, 'aws/data_pipeline', 'DataPipeline')
service(:dynamodb, 'aws/dynamodb', 'DynamoDB')
service(:dns, 'aws/dns', 'DNS')
service(:elasticache, 'aws/elasticache', 'Elasticache')

View file

@ -0,0 +1,114 @@
require 'fog/aws'

module Fog
  module AWS
    # Service class for AWS Data Pipeline (API version 2012-10-29).
    # Wires up the request methods and the Pipeline model/collection.
    class DataPipeline < Fog::Service
      extend Fog::AWS::CredentialFetcher::ServiceMethods

      requires :aws_access_key_id, :aws_secret_access_key
      recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at

      request_path 'fog/aws/requests/data_pipeline'
      request :activate_pipeline
      request :create_pipeline
      request :delete_pipeline
      request :describe_pipelines
      request :list_pipelines
      request :put_pipeline_definition

      model_path 'fog/aws/models/data_pipeline'
      model :pipeline
      collection :pipelines

      # Mock is not implemented for this service yet; constructing it raises.
      class Mock
        def initialize(options={})
          Fog::Mock.not_implemented
        end
      end

      class Real
        attr_reader :region

        include Fog::AWS::CredentialFetcher::ConnectionMethods
        # Initialize connection to DataPipeline
        #
        # ==== Notes
        # options parameter must include values for :aws_access_key_id and
        # :aws_secret_access_key in order to create a connection
        #
        # ==== Examples
        #   datapipeline = DataPipeline.new(
        #     :aws_access_key_id => your_aws_access_key_id,
        #     :aws_secret_access_key => your_aws_secret_access_key
        #   )
        #
        # ==== Parameters
        # * options<~Hash> - config arguments for connection. Defaults to {}.
        # * region<~String> - optional region to use. For instance, 'eu-west-1', 'us-east-1' and etc.
        #
        # ==== Returns
        # * DataPipeline object with connection to AWS.
        def initialize(options={})
          @use_iam_profile = options[:use_iam_profile]
          @connection_options = options[:connection_options] || {}
          @version = '2012-10-29'
          @region = options[:region] || 'us-east-1'
          @host = options[:host] || "datapipeline.#{@region}.amazonaws.com"
          @path = options[:path] || '/'
          @persistent = options[:persistent] || false
          @port = options[:port] || 443
          @scheme = options[:scheme] || 'https'
          @connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)

          setup_credentials(options)
        end

        # FIXME(review): `security_groups` is not defined on this service —
        # this looks like a copy/paste leftover from the Compute service and
        # will raise NoMethodError if called. Confirm and remove or fix.
        def owner_id
          @owner_id ||= security_groups.get('default').owner_id
        end

        # Reset the underlying persistent connection.
        def reload
          @connection.reset
        end

        private

        # Stores credential options and builds the SigV4 signer used by
        # #request; re-run whenever IAM-profile credentials are refreshed.
        def setup_credentials(options)
          @aws_access_key_id = options[:aws_access_key_id]
          @aws_secret_access_key = options[:aws_secret_access_key]
          @aws_session_token = options[:aws_session_token]
          @aws_credentials_expire_at = options[:aws_credentials_expire_at]

          @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key, @region, 'datapipeline')
        end

        # Issues a signed x-amz-json-1.1 POST. Callers supply :body (encoded
        # JSON) and an X-Amz-Target header naming the operation.
        def request(params)
          refresh_credentials_if_expired

          # Params for all DataPipeline requests
          params.merge!({
            :expects => 200,
            :host => @host,
            :method => :post,
            :path => '/',
          })

          date = Fog::Time.now
          params[:headers] = {
            'Date' => date.to_date_header,
            'Host' => @host,
            'X-Amz-Date' => date.to_iso8601_basic,
            'Content-Type' => 'application/x-amz-json-1.1',
            'Content-Length' => params[:body].bytesize.to_s,
          }.merge!(params[:headers] || {})
          # Session token (if present) must be in the headers before signing.
          params[:headers]['x-amz-security-token'] = @aws_session_token if @aws_session_token
          params[:headers]['Authorization'] = @signer.sign(params, date)

          response = @connection.request(params)

          response
        end
      end
    end
  end
end

View file

@ -0,0 +1,67 @@
require 'fog/core/model'

module Fog
  module AWS
    class DataPipeline
      # Model wrapping a single AWS Data Pipeline.
      class Pipeline < Fog::Model
        identity :id, :aliases => 'pipelineId'
        attribute :name
        attribute :description
        attribute :user_id, :aliases => 'userId'
        attribute :account_id, :aliases => 'accountId'
        attribute :state, :aliases => 'pipelineState'
        attribute :unique_id, :aliases => 'uniqueId'

        # DescribePipelines returns most attributes inside a 'fields' array of
        # { 'key' => '@name', 'stringValue' => ... } hashes; flatten the
        # string-valued fields into top-level attributes, stripping the
        # leading '@' from each key.
        def initialize(attributes={})
          # Extract the 'fields' portion of a response to attributes
          if attributes.include?('fields')
            string_fields = attributes['fields'].select { |f| f.include?('stringValue') }
            field_attributes = Hash[string_fields.map { |f| [f['key'][/^@(.+)$/, 1], f['stringValue']] }]
            merge_attributes(field_attributes)
          end

          super
        end

        # Creates the pipeline. Requires name and unique_id.
        # Fix: the optional description attribute is now forwarded to
        # create_pipeline (it was previously declared but never sent).
        def save
          requires :name
          requires :unique_id

          data = service.create_pipeline(unique_id, name, description)
          merge_attributes(data)

          true
        end

        # Activates a saved pipeline so it begins executing.
        def activate
          requires :id

          service.activate_pipeline(id)

          true
        end

        # Uploads a pipeline definition (array of object hashes).
        def put(objects)
          requires :id

          service.put_pipeline_definition(id, objects)

          true
        end

        # Deletes the pipeline.
        def destroy
          requires :id

          service.delete_pipeline(id)

          true
        end
      end
    end
  end
end

View file

@ -0,0 +1,36 @@
require 'fog/core/collection'
require 'fog/aws/models/data_pipeline/pipeline'

module Fog
  module AWS
    class DataPipeline
      # Collection of Pipeline models.
      class Pipelines < Fog::Collection
        model Fog::AWS::DataPipeline::Pipeline

        # Lists every pipeline, following pagination.
        # Fix: the previous implementation looped on 'hasMoreResults' but
        # never passed the marker back to list_pipelines, so paginated
        # accounts would loop forever over the first page.
        def all
          ids = []
          marker = nil

          loop do
            options = marker ? { :marker => marker } : {}
            result = service.list_pipelines(options)
            ids.concat(result['pipelineIdList'].map { |id| id['id'] })
            marker = result['marker']
            break unless result['hasMoreResults'] && marker
          end

          load(service.describe_pipelines(ids)['pipelineDescriptionList'])
        end

        # Fetches a single pipeline by ID; returns nil when it has been
        # deleted or does not exist.
        def get(id)
          data = service.describe_pipelines([id])['pipelineDescriptionList'].first
          new(data)
        rescue Excon::Errors::BadRequest => error
          data = Fog::JSON.decode(error.response.body)
          raise unless data['__type'] == 'PipelineDeletedException' || data['__type'] == 'PipelineNotFoundException'

          nil
        end
      end
    end
  end
end

View file

@ -0,0 +1,35 @@
module Fog
  module AWS
    class DataPipeline
      class Real
        # Activate a pipeline
        # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_ActivatePipeline.html
        #
        # ==== Parameters
        # * id <~String> - The ID of the pipeline to activate
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        def activate_pipeline(id)
          body = Fog::JSON.encode('pipelineId' => id)
          response = request(
            :body => body,
            :headers => { 'X-Amz-Target' => 'DataPipeline.ActivatePipeline' }
          )

          Fog::JSON.decode(response.body)
        end
      end

      class Mock
        def activate_pipeline(id)
          Fog::Mock.not_implemented
        end
      end
    end
  end
end

View file

@ -0,0 +1,41 @@
module Fog
  module AWS
    class DataPipeline
      class Real
        # Create a pipeline
        # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_CreatePipeline.html
        #
        # ==== Parameters
        # * unique_id <~String> - A unique, caller-supplied idempotency token for the pipeline
        # * name <~String> - The name of the pipeline
        # * description <~String> - Optional description of the pipeline
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        def create_pipeline(unique_id, name, description=nil)
          params = {
            'uniqueId' => unique_id,
            'name' => name,
          }
          # Fix: the API expects the lower-camelCase key 'description';
          # 'Description' was silently ignored by the service.
          params['description'] = description if description

          response = request(
            :body => Fog::JSON.encode(params),
            :headers => { 'X-Amz-Target' => 'DataPipeline.CreatePipeline' },
          )
          Fog::JSON.decode(response.body)
        end
      end

      class Mock
        def create_pipeline(unique_id, name, description=nil)
          Fog::Mock.not_implemented
        end
      end
    end
  end
end

View file

@ -0,0 +1,35 @@
module Fog
  module AWS
    class DataPipeline
      class Real
        # Delete a pipeline
        # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_DeletePipeline.html
        #
        # ==== Parameters
        # * id <~String> - The id of the pipeline to delete
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        def delete_pipeline(id)
          body = Fog::JSON.encode('pipelineId' => id)
          response = request(
            :body => body,
            :headers => { 'X-Amz-Target' => 'DataPipeline.DeletePipeline' }
          )

          Fog::JSON.decode(response.body)
        end
      end

      class Mock
        def delete_pipeline(id)
          Fog::Mock.not_implemented
        end
      end
    end
  end
end

View file

@ -0,0 +1,36 @@
module Fog
  module AWS
    class DataPipeline
      class Real
        # Describe pipelines
        # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_DescribePipelines.html
        #
        # ==== Parameters
        # * ids <~Array<String>> - IDs of pipelines to retrieve information for
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        def describe_pipelines(ids)
          body = Fog::JSON.encode('pipelineIds' => ids)
          response = request(
            :body => body,
            :headers => { 'X-Amz-Target' => 'DataPipeline.DescribePipelines' }
          )

          Fog::JSON.decode(response.body)
        end
      end

      class Mock
        def describe_pipelines(ids)
          Fog::Mock.not_implemented
        end
      end
    end
  end
end

View file

@ -0,0 +1,36 @@
module Fog
  module AWS
    class DataPipeline
      class Real
        # List all pipelines
        # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_ListPipelines.html
        #
        # ==== Parameters
        # * options<~Hash>:
        #   * :marker <~String> - The starting point for the results to be returned.
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        def list_pipelines(options={})
          params = {}
          # Fix: the API expects the lower-camelCase key 'marker';
          # 'Marker' was silently ignored, breaking pagination.
          params['marker'] = options[:marker] if options[:marker]

          response = request(
            :body => Fog::JSON.encode(params),
            :headers => { 'X-Amz-Target' => 'DataPipeline.ListPipelines' },
          )
          Fog::JSON.decode(response.body)
        end
      end

      class Mock
        def list_pipelines(options={})
          Fog::Mock.not_implemented
        end
      end
    end
  end
end

View file

@ -0,0 +1,72 @@
module Fog
module AWS
class DataPipeline
class Real
# Put raw pipeline definition JSON
# http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_PutPipelineDefinition.html
# ==== Parameters
# * PipelineId <~String> - The ID of the pipeline
# * PipelineObjects <~String> - Objects in the pipeline
# ==== Returns
# * response<~Excon::Response>:
# * body<~Hash>:
def put_pipeline_definition(id, objects)
params = {
'pipelineId' => id,
'pipelineObjects' => transform_objects(objects),
}
response = request({
:body => Fog::JSON.encode(params),
:headers => { 'X-Amz-Target' => 'DataPipeline.PutPipelineDefinition' },
})
Fog::JSON.decode(response.body)
end
# Take a list of pipeline object hashes as specified in the Data Pipeline JSON format
# and transform it into the format expected by the API
private
def transform_objects(objects)
output = []
objects.each do |object|
new_object = {}
new_object['id'] = object.delete('id')
new_object['name'] = object.delete('name') || new_object['id']
new_object['fields'] = []
object.each do |key, value|
if value.is_a?(Hash)
new_object['fields'] << { 'key' => key, 'refValue' => value['ref'] }
elsif value.is_a?(Array)
value.each do |v|
new_object['fields'] << { 'key' => key, 'stringValue' => v }
end
else
new_object['fields'] << { 'key' => key, 'stringValue' => value }
end
end
output << new_object
end
output
end
end
class Mock
def put_pipeline_definition(id, objects)
Fog::Mock.not_implemented
end
end
end
end
end

View file

@ -25,8 +25,7 @@ module Fog
:host => @host,
:idempotent => true,
:method => 'GET',
:parser => Fog::Parsers::Storage::AWS::GetService.new,
:url => @host
:parser => Fog::Parsers::Storage::AWS::GetService.new
})
end

View file

@ -400,7 +400,7 @@ DATA
begin
response = @connection.request(params, &block)
rescue Excon::Errors::TemporaryRedirect => error
uri = URI.parse(error.response.headers['Location'])
uri = URI.parse(error.response.is_a?(Hash) ? error.response[:headers]['Location'] : error.response.headers['Location'])
Fog::Logger.warning("fog: followed redirect to #{uri.host}, connecting to the matching region will be more performant")
response = Fog::Connection.new("#{@scheme}://#{uri.host}:#{@port}", false, @connection_options).request(original_params, &block)
end

View file

@ -15,6 +15,8 @@ class AWS < Fog::Bin
Fog::AWS::CloudWatch
when :compute
Fog::Compute::AWS
when :data_pipeline
Fog::AWS::DataPipeline
when :ddb, :dynamodb
Fog::AWS::DynamoDB
when :dns
@ -68,6 +70,8 @@ class AWS < Fog::Bin
when :compute
Fog::Logger.warning("AWS[:compute] is not recommended, use Compute[:aws] for portability")
Fog::Compute.new(:provider => 'AWS')
when :data_pipeline
Fog::AWS::DataPipeline
when :ddb, :dynamodb
Fog::AWS::DynamoDB.new
when :dns

View file

@ -7,10 +7,14 @@ class OpenStack < Fog::Bin
Fog::Compute::OpenStack
when :identity
Fog::Identity::OpenStack
when :image
Fog::Image::OpenStack
when :network
Fog::Network::OpenStack
when :storage
Fog::Storage::OpenStack
when :volume
Fog::Volume::OpenStack
else
raise ArgumentError, "Unrecognized service: #{key}"
end
@ -23,14 +27,20 @@ class OpenStack < Fog::Bin
Fog::Logger.warning("OpenStack[:compute] is not recommended, use Compute[:openstack] for portability")
Fog::Compute.new(:provider => 'OpenStack')
when :identity
Fog::Logger.warning("OpenStack[:identity] is not recommended, use Compute[:openstack] for portability")
Fog::Compute.new(:provider => 'OpenStack')
Fog::Logger.warning("OpenStack[:identity] is not recommended, use Identity[:openstack] for portability")
Fog::Identity.new(:provider => 'OpenStack')
when :image
Fog::Logger.warning("OpenStack[:image] is not recommended, use Image[:openstack] for portability")
Fog::Image.new(:provider => 'OpenStack')
when :network
Fog::Logger.warning("OpenStack[:network] is not recommended, use Network[:openstack] for portability")
Fog::Network.new(:provider => 'OpenStack')
when :storage
Fog::Logger.warning("OpenStack[:storage] is not recommended, use Storage[:openstack] for portability")
Fog::Network.new(:provider => 'OpenStack')
Fog::Storage.new(:provider => 'OpenStack')
when :volume
Fog::Logger.warning("OpenStack[:volume] is not recommended, use Volume[:openstack] for portability")
Fog::Volume.new(:provider => 'OpenStack')
else
raise ArgumentError, "Unrecognized service: #{key.inspect}"
end

View file

@ -7,19 +7,12 @@ module Fog
def self.new(attributes)
attributes = attributes.dup # prevent delete from having side effects
case provider = attributes.delete(:provider).to_s.downcase.to_sym
when :aws
require 'fog/aws/cdn'
Fog::CDN::AWS.new(attributes)
when :hp
require 'fog/hp/cdn'
Fog::CDN::HP.new(attributes)
when :rackspace
require 'fog/rackspace/cdn'
Fog::CDN::Rackspace.new(attributes)
else
raise ArgumentError.new("#{provider} is not a recognized cdn provider")
provider = attributes.delete(:provider).to_s.downcase.to_sym
if self.providers.include?(provider)
require "fog/#{provider}/cdn"
return Fog::CDN.const_get(Fog.providers[provider]).new(attributes)
end
raise ArgumentError.new("#{provider} is not a recognized cdn provider")
end
def self.providers

View file

@ -9,46 +9,11 @@ module Fog
attributes = attributes.dup # prevent delete from having side effects
provider = attributes.delete(:provider).to_s.downcase.to_sym
case provider
when :aws
require 'fog/aws/compute'
Fog::Compute::AWS.new(attributes)
when :bluebox
require 'fog/bluebox/compute'
Fog::Compute::Bluebox.new(attributes)
when :brightbox
require 'fog/brightbox/compute'
Fog::Compute::Brightbox.new(attributes)
when :cloudstack
require 'fog/cloudstack/compute'
Fog::Compute::Cloudstack.new(attributes)
when :clodo
require 'fog/clodo/compute'
Fog::Compute::Clodo.new(attributes)
when :ecloud
require 'fog/ecloud/compute'
Fog::Compute::Ecloud.new(attributes)
when :glesys
require 'fog/glesys/compute'
Fog::Compute::Glesys.new(attributes)
when :gogrid
require 'fog/go_grid/compute'
Fog::Compute::GoGrid.new(attributes)
when :hp
require 'fog/hp/compute'
Fog::Compute::HP.new(attributes)
when :ibm
require 'fog/ibm/compute'
Fog::Compute::IBM.new(attributes)
when :joyent
require 'fog/joyent/compute'
Fog::Compute::Joyent.new(attributes)
when :libvirt
require 'fog/libvirt/compute'
Fog::Compute::Libvirt.new(attributes)
when :linode
require 'fog/linode/compute'
Fog::Compute::Linode.new(attributes)
when :new_servers
require 'fog/bare_metal_cloud/compute'
Fog::Logger.deprecation "`new_servers` is deprecated. Please use `bare_metal_cloud` instead."
@ -56,15 +21,6 @@ module Fog
when :baremetalcloud
require 'fog/bare_metal_cloud/compute'
Fog::Compute::BareMetalCloud.new(attributes)
when :ninefold
require 'fog/ninefold/compute'
Fog::Compute::Ninefold.new(attributes)
when :openstack
require 'fog/openstack/compute'
Fog::Compute::OpenStack.new(attributes)
when :ovirt
require 'fog/ovirt/compute'
Fog::Compute::Ovirt.new(attributes)
when :rackspace
version = attributes.delete(:version)
version = version.to_s.downcase.to_sym unless version.nil?
@ -76,31 +32,17 @@ module Fog
require 'fog/rackspace/compute'
Fog::Compute::Rackspace.new(attributes)
end
when :serverlove
require 'fog/serverlove/compute'
Fog::Compute::Serverlove.new(attributes)
when :stormondemand
require 'fog/storm_on_demand/compute'
Fog::Compute::StormOnDemand.new(attributes)
when :vcloud
require 'fog/vcloud/compute'
Fog::Vcloud::Compute.new(attributes)
when :virtualbox
require 'fog/virtual_box/compute'
Fog::Compute::VirtualBox.new(attributes)
when :vmfusion
require 'fog/vmfusion/compute'
Fog::Compute::Vmfusion.new(attributes)
when :voxel
require 'fog/voxel/compute'
Fog::Compute::Voxel.new(attributes)
when :vsphere
require 'fog/vsphere/compute'
Fog::Compute::Vsphere.new(attributes)
when :xenserver
require 'fog/xenserver/compute'
Fog::Compute::XenServer.new(attributes)
else
if self.providers.include?(provider)
require "fog/#{provider}/compute"
return Fog::Compute.const_get(Fog.providers[provider]).new(attributes)
end
raise ArgumentError.new("#{provider} is not a recognized compute provider")
end
end

View file

@ -7,37 +7,14 @@ module Fog
def self.new(attributes)
attributes = attributes.dup # prevent delete from having side effects
case provider = attributes.delete(:provider).to_s.downcase.to_sym
when :aws
require 'fog/aws/dns'
Fog::DNS::AWS.new(attributes)
when :bluebox
require 'fog/bluebox/dns'
Fog::DNS::Bluebox.new(attributes)
when :dnsimple
require 'fog/dnsimple/dns'
Fog::DNS::DNSimple.new(attributes)
when :dnsmadeeasy
require 'fog/dnsmadeeasy/dns'
Fog::DNS::DNSMadeEasy.new(attributes)
when :dreamhost
require 'fog/dreamhost/dns'
Fog::DNS::Dreamhost.new(attributes)
when :dynect
require 'fog/dynect/dns'
Fog::DNS::Dynect.new(attributes)
when :linode
require 'fog/linode/dns'
Fog::DNS::Linode.new(attributes)
when :zerigo
require 'fog/zerigo/dns'
Fog::DNS::Zerigo.new(attributes)
when :rackspace
require 'fog/rackspace/dns'
Fog::DNS::Rackspace.new(attributes)
else
raise ArgumentError.new("#{provider} is not a recognized dns provider")
provider = attributes.delete(:provider).to_s.downcase.to_sym
if self.providers.include?(provider)
require "fog/#{provider}/dns"
return Fog::DNS.const_get(Fog.providers[provider]).new(attributes)
end
raise ArgumentError.new("#{provider} is not a recognized dns provider")
end
def self.providers

View file

@ -19,7 +19,6 @@ module Fog
)
else
response = request({
:block => block,
:expects => 200,
:method => 'GET',
:path => "#{Fog::HP.escape(container)}/#{Fog::HP.escape(object)}"

View file

@ -22,7 +22,6 @@ module Fog
)
else
response = shared_request({
:block => block,
:expects => 200,
:method => 'GET',
:path => path

View file

@ -11,16 +11,17 @@ module Fog
when :rackspace
require 'fog/rackspace/identity'
Fog::Rackspace::Identity.new(attributes)
when :openstack
require 'fog/openstack/identity'
Fog::Identity::OpenStack.new(attributes)
else
if self.providers.include?(provider)
require "fog/#{provider}/identity"
return Fog::Identity.const_get(Fog.providers[provider]).new(attributes)
end
raise ArgumentError.new("#{provider} has no identity service")
end
end
def self.providers
Fog.services[:idenity]
Fog.services[:identity]
end
end

View file

@ -7,13 +7,12 @@ module Fog
def self.new(attributes)
attributes = attributes.dup # Prevent delete from having side effects
case provider = attributes.delete(:provider).to_s.downcase.to_sym
when :openstack
require 'fog/openstack/image'
Fog::Image::OpenStack.new(attributes)
else
raise ArgumentError.new("#{provider} has no identity service")
provider = attributes.delete(:provider).to_s.downcase.to_sym
if self.providers.include?(provider)
require "fog/#{provider}/image"
return Fog::Image.const_get(Fog.providers[provider]).new(attributes)
end
raise ArgumentError.new("#{provider} has no identity service")
end
def self.providers

View file

@ -53,7 +53,7 @@ module Fog
def resize(flavor)
requires :id
service.resize_machine(id, flavor)
service.resize_machine(id, flavor.name)
true
end

View file

@ -1,6 +1,7 @@
module Fog
module Compute
class Joyent
class Real
def resize_machine(id, package)
request(
:method => "POST",
@ -8,6 +9,7 @@ module Fog
:query => {"action" => "resize", "package" => package}
)
end
end
end
end
end

View file

@ -9,13 +9,12 @@ module Fog
attributes = attributes.dup # Prevent delete from having side effects
provider = attributes.delete(:provider).to_s.downcase.to_sym
case provider
when :openstack
require 'fog/openstack/network'
Fog::Network::OpenStack.new(attributes)
else
raise ArgumentError.new("#{provider} has no network service")
if self.providers.include?(provider)
require "fog/#{provider}/network"
return Fog::Network.const_get(Fog.providers[provider]).new(attributes)
end
raise ArgumentError.new("#{provider} has no network service")
end
def self.providers

View file

@ -42,9 +42,11 @@ module Fog
end
service(:compute , 'openstack/compute' , 'Compute' )
service(:image, 'openstack/image', 'Image')
service(:identity, 'openstack/identity', 'Identity')
service(:network, 'openstack/network', 'Network')
service(:storage, 'openstack/storage', 'Storage')
service(:volume, 'openstack/volume', 'Volume')
# legacy v1.0 style auth
def self.authenticate_v1(options, connection_options = {})

View file

@ -100,6 +100,10 @@ module Fog
request :update_metadata
request :delete_metadata
# Metadatam
request :delete_meta
request :update_meta
# Address
request :list_addresses
request :list_address_pools

View file

@ -46,7 +46,6 @@ module Fog
def initialize(attributes={})
# Old 'connection' is renamed as service and should be used instead
prepare_service_value(attributes)
attributes[:metadata] = {}
self.security_groups = attributes.delete(:security_groups)
self.min_count = attributes.delete(:min_count)
@ -66,9 +65,10 @@ module Fog
end
def metadata=(new_metadata={})
return unless new_metadata
metas = []
new_metadata.each_pair {|k,v| metas << {"key" => k, "value" => v} }
metadata.load(metas)
@metadata = metadata.load(metas)
end
def user_data=(ascii_userdata)
@ -86,24 +86,41 @@ module Fog
service.images(:server => self)
end
def private_ip_address
if addresses['private']
#assume only a single private
return addresses['private'].first
elsif addresses['internet']
#assume no private IP means private cloud
return addresses['internet'].first
end
def all_addresses
# currently openstack API does not tell us what is a floating ip vs a fixed ip for the vm listing,
# we fall back to get all addresses and filter sadly.
@all_addresses ||= service.list_all_addresses.body["floating_ips"].select{|data| data['instance_id'] == id}
end
def public_ip_address
if addresses['public']
#assume last is either original or assigned from floating IPs
return addresses['public'].last
elsif addresses['internet']
#assume no public IP means private cloud
return addresses['internet'].first
end
def reload
@all_addresses = nil
super
end
# returns all ip_addresses for a given instance
# this includes both the fixed ip(s) and the floating ip(s)
def ip_addresses
addresses.values.flatten.map{|x| x['addr']}
end
def floating_ip_addresses
all_addresses.map{|addr| addr["ip"]}
end
alias_method :public_ip_addresses, :floating_ip_addresses
def floating_ip_address
floating_ip_addresses.first
end
alias_method :public_ip_address, :floating_ip_address
def private_ip_addresses
ip_addresses - floating_ip_addresses
end
def private_ip_address
private_ip_addresses.first
end
def image_ref
@ -229,10 +246,7 @@ module Fog
def save
raise Fog::Errors::Error.new('Resaving an existing object may create a duplicate') if persisted?
requires :flavor_ref, :image_ref, :name
meta_hash = {}
metadata.each { |meta| meta_hash.store(meta.key, meta.value) }
options = {
'metadata' => meta_hash,
'personality' => personality,
'accessIPv4' => accessIPv4,
'accessIPv6' => accessIPv6,
@ -244,6 +258,7 @@ module Fog
'max_count' => @max_count,
'os:scheduler_hints' => @os_scheduler_hints
}
options['metadata'] = metadata.to_hash unless @metadata.nil?
options = options.reject {|key, value| value.nil?}
data = service.create_server(name, image_ref, flavor_ref, options)
merge_attributes(data.body['server'])

View file

@ -32,7 +32,7 @@ module Fog
merge_attributes(connection.create_floating_ip(self.floating_network_id,
self.attributes).body['floating_ip'])
self.attributes).body['floatingip'])
self
end

View file

@ -17,11 +17,11 @@ module Fog
def all(filters = filters)
self.filters = filters
load(connection.list_floating_ips(filters).body['floating_ips'])
load(connection.list_floating_ips(filters).body['floatingips'])
end
def get(floating_network_id)
if floating_ip = connection.get_floating_ip(floating_network_id).body['floating_ip']
if floating_ip = connection.get_floating_ip(floating_network_id).body['floatingip']
new(floating_ip)
end
rescue Fog::Network::OpenStack::NotFound

View file

@ -12,6 +12,14 @@ module Fog
attribute :status
attribute :admin_state_up
attribute :tenant_id
attribute :provider_network_type,
:aliases => 'provider:network_type'
attribute :provider_physical_network,
:aliases => 'provider:physical_network'
attribute :provider_segmentation_id,
:aliases => 'provider:segmentation_id'
attribute :router_external,
:aliases => 'router:external'
def initialize(attributes)
# Old 'connection' is renamed as service and should be used instead

View file

@ -0,0 +1,43 @@
module Fog
module Compute
class OpenStack
class Real
def delete_meta(collection_name, parent_id, key)
request(
:expects => 204,
:method => 'DELETE',
:path => "#{collection_name}/#{parent_id}/metadata/#{key}"
)
end
end
class Mock
def delete_meta(collection_name, parent_id, key)
if collection_name == "images" then
if not list_images_detail.body['images'].detect {|_| _['id'] == parent_id}
raise Fog::Compute::OpenStack::NotFound
end
end
if collection_name == "servers" then
if not list_servers_detail.body['servers'].detect {|_| _['id'] == parent_id}
raise Fog::Compute::OpenStack::NotFound
end
end
response = Excon::Response.new
response.status = 204
response
end
end
end
end
end

View file

@ -0,0 +1,46 @@
module Fog
module Compute
class OpenStack
class Real
def update_meta(collection_name, parent_id, key, value)
request(
:body => Fog::JSON.encode({ 'meta' => {key => value}}),
:expects => 200,
:method => 'PUT',
:path => "#{collection_name}/#{parent_id}/metadata/#{key}"
)
end
end
class Mock
def update_meta(collection_name, parent_id, key, value)
if collection_name == "images" then
if not list_images_detail.body['images'].detect {|_| _['id'] == parent_id}
raise Fog::Compute::OpenStack::NotFound
end
end
if collection_name == "servers" then
if not list_servers_detail.body['servers'].detect {|_| _['id'] == parent_id}
raise Fog::Compute::OpenStack::NotFound
end
end
#FIXME join w/ existing metadata here
response = Excon::Response.new
response.body = { "metadata" => {key => value} }
response.status = 200
response
end
end
end
end
end

View file

@ -5,14 +5,14 @@ module Fog
class Real
def associate_floating_ip(floating_ip_id, port_id, options = {})
data = {
'floating_ip' => {
'floatingip' => {
'port_id' => port_id,
}
}
vanilla_options = [:fixed_ip_address]
vanilla_options.reject{ |o| options[o].nil? }.each do |key|
data['floating_ip'][key] = options[key]
data['floatingip'][key] = options[key]
end
request(
@ -39,7 +39,7 @@ module Fog
}
self.data[:floating_ips][data['floating_ip_id']] = data
response.body = { 'floating_ip' => data }
response.body = { 'floatingip' => data }
response
end
end

View file

@ -5,7 +5,7 @@ module Fog
class Real
def create_floating_ip(floating_network_id, options = {})
data = {
'floating_ip' => {
'floatingip' => {
'floating_network_id' => floating_network_id,
'port_id' => options[:port_id],
'tenant_id' => options[:tenant_id],
@ -15,7 +15,7 @@ module Fog
vanilla_options = [:port_id, :tenant_id, :fixed_ip_address ]
vanilla_options.reject{ |o| options[o].nil? }.each do |key|
data['floating_ip'][key] = options[key]
data['floatingip'][key] = options[key]
end
request(
@ -40,7 +40,7 @@ module Fog
'router_id' => nil,
}
self.data[:floating_ips][data['id']] = data
response.body = { 'floating_ip' => data }
response.body = { 'floatingip' => data }
response
end
end

View file

@ -15,7 +15,7 @@ module Fog
class Mock
def delete_floating_ip(floating_ip_id)
response = Excon::Response.new
if list_floating_ips.body['floating_ips'].map { |r| r['id'] }.include? floating_ip_id
if list_floating_ips.body['floatingips'].map { |r| r['id'] }.include? floating_ip_id
self.data[:floating_ips].delete(floating_ip_id)
response.status = 204
response

View file

@ -5,14 +5,14 @@ module Fog
class Real
def disassociate_floating_ip(floating_ip_id, options = {})
data = {
'floating_ip' => {
'floatingip' => {
'port_id' => nil,
}
}
vanilla_options = [:fixed_ip_address]
vanilla_options.reject{ |o| options[o].nil? }.each do |key|
data['floating_ip'][key] = options[key]
data['floatingip'][key] = options[key]
end
request(
@ -39,7 +39,7 @@ module Fog
}
self.data[:floating_ips][data['floating_ip_id']] = data
response.body = { 'floating_ip' => data }
response.body = { 'floatingip' => data }
response
end
end

View file

@ -18,7 +18,7 @@ module Fog
if data = self.data[:floating_ips][floating_ip_id]
response.status = 200
response.body = {
"floating_ip" => {
"floatingip" => {
"id" => "00000000-0000-0000-0000-000000000000",
# changed
# "floating_ip_id" => floating_ip_id,

View file

@ -16,7 +16,7 @@ module Fog
class Mock
def list_floating_ips(filters = {})
Excon::Response.new(
:body => { 'floating_ips' => self.data[:floating_ips].values },
:body => { 'floatingips' => self.data[:floating_ips].values },
:status => 200
)
end

View file

@ -17,7 +17,6 @@ module Fog
end
request(params.merge!({
:block => block,
:expects => 200,
:method => 'GET',
:path => "#{Fog::OpenStack.escape(container)}/#{Fog::OpenStack.escape(object)}"

View file

@ -42,6 +42,7 @@ module Fog
request :list_template_volumes
request :add_volume
request :destroy_volume
request :get_api_version
module Shared
# converts an OVIRT object into an hash for fog to consume.

View file

@ -33,7 +33,7 @@ module Fog
end
def locked?
!!(status =~ /locked/i)
!!(status =~ /locked/i) || (attributes[:volumes]=nil) || volumes.any?{|v| !!(v.status =~ /locked/i)}
end
def stopped?
@ -84,7 +84,7 @@ module Fog
end
def start(options = {})
wait_for { stopped? } if options[:blocking]
wait_for { !locked? } if options[:blocking]
service.vm_action(:id =>id, :action => :start)
reload
end

View file

@ -15,6 +15,7 @@ module Fog
attribute :format
attribute :sparse
attribute :size_gb
attribute :status
def size_gb
attributes[:size_gb] ||= attributes[:size].to_i / DISK_SIZE_TO_GB if attributes[:size]

View file

@ -0,0 +1,16 @@
module Fog
module Compute
class Ovirt
class Real
def api_version
client.api_version
end
end
class Mock
def api_version
"3.1"
end
end
end
end
end

View file

@ -6,7 +6,7 @@ module Fog
class Rackspace < Fog::Service
requires :rackspace_api_key, :rackspace_username
recognizes :rackspace_auth_url, :persistent
recognizes :rackspace_auth_url, :persistent, :rackspace_cdn_ssl
request_path 'fog/rackspace/requests/cdn'
request :get_containers
@ -14,9 +14,48 @@ module Fog
request :post_container
request :put_container
request :delete_object
module Base
URI_HEADERS = {
"X-Cdn-Ios-Uri" => :ios_uri,
"X-Cdn-Uri" => :uri,
"X-Cdn-Streaming-Uri" => :streaming_uri,
"X-Cdn-Ssl-Uri" => :ssl_uri
}.freeze
def publish_container(container, publish = true)
enabled = publish ? 'True' : 'False'
response = put_container(container.key, 'X-Cdn-Enabled' => enabled)
return {} unless publish
urls_from_headers(response.headers)
end
def urls(container)
begin
response = head_container(container.key)
return {} unless response.headers['X-Cdn-Enabled'] == 'True'
urls_from_headers response.headers
rescue Fog::Service::NotFound
{}
end
end
private
def urls_from_headers(headers)
h = {}
URI_HEADERS.keys.each do | header |
key = URI_HEADERS[header]
h[key] = headers[header]
end
h
end
end
class Mock
include Base
def self.data
@data ||= Hash.new do |hash, key|
hash[key] = {}
@ -34,20 +73,21 @@ module Fog
def data
self.class.data[@rackspace_username]
end
def purge(object)
return true if object.is_a? Fog::Storage::Rackspace::File
raise Fog::Errors::NotImplemented.new("#{object.class} does not support CDN purging") if object
end
def reset_data
self.class.data.delete(@rackspace_username)
end
def purge(object)
return true if object.is_a? Fog::Storage::Rackspace::File
raise Fog::Errors::NotImplemented.new("#{object.class} does not support CDN purging") if object
end
end
class Real
include Base
def initialize(options={})
@connection_options = options[:connection_options] || {}
credentials = Fog::Rackspace.authenticate(options, @connection_options)
@ -66,15 +106,6 @@ module Fog
end
end
def purge(object)
if object.is_a? Fog::Storage::Rackspace::File
delete_object object.directory.key, object.key
else
raise Fog::Errors::NotImplemented.new("#{object.class} does not support CDN purging") if object
end
true
end
def enabled?
@enabled
end
@ -82,6 +113,15 @@ module Fog
def reload
@cdn_connection.reset
end
# Purges a file from the CDN edge nodes.
#
# @param file [Fog::Storage::Rackspace::File] the file to purge
# @return [true] when the purge request was issued
# @raise [Fog::Errors::NotImplemented] when the argument is not a CDN-purgeable file
def purge(file)
  unless file.is_a? Fog::Storage::Rackspace::File
    # BUG FIX: the message previously interpolated `object.class`, but `object`
    # is undefined in this method — raising here produced a NameError instead
    # of the intended NotImplemented error. Use the actual argument.
    raise Fog::Errors::NotImplemented.new("#{file.class} does not support CDN purging")
  end
  delete_object file.directory.key, file.key
  true
end
def request(params, parse_json = true)
begin

View file

@ -1,6 +1,9 @@
module Fog
module Rackspace
module MockData
NOT_FOUND_ID = "NOT-FOUND"
def data
@@data ||= Hash.new do |hash, key|
hash[key] = begin
@ -101,12 +104,13 @@ module Fog
}
#Mock Data Hash
{
h = {
#Compute V2
:flavors => {flavor_id => flavor},
:images => {image_id => image},
:flavors => Hash.new { |h,k| h[k] = flavor unless k == NOT_FOUND_ID},
:images => Hash.new { |h,k| h[k] = image unless k == NOT_FOUND_ID },
:networks => Hash.new { |h,k| h[k] = network unless k == NOT_FOUND_ID },
:servers => {},
:networks => { network_id => network },
#Block Storage
:volumes => {},
@ -114,6 +118,13 @@ module Fog
:volume_attachments => [],
:volume_types => {volume_type1_id => volume_type1, volume_type2_id => volume_type2},
}
# seed with initial data
h[:flavors][flavor_id] = flavor
h[:images][image_id] = image
h[:networks][network_id] = network
h
end
end[@rackspace_api_key]
end

View file

@ -19,15 +19,14 @@ module Fog
end
# Creates a new server and populates ssh keys
# @note This method is incompatible with Cloud Servers utilizing RackConnect. RackConnect users
# should use server personalization to install keys. Please see Server#personality for more information.
# @example
# service = Fog::Compute.new(:provider => 'rackspace',
# :version => :v2,
# service.servers.bootstrap :name => 'bootstrap-server',
# :flavor_id => service.flavors.first.id,
# :image_id => service.images.find {|img| img.name =~ /Ubuntu/}.id,
# :public_key_path => '~/.ssh/fog_rsa.pub',
# :private_key_path => '~/.ssh/fog_rsa')
#
# service.servers.bootstrap :name => 'bootstap-server',
# :flavor_id => service.flavors.first.id
# :image_id => service.servers.first.id
# :private_key_path => '~/.ssh/fog_rsa'
#
def bootstrap(new_attributes = {})
server = create(new_attributes)

View file

@ -0,0 +1,24 @@
require 'fog/core/model'
module Fog
module Storage
class Rackspace
# Represents the Cloud Files account itself. Attributes are populated from
# the account-level headers returned by a HEAD request (see #reload).
class Account < Fog::Model
# Shared secret used for signing temporary URLs.
attribute :meta_temp_url_key, :aliases => 'X-Account-Meta-Temp-Url-Key'
# Number of containers in the account.
attribute :container_count, :aliases => 'X-Account-Container-Count', :type => :integer
# Total bytes stored across the account.
attribute :bytes_used, :aliases => 'X-Account-Bytes-Used', :type => :integer
# Total number of objects stored across the account.
attribute :object_count, :aliases => 'X-Account-Object-Count', :type => :integer
# Persists the temp URL key to the service. Always returns true;
# failures surface as exceptions from the underlying request.
def save
service.post_set_meta_temp_url_key meta_temp_url_key
true
end
# Re-reads the account-level headers and merges them into this model.
def reload
response = service.head_containers
merge_attributes response.headers
end
end
end
end
end

View file

@ -26,8 +26,11 @@ module Fog
directory.merge_attributes(key => value)
end
end
directory.metadata = Metadata.from_headers(directory, data.headers)
directory.files.merge_attributes(options)
directory.files.instance_variable_set(:@loaded, true)
data.body.each do |file|
directory.files << directory.files.new(file)
end

View file

@ -1,5 +1,6 @@
require 'fog/core/model'
require 'fog/rackspace/models/storage/files'
require 'fog/rackspace/models/storage/metadata'
module Fog
module Storage
@ -9,14 +10,33 @@ module Fog
identity :key, :aliases => 'name'
attribute :bytes, :aliases => 'X-Container-Bytes-Used'
attribute :count, :aliases => 'X-Container-Object-Count'
attribute :bytes, :aliases => 'X-Container-Bytes-Used', :type => :integer
attribute :count, :aliases => 'X-Container-Object-Count', :type => :integer
attribute :cdn_cname
attr_writer :public, :public_url
# Sets the container metadata, coercing a plain Hash into a
# Fog::Storage::Rackspace::Metadata wrapper bound to this directory.
# Returns the stored Metadata object.
def metadata=(hash)
if hash.is_a? Fog::Storage::Rackspace::Metadata
attributes[:metadata] = hash
else
attributes[:metadata] = Fog::Storage::Rackspace::Metadata.new(self, hash)
end
attributes[:metadata]
end
# Returns the container metadata, lazily fetching it from the service with a
# HEAD request on first access and caching the result in attributes.
def metadata
unless attributes[:metadata]
response = service.head_container(key)
attributes[:metadata] = Fog::Storage::Rackspace::Metadata.from_headers(self, response.headers)
end
attributes[:metadata]
end
def destroy
requires :key
service.delete_container(key)
service.cdn.post_container(key, 'X-CDN-Enabled' => 'False')
service.cdn.publish_container(self, false) if cdn_enabled?
true
rescue Excon::Errors::NotFound
false
@ -31,54 +51,59 @@ module Fog
end
end
def public=(new_public)
@public = new_public
def public?
if @public.nil?
@public ||= (key && public_url) ? true : false
end
@public
end
def public?
@public ||= !public_url.nil?
def reload
@public = nil
@urls = nil
@files = nil
super
end
def public_url
requires :key
@public_url ||= begin
begin response = service.cdn.head_container(key)
if response.headers['X-Cdn-Enabled'] == 'True'
if service.rackspace_cdn_ssl == true
response.headers['X-Cdn-Ssl-Uri']
else
cdn_cname || response.headers['X-Cdn-Uri']
end
end
rescue Fog::Service::NotFound
nil
end
end
def public_url
return nil if urls.empty?
return urls[:ssl_uri] if service.ssl?
cdn_cname || urls[:uri]
end
def ios_url
urls[:ios_uri]
end
def streaming_url
urls[:streaming_uri]
end
def save
requires :key
service.put_container(key)
if service.cdn && public?
# if public and CDN connection then update cdn to public
uri_header = 'X-CDN-URI'
if service.rackspace_cdn_ssl == true
uri_header = 'X-CDN-SSL-URI'
end
@public_url = service.cdn.put_container(key, 'X-CDN-Enabled' => 'True').headers[uri_header]
elsif service.cdn && !public?
service.cdn.put_container(key, 'X-CDN-Enabled' => 'False')
@public_url = nil
elsif !service.cdn && public?
# if public but no CDN connection then error
raise(Fog::Storage::Rackspace::Error.new("Directory can not be set as :public without a CDN provided"))
end
create_container
raise Fog::Storage::Rackspace::Error.new("Directory can not be set as :public without a CDN provided") if public? && !cdn_enabled?
@urls = service.cdn.publish_container(self, public?)
true
end
private
def cdn_enabled?
service.cdn && service.cdn.enabled?
end
def urls
requires :key
return {} unless cdn_enabled?
@urls ||= service.cdn.urls(self)
end
def create_container
headers = attributes[:metadata].nil? ? {} : metadata.to_headers
service.put_container(key, headers)
end
end
end
end
end

View file

@ -14,6 +14,11 @@ module Fog
attribute :last_modified, :aliases => ['last_modified', 'Last-Modified'], :type => :time
attribute :access_control_allow_origin, :aliases => ['Access-Control-Allow-Origin']
attribute :origin, :aliases => ['Origin']
attr_writer :public
attr_accessor :directory
attr_writer :public
def body
attributes[:body] ||= if last_modified
@ -27,10 +32,6 @@ module Fog
attributes[:body] = new_body
end
def directory
@directory
end
def copy(target_directory_key, target_file_key, options={})
requires :directory, :key
options['Content-Type'] ||= content_type if content_type
@ -47,10 +48,19 @@ module Fog
true
end
def metadata
@metadata ||= headers_to_metadata
def metadata=(hash)
if hash.is_a? Fog::Storage::Rackspace::Metadata
attributes[:metadata] = hash
else
attributes[:metadata] = Fog::Storage::Rackspace::Metadata.new(self, hash)
end
attributes[:metadata]
end
def metadata
attributes[:metadata] ||= Fog::Storage::Rackspace::Metadata.new(self)
end
def owner=(new_owner)
if new_owner
attributes[:owner] = {
@ -60,19 +70,22 @@ module Fog
end
end
def public=(new_public)
new_public
end
def public?
directory.public?
end
def public_url
requires :key
self.collection.get_url(self.key)
Files::file_url directory.public_url, key
end
def ios_url
Files::file_url directory.ios_url, key
end
def streaming_url
Files::file_url directory.streaming_url, key
end
def purge_from_cdn
if public?
service.cdn.purge(self)
@ -86,12 +99,11 @@ module Fog
options['Content-Type'] = content_type if content_type
options['Access-Control-Allow-Origin'] = access_control_allow_origin if access_control_allow_origin
options['Origin'] = origin if origin
options.merge!(metadata_to_headers)
options.merge!(metadata.to_headers)
data = service.put_object(directory.key, key, body, options)
update_attributes_from(data)
refresh_metadata
self.content_length = Fog::Storage.get_body_size(body)
self.content_type ||= Fog::Storage.get_content_type(body)
true
@ -99,59 +111,6 @@ module Fog
private
def directory=(new_directory)
@directory = new_directory
end
def refresh_metadata
metadata.reject! {|k, v| v.nil? }
end
def headers_to_metadata
key_map = key_mapping
Hash[metadata_attributes.map {|k, v| [key_map[k], v] }]
end
def key_mapping
key_map = metadata_attributes
key_map.each_pair {|k, v| key_map[k] = header_to_key(k)}
end
def header_to_key(opt)
opt.gsub(metadata_prefix, '').split('-').map {|k| k[0, 1].downcase + k[1..-1]}.join('_').to_sym
end
def metadata_to_headers
hash = {}
metadata.each_pair do |k,v|
key = metakey(k,v)
hash[key] = v
end
hash
end
def metakey(key, value)
prefix = value.nil? ? "X-Remove-Object-Meta-" : "X-Object-Meta-"
prefix + key.to_s.split(/[-_]/).map(&:capitalize).join('-')
end
def metadata_attributes
if last_modified
headers = service.head_object(directory.key, self.key).headers
headers.reject! {|k, v| !metadata_attribute?(k)}
else
{}
end
end
def metadata_attribute?(key)
key.to_s =~ /^#{metadata_prefix}/
end
def metadata_prefix
"X-Object-Meta-"
end
def update_attributes_from(data)
merge_attributes(data.headers.reject {|key, value| ['Content-Length', 'Content-Type'].include?(key)})
end

View file

@ -55,10 +55,13 @@ module Fog
def get(key, &block)
requires :directory
data = service.get_object(directory.key, key, &block)
metadata = Metadata.from_headers(data.headers)
file_data = data.headers.merge({
:body => data.body,
:key => key
:key => key,
:metadata => metadata
})
new(file_data)
rescue Fog::Storage::Rackspace::NotFound
nil
@ -67,10 +70,10 @@ module Fog
def get_url(key)
requires :directory
if self.directory.public_url
"#{self.directory.public_url}/#{Fog::Rackspace.escape(key, '/')}"
Files::file_url directory.public_url, key
end
end
def head(key, options = {})
requires :directory
data = service.head_object(directory.key, key)
@ -87,8 +90,12 @@ module Fog
super({ :directory => directory }.merge!(attributes))
end
end
# Builds the URL for +key+ beneath +path+ (a container-level URL), escaping
# the key but preserving '/' separators. Returns nil when +path+ is nil,
# i.e. when the container has no URL of the requested type.
def self.file_url(path, key)
  if path
    "#{path}/#{Fog::Rackspace.escape(key, '/')}"
  end
end
end
end
end
end

View file

@ -0,0 +1,112 @@
module Fog
module Storage
class Rackspace
# Wraps the user metadata of a Cloud Files container (Directory) or object
# (File). Tracks deletions separately so that #to_headers can emit the
# appropriate "X-Remove-*" headers on save.
class Metadata
# Header prefixes for object (File) metadata.
OBJECT_META_PREFIX = "X-Object-Meta-"
OBJECT_REMOVE_META_PREFIX = "X-Remove-Object-Meta-"
# Header prefixes for container (Directory) metadata.
CONTAINER_META_PREFIX = "X-Container-Meta-"
CONTAINER_REMOVE_META_PREFIX = "X-Remove-Container-Meta-"
# Cloud Files will ignore headers without a value
DUMMY_VALUE = 1
CONTAINER_KEY_REGEX = /^#{CONTAINER_META_PREFIX}(.*)/
OBJECT_KEY_REGEX = /^#{OBJECT_META_PREFIX}(.*)/
# data   - backing Hash of symbol keys to values
# parent - the Directory or File this metadata belongs to (drives prefix choice)
attr_reader :data, :parent
def initialize(parent, hash={})
@data = hash || {}
@deleted_hash = {}
@parent = parent
end
# Removes a key from the live data and records it so #to_headers emits a
# remove-prefixed header for it.
def delete(key)
data.delete(key)
@deleted_hash[key] = nil
end
# Converts live and deleted entries into request headers. Deleted (nil)
# entries get the remove prefix and DUMMY_VALUE, since Cloud Files drops
# headers that have no value.
def to_headers
headers = {}
h = data.merge(@deleted_hash)
h.each_pair do |k,v|
key = to_header_key(k,v)
headers[key] = v || DUMMY_VALUE
end
headers
end
# Builds a Metadata object from response headers, keeping only headers that
# match the parent's metadata prefix.
def self.from_headers(parent, headers)
metadata = Metadata.new(parent)
headers.each_pair do |k, v|
key = metadata.send(:to_key, k)
next unless key
metadata.data[key] = v
end
metadata
end
# Reports methods of the underlying data hash as supported, so this object
# can be used like a Hash.
def respond_to?(method_sym, include_private = false)
super(method_sym, include_private) || data.respond_to?(method_sym, include_private)
end
# Delegates unknown methods (e.g. [], []=, each_pair) to the data hash.
def method_missing(method, *args, &block)
data.send(method, *args, &block)
end
private
# Chooses the set/update header prefix based on the parent's type.
def meta_prefix
if parent.is_a? Fog::Storage::Rackspace::Directory
CONTAINER_META_PREFIX
elsif parent.is_a? Fog::Storage::Rackspace::File
OBJECT_META_PREFIX
else
raise "Metadata prefix is unknown for #{parent.class}"
end
end
# Chooses the remove header prefix based on the parent's type.
def remove_meta_prefix
if parent.is_a? Fog::Storage::Rackspace::Directory
CONTAINER_REMOVE_META_PREFIX
elsif parent.is_a? Fog::Storage::Rackspace::File
OBJECT_REMOVE_META_PREFIX
else
raise "Remove Metadata prefix is unknown for #{parent.class}"
end
end
# Regex matching metadata headers for the parent's type; capture 1 is the
# bare key portion after the prefix.
def meta_prefix_regex
if parent.is_a? Fog::Storage::Rackspace::Directory
CONTAINER_KEY_REGEX
elsif parent.is_a? Fog::Storage::Rackspace::File
OBJECT_KEY_REGEX
else
raise "Metadata prefix is unknown for #{parent.class}"
end
end
# Converts a header name like "X-Container-Meta-Image-Size" into :image_size;
# returns nil for headers that are not metadata for this parent type.
def to_key(key)
m = key.match meta_prefix_regex
return nil unless m && m[1]
a = m[1].split('-')
a.collect!(&:downcase)
str = a.join('_')
str.to_sym
end
# Converts a symbol key back into a header name; nil values use the remove
# prefix so the service deletes the entry.
def to_header_key(key, value)
prefix = value.nil? ? remove_meta_prefix : meta_prefix
prefix + key.to_s.split(/[-_]/).collect(&:capitalize).join('-')
end
end
end
end
end

View file

@ -17,7 +17,6 @@ module Fog
end
request(params.merge!({
:block => block,
:expects => 200,
:method => 'GET',
:path => "#{Fog::Rackspace.escape(container)}/#{Fog::Rackspace.escape(object)}"

View file

@ -8,10 +8,11 @@ module Fog
# ==== Parameters
# * name<~String> - Name for container, should be < 256 bytes and must not contain '/'
#
def put_container(name)
def put_container(name, options={})
request(
:expects => [201, 202],
:method => 'PUT',
:headers => options,
:path => Fog::Rackspace.escape(name)
)
end

View file

@ -14,6 +14,7 @@ module Fog
collection :directories
model :file
collection :files
model :account
request_path 'fog/rackspace/requests/storage'
request :copy_object
@ -38,7 +39,8 @@ module Fog
:provider => 'Rackspace',
:rackspace_api_key => @rackspace_api_key,
:rackspace_auth_url => @rackspace_auth_url,
:rackspace_username => @rackspace_username
:rackspace_username => @rackspace_username,
:rackspace_cdn_ssl => @rackspace_cdn_ssl
)
if @cdn.enabled?
@cdn
@ -97,6 +99,15 @@ module Fog
Excon.defaults[:ssl_verify_peer] = false if options[:rackspace_servicenet] == true
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
end
def account
account = Fog::Storage::Rackspace::Account.new(:service => self)
account.reload
end
def ssl?
!rackspace_cdn_ssl.nil?
end
def reload
@connection.reset

View file

@ -8,37 +8,15 @@ module Fog
def self.new(attributes)
attributes = attributes.dup # prevent delete from having side effects
case provider = attributes.delete(:provider).to_s.downcase.to_sym
when :atmos
require 'fog/atmos/storage'
Fog::Storage::Atmos.new(attributes)
when :aws
require 'fog/aws/storage'
Fog::Storage::AWS.new(attributes)
when :google
require 'fog/google/storage'
Fog::Storage::Google.new(attributes)
when :hp
require 'fog/hp/storage'
Fog::Storage::HP.new(attributes)
when :ibm
require 'fog/ibm/storage'
Fog::Storage::IBM.new(attributes)
when :internetarchive
require 'fog/internet_archive/storage'
Fog::Storage::InternetArchive.new(attributes)
when :local
require 'fog/local/storage'
Fog::Storage::Local.new(attributes)
when :ninefold
require 'fog/ninefold/storage'
Fog::Storage::Ninefold.new(attributes)
when :rackspace
require 'fog/rackspace/storage'
Fog::Storage::Rackspace.new(attributes)
when :openstack
require 'fog/openstack/storage'
Fog::Storage::OpenStack.new(attributes)
else
if self.providers.include?(provider)
require "fog/#{provider}/storage"
return Fog::Storage.const_get(Fog.providers[provider]).new(attributes)
end
raise ArgumentError.new("#{provider} is not a recognized storage provider")
end
end

View file

@ -7,13 +7,13 @@ module Fog
def self.new(attributes)
attributes = attributes.dup # Prevent delete from having side effects
case provider = attributes.delete(:provider).to_s.downcase.to_sym
when :openstack
require 'fog/openstack/volume'
Fog::Volume::OpenStack.new(attributes)
else
raise ArgumentError.new("#{provider} has no identity service")
provider = attributes.delete(:provider).to_s.downcase.to_sym
if self.providers.include?(provider)
require "fog/#{provider}/volume"
return Fog::Volume.const_get(Fog.providers[provider]).new(attributes)
end
raise ArgumentError.new("#{provider} has no identity service")
end
def self.providers

View file

@ -5,7 +5,7 @@ module Fog
def vm_reconfig_hardware(options = {})
raise ArgumentError, "hardware_spec is a required parameter" unless options.has_key? 'hardware_spec'
raise ArgumentError, "instance_uuid is a required parameter" unless options.has_key? 'instance_uuid'
vm_mob_ref = get_vm_by_ref(options['instance_uuid'])
vm_mob_ref = get_vm_ref(options['instance_uuid'])
task = vm_mob_ref.ReconfigVM_Task(:spec => RbVmomi::VIM.VirtualMachineConfigSpec(options['hardware_spec']))
task.wait_for_completion
{ 'task_state' => task.info.state }

View file

@ -0,0 +1,8 @@
# Model tests for a single AWS Data Pipeline.
# Descriptor changed from "pipelines" to "pipeline": the collection test file
# uses the identical "AWS::DataPipeline | pipelines" descriptor, which made the
# two suites indistinguishable in Shindo output.
Shindo.tests("AWS::DataPipeline | pipeline", ['aws', 'data_pipeline']) do
  # DataPipeline has no mock implementation, so skip under Fog.mock!.
  pending if Fog.mocking?

  unique_id = uniq_id

  model_tests(Fog::AWS[:data_pipeline].pipelines, { :id => unique_id, :name => "#{unique_id}-name", :unique_id => unique_id }) do
    # The pipeline is only usable once it reports a state.
    @instance.wait_for { state }
  end
end

View file

@ -0,0 +1,8 @@
# Collection tests for AWS Data Pipelines.
Shindo.tests("AWS::DataPipeline | pipelines", ['aws', 'data_pipeline']) do
# DataPipeline has no mock implementation, so skip under Fog.mock!.
pending if Fog.mocking?
unique_id = uniq_id
# Runs the shared collection test suite against a freshly created pipeline.
collection_tests(Fog::AWS[:data_pipeline].pipelines, { :id => unique_id, :name => "#{unique_id}-name", :unique_id => unique_id }) do
# The pipeline is only usable once it reports a state.
@instance.wait_for { state }
end
end

View file

@ -0,0 +1,44 @@
# Shindo format matchers describing the expected shape of DataPipeline
# API responses used by the request tests.
class AWS
module DataPipeline
module Formats
# Minimal response: mutating calls return the pipeline's id.
BASIC = {
'pipelineId' => String,
}
# Response shape for ListPipelines.
LIST_PIPELINES = {
"hasMoreResults" => Fog::Nullable::Boolean,
"marker" => Fog::Nullable::String,
"pipelineIdList" => [
{
"id" => String,
"name" => String,
}
]
}
# Response shape for DescribePipelines; each pipeline carries a list of
# key/value fields.
DESCRIBE_PIPELINES = {
"pipelineDescriptionList" => [
{
"description" => Fog::Nullable::String,
"name" => String,
"pipelineId" => String,
"fields" => [
{
"key" => String,
"refValue" => Fog::Nullable::String,
"stringValue" => Fog::Nullable::String,
}
]
}
]
}
# Response shape for PutPipelineDefinition.
PUT_PIPELINE_DEFINITION = {
"errored" => Fog::Boolean,
"validationErrors" => Fog::Nullable::Array,
}
end
end
end

View file

@ -0,0 +1,54 @@
# Request-level tests for the DataPipeline API: create, list, describe,
# define, activate and delete a pipeline, validating response formats.
Shindo.tests('AWS::DataPipeline | pipeline_tests', ['aws', 'data_pipeline']) do
# DataPipeline has no mock implementation, so skip under Fog.mock!.
pending if Fog.mocking?
# Shared across the tests below; set by #create_pipeline.
@pipeline_id = nil
tests('success') do
tests("#create_pipeline").formats(AWS::DataPipeline::Formats::BASIC) do
unique_id = 'fog-test-pipeline-unique-id'
name = 'fog-test-pipeline-name'
description = 'Fog test pipeline'
result = Fog::AWS[:data_pipeline].create_pipeline(unique_id, name, description)
# Remember the id so the remaining requests can target this pipeline.
@pipeline_id = result['pipelineId']
result
end
tests("#list_pipelines").formats(AWS::DataPipeline::Formats::LIST_PIPELINES) do
Fog::AWS[:data_pipeline].list_pipelines()
end
tests("#describe_pipelines").formats(AWS::DataPipeline::Formats::DESCRIBE_PIPELINES) do
ids = [@pipeline_id]
Fog::AWS[:data_pipeline].describe_pipelines(ids)
end
tests("#put_pipeline_definition").formats(AWS::DataPipeline::Formats::PUT_PIPELINE_DEFINITION) do
# Minimal definition: a daily schedule plus a default object that uses it.
objects = [
{
"id" => "Nightly",
"type" => "Schedule",
"startDateTime" => Time.now.strftime("%Y-%m-%dT%H:%M:%S"),
"period" => "24 hours",
},
{
"id" => "Default",
"role" => "role-dumps",
"resourceRole" => "role-dumps-inst",
"schedule" => { "ref" => "Nightly" },
},
]
Fog::AWS[:data_pipeline].put_pipeline_definition(@pipeline_id, objects)
end
tests("#activate_pipeline") do
Fog::AWS[:data_pipeline].activate_pipeline(@pipeline_id)
end
# Clean up the pipeline created above.
tests("#delete_pipeline") do
Fog::AWS[:data_pipeline].delete_pipeline(@pipeline_id)
end
end
end

View file

@ -1,6 +1,6 @@
Shindo.tests('Fog::Connection', 'core') do
tests('user_agent').returns("fog/#{Fog::VERSION}") do
conn = Fog::Connection.new("http://www.testserviceurl.com", false, {})
conn.instance_variable_get(:@excon).connection[:headers]['User-Agent']
conn.instance_variable_get(:@excon).data[:headers]['User-Agent']
end
end

View file

@ -1,6 +1,7 @@
Shindo.tests("Fog::Compute[:openstack] | server", ['openstack']) do
tests('success') do
tests('#security_groups').succeeds do
fog = Fog::Compute[:openstack]
@ -20,7 +21,6 @@ Shindo.tests("Fog::Compute[:openstack] | server", ['openstack']) do
server.save
found_groups = server.security_groups
returns(1) { found_groups.length }
group = found_groups.first
@ -40,6 +40,47 @@ Shindo.tests("Fog::Compute[:openstack] | server", ['openstack']) do
my_group.destroy if my_group
end
end
tests('#metadata').succeeds do
fog = Fog::Compute[:openstack]
begin
flavor = fog.flavors.first.id
image = fog.images.first.id
server = fog.servers.new(:name => 'test server',
:metadata => {"foo" => "bar"},
:flavor_ref => flavor,
:image_ref => image)
server.save
returns(1) { server.metadata.length }
server.metadata.each do |datum|
datum.value = 'foo'
datum.save
datum.destroy
end
ensure
unless Fog.mocking? then
server.destroy if server
begin
fog.servers.get(server.id).wait_for do false end
rescue Fog::Errors::Error
# ignore, server went away
end
end
end
end
end
end

View file

@ -1,7 +1,7 @@
Shindo.tests("Fog::Network[:openstack] | network", ['openstack']) do
tests('success') do
tests('#create').succeeds do
@instance = Fog::Network[:openstack].networks.create(:name => 'net_name',
:shared => false,
@ -9,6 +9,26 @@ Shindo.tests("Fog::Network[:openstack] | network", ['openstack']) do
:tenant_id => 'tenant_id')
!@instance.id.nil?
end
tests('have attributes') do
attributes = [
:name,
:subnets,
:shared,
:status,
:admin_state_up,
:tenant_id,
:provider_network_type,
:provider_physical_network,
:provider_segmentation_id,
:router_external
]
tests("The network model should respond to") do
attributes.each do |attribute|
test("#{attribute}") { @instance.respond_to? attribute }
end
end
end
tests('#update').succeeds do
@instance.name = 'new_net_name'

View file

@ -0,0 +1,15 @@
# Smoke tests for the OpenStack volume service's collections.
Shindo.tests('Fog::Volume[:openstack]', ['openstack', 'volume']) do
  volume = Fog::Volume[:openstack]

  tests("Volumes collection") do
    %w{ volumes }.each do |collection|
      test("it should respond to #{collection}") { volume.respond_to? collection }
      # Use Object#send instead of eval: same dynamic dispatch without
      # building and evaluating a code string.
      test("it should respond to #{collection}.all") { volume.send(collection).respond_to? 'all' }
      # not implemented
      #test("it should respond to #{collection}.get") { volume.send(collection).respond_to? 'get' }
    end
  end
end

View file

@ -0,0 +1,78 @@
# Integration tests for the shared Rackspace CDN Base behavior
# (publish_container, urls, urls_from_headers).
Shindo.tests('Fog::CDN::Rackspace', ['rackspace']) do
# These hit the live API; skip under Fog.mock!.
pending if Fog.mocking?
# Raw headers for the test container, used to inspect CDN state directly.
def container_meta_attributes
@cdn.head_container(@directory.key).headers
end
# Nils out every metadata entry so save will emit remove headers.
def clear_metadata
@instance.metadata.tap do |metadata|
metadata.each_pair {|k, v| metadata[k] = nil }
end
end
directory_attributes = {
# Add a random suffix to prevent collision
:key => "fogfilestests-#{rand(65536)}"
}
@directory = Fog::Storage[:rackspace].directories.create(directory_attributes)
@cdn = @directory.service.cdn
begin
tests('publish_container').succeeds do
# Freshly created containers are not CDN-enabled.
returns(nil, "CDN is not enabled") { container_meta_attributes['X-CDN-Enabled'] }
urls = @cdn.publish_container @directory
# Publishing should return every URL type declared in URI_HEADERS.
returns(true, "hash contains expected urls") { Fog::CDN::Rackspace::Base::URI_HEADERS.values.all? { |url_type| urls[url_type] } }
returns("True", "CDN is enabled") { container_meta_attributes['X-Cdn-Enabled'] }
end
tests('urls') do
tests('CDN enabled container').returns(false) do
@cdn.publish_container @directory
@cdn.urls(@directory).empty?
end
tests('Non-CDN enabled container').returns(true) do
# publish=false unpublishes the container.
@cdn.publish_container @directory, false
@cdn.urls(@directory).empty?
end
tests('Non-existent container').returns(true) do
non_existent_container = Fog::Storage::Rackspace::Directory.new :key => "non-existent"
@cdn.urls(non_existent_container).empty?
end
end
tests('urls_from_headers') do
# A captured real response, exercising the header-to-symbol mapping.
headers = {
"X-Cdn-Streaming-Uri"=>"http://168e307d41afe64f1a62-d1e9259b2132e81da48ed3e1e802ef22.r2.stream.cf1.rackcdn.com",
"X-Cdn-Uri"=>"http://6e8f4bf5125c9c2e4e3a-d1e9259b2132e81da48ed3e1e802ef22.r2.cf1.rackcdn.com",
"Date"=>"Fri, 15 Feb 2013 18:36:41 GMT",
"Content-Length"=>"0",
"X-Trans-Id"=>"tx424df53b79bc43fe994d3cec0c4d2d8a",
"X-Ttl"=>"3600",
"X-Cdn-Ssl-Uri"=>"https://f83cb7d39e0b9ff9581b-d1e9259b2132e81da48ed3e1e802ef22.ssl.cf1.rackcdn.com",
"X-Cdn-Ios-Uri"=>"http://a590286a323fec6aed22-d1e9259b2132e81da48ed3e1e802ef22.iosr.cf1.rackcdn.com",
"X-Cdn-Enabled"=>"True",
"Content-Type"=>"text/html; charset=UTF-8",
"X-Log-Retention"=>"False"
}
urls = @cdn.send(:urls_from_headers, headers)
# Only the four X-Cdn-*-Uri headers should be mapped.
returns(4) { urls.size }
returns("http://168e307d41afe64f1a62-d1e9259b2132e81da48ed3e1e802ef22.r2.stream.cf1.rackcdn.com") { urls[:streaming_uri] }
returns("http://6e8f4bf5125c9c2e4e3a-d1e9259b2132e81da48ed3e1e802ef22.r2.cf1.rackcdn.com") { urls[:uri] }
returns("https://f83cb7d39e0b9ff9581b-d1e9259b2132e81da48ed3e1e802ef22.ssl.cf1.rackcdn.com") { urls[:ssl_uri] }
returns("http://a590286a323fec6aed22-d1e9259b2132e81da48ed3e1e802ef22.iosr.cf1.rackcdn.com") { urls[:ios_uri] }
end
tests('purge') do
# TODO: purge behavior is not yet covered.
pending
end
ensure
@directory.destroy if @directory
end
end

View file

@ -12,6 +12,6 @@ Shindo.tests('Fog::Compute::RackspaceV2 | flavors', ['rackspace']) do
end
tests("failure").returns(nil) do
service.flavors.get('some_random_identity')
service.flavors.get(Fog::Rackspace::MockData::NOT_FOUND_ID)
end
end

View file

@ -15,6 +15,6 @@ Shindo.tests('Fog::Compute::RackspaceV2 | images', ['rackspace']) do
end
tests("failure").returns(nil) do
service.images.get('some_random_identity')
service.images.get(Fog::Rackspace::MockData::NOT_FOUND_ID)
end
end

View file

@ -0,0 +1,28 @@
# Integration tests for the Cloud Files Account model (load/reload/save).
Shindo.tests('Fog::Rackspace::Storage | account', ['rackspace']) do
  # These hit the live API; skip under Fog.mock!.
  pending if Fog.mocking?

  @account = Fog::Storage[:rackspace].account

  tests('load') do
    # The account model is populated from account-level HEAD headers.
    headers = @account.service.head_containers.headers
    returns(headers['X-Account-Meta-Temp-Url-Key']) { @account.meta_temp_url_key }
    returns(headers['X-Account-Container-Count'].to_i) { @account.container_count }
    returns(headers['X-Account-Bytes-Used'].to_i) { @account.bytes_used }
    returns(headers['X-Account-Object-Count'].to_i) { @account.object_count }
  end

  tests('reload') do
    @account.reload
  end

  tests('save') do
    key = "testing-update-#{Time.now.to_i}"
    # BUG FIX: the key was previously regenerated with a second Time.now.to_i
    # call; crossing a second boundary between the two calls made the final
    # assertion flaky. Reuse the value computed above.
    @account.meta_temp_url_key = key
    @account.save
    headers = @account.service.head_containers.headers
    returns(key) { headers['X-Account-Meta-Temp-Url-Key'] }
  end
end

View file

@ -0,0 +1,29 @@
# Integration tests for the directories collection (#get with metadata,
# counts and contained files).
Shindo.tests('Fog::Rackspace::Storage | directories', ['rackspace']) do
# These hit the live API; skip under Fog.mock!.
pending if Fog.mocking?
@service = Fog::Storage[:rackspace]
begin
# Timestamp suffix keeps parallel runs from colliding.
@name = "fog-directories-test-#{Time.now.to_i.to_s}"
@filename = 'lorem.txt'
@dir = @service.directories.create :key => @name, :metadata => {:fog_test => true}
@file = @dir.files.create :key => @filename, :body => lorem_file
tests('#get').succeeds do
instance = @service.directories.get @name
returns(false) { instance.nil? }
# Metadata round-trips as a string value.
returns('true') { instance.metadata[:fog_test] }
returns(@name) { instance.key }
returns(1) { instance.count }
returns( Fog::Storage.get_body_size(lorem_file)) {instance.bytes }
returns(@filename) { instance.files.first.key }
end
ensure
# Files must be destroyed before the containing directory.
@file.destroy if @file
@dir.destroy if @dir
end
end

View file

@ -0,0 +1,141 @@
# Integration tests for the Directory model: CDN publication state, the
# various URL accessors, reload semantics, and metadata round-tripping.
Shindo.tests('Fog::Rackspace::Storage | directory', ['rackspace']) do
  # These hit the live API; skip under Fog.mock!.
  pending if Fog.mocking?

  @service = Fog::Storage[:rackspace]

  # Returns only the X-Container-Meta-* headers for the directory under test.
  def container_meta_attributes
    @service.head_container(@instance.key).headers.reject {|k, v| !(k =~ /X-Container-Meta-/)}
  end

  directory_attributes = {
    # Add a random suffix to prevent collision
    :key => "fog-directory-tests-#{rand(65536)}"
  }

  model_tests(@service.directories, directory_attributes, Fog.mocking?) do
    # A freshly created directory is private: no URLs of any type.
    tests('#public?').returns(false) do
      @instance.public?
    end

    tests('#public_url') do
      tests('http').returns(nil) do
        @instance.public_url
      end

      @instance.cdn_cname = "my_cname.com"
      tests('cdn_cname').returns(nil) do
        @instance.public_url
      end
      @instance.cdn_cname = nil

      @service.instance_variable_set "@rackspace_cdn_ssl", true
      tests('ssl').returns(nil) do
        @instance.public_url
      end
      @service.instance_variable_set "@rackspace_cdn_ssl", nil
    end

    tests('#ios_url').returns(nil) do
      @instance.ios_url
    end

    tests('#streaming_url').returns(nil) do
      @instance.streaming_url
    end

    tests('cdn') do
      # Publish the directory; all URL accessors should now resolve.
      @instance.public = true
      @instance.save

      tests('#public?').returns(true) do
        @instance.public?
      end

      tests('#public_url') do
        tests('http').returns(0) do
          @instance.public_url =~ /http:\/\//
        end

        # A CNAME takes precedence over the raw CDN URI.
        @instance.cdn_cname = "my_cname.com"
        tests('cdn_cname').returns(0) do
          @instance.public_url =~ /my_cname\.com/
        end
        @instance.cdn_cname = nil

        # With rackspace_cdn_ssl set, the SSL URI is preferred.
        @service.instance_variable_set "@rackspace_cdn_ssl", true
        tests('ssl').returns(0) do
          @instance.public_url =~ /https:\/\/.+\.ssl\./
        end
        @service.instance_variable_set "@rackspace_cdn_ssl", nil
      end

      tests('#ios_url').returns(0) do
        @instance.ios_url =~ /http:\/\/.+\.iosr\./
      end

      tests('#streaming_url').returns(0) do
        @instance.streaming_url =~ /http:\/\/.+\.stream\./
      end
    end

    # reload must clear all cached instance state.
    tests("reload") do
      @instance.reload
      returns(nil) { @instance.instance_variable_get("@urls") }
      returns(nil) { @instance.instance_variable_get("@files") }
      returns(nil) { @instance.instance_variable_get("@public") }
    end
  end

  directory_attributes[:metadata] = {:draft => 'true'}

  tests('metadata') do
    pending if Fog.mocking?
    model_tests(@service.directories, directory_attributes, Fog.mocking?) do
      tests('sets metadata on create').returns('true') do
        @instance.metadata.data
        container_meta_attributes["X-Container-Meta-Draft"]
      end

      tests('update metadata').returns({"X-Container-Meta-Draft"=>"true", "X-Container-Meta-Color"=>"green"}) do
        @instance.metadata[:color] = 'green'
        @instance.save
        container_meta_attributes
      end

      # Setting a value to nil emits an X-Remove-Container-Meta-* header.
      tests('set metadata to nil').returns({"X-Container-Meta-Draft"=>"true"}) do
        @instance.metadata[:color] = nil
        @instance.save
        container_meta_attributes
      end

      tests('delete metadata').returns({}) do
        @instance.metadata.delete(:draft)
        @instance.save
        container_meta_attributes
      end

      tests('should retrieve metadata when necessary') do
        @service.put_container(@instance.key, {"X-Container-Meta-List-Test"=>"true"} )
        dir = @service.directories.find {|d| d.key == @instance.key }
        # Listing does not eagerly load metadata; first access lazily fetches it.
        returns(nil) { dir.instance_variable_get("@metadata") }
        returns('true') { dir.metadata[:list_test] }
      end

      # NOTE(review): this test previously appeared twice verbatim; the exact
      # duplicate was removed.
      tests("should reload metadata after calling reload").returns("42") do
        @service.put_container @instance.key, "X-Container-Meta-Answer" => 42
        @instance.reload
        @instance.metadata[:answer]
      end
    end
  end
end

View file

@ -2,8 +2,8 @@ Shindo.tests('Fog::Rackspace::Storage | file', ['rackspace']) do
pending if Fog.mocking?
def object_meta_attributes
@instance.service.head_object(@directory.key, @instance.key).headers.reject {|k, v| !(k =~ /X-Object-Meta-/)}
def object_meta_attributes(file=@instance)
@instance.service.head_object(@directory.key, file.key).headers.reject {|k, v| !(k =~ /X-Object-Meta-/)}
end
def clear_metadata
@ -35,7 +35,7 @@ Shindo.tests('Fog::Rackspace::Storage | file', ['rackspace']) do
model_tests(@directory.files, file_attributes, Fog.mocking?) do
tests("#metadata should load empty metadata").returns({}) do
@instance.metadata
@instance.metadata.data
end
tests('#save') do
@ -79,6 +79,77 @@ Shindo.tests('Fog::Rackspace::Storage | file', ['rackspace']) do
end
end
end
begin
tests("sets metadata on create").returns("true") do
@file = @directory.files.create :key => 'meta-test', :body => lorem_file, :metadata => {:works => true }
object_meta_attributes(@file)["X-Object-Meta-Works"]
end
ensure
@file.destroy if @file
end
tests('urls') do
tests('no CDN') do
tests('#public_url') do
tests('http').returns(nil) do
@instance.public_url
end
@directory.cdn_cname = "my_cname.com"
tests('cdn_cname').returns(nil) do
@instance.public_url
end
@directory.cdn_cname = nil
@directory.service.instance_variable_set "@rackspace_cdn_ssl", true
tests('ssl').returns(nil) do
@instance.public_url
end
@directory.service.instance_variable_set "@rackspace_cdn_ssl", nil
end
tests('#ios_url').returns(nil) do
@instance.ios_url
end
tests('#streaming_url').returns(nil) do
@instance.streaming_url
end
end
tests('With CDN') do
tests('#public_url') do
@directory.public = true
@directory.save
tests('http').returns(0) do
@instance.public_url =~ /http:\/\/.*#{@instance.key}/
end
@directory.cdn_cname = "my_cname.com"
tests('cdn_cname').returns(0) do
@instance.public_url =~ /my_cname\.com.*#{@instance.key}/
end
@directory.cdn_cname = nil
@directory.service.instance_variable_set "@rackspace_cdn_ssl", true
tests('ssl').returns(0) do
@instance.public_url =~ /https:\/\/.+\.ssl\..*#{@instance.key}/
end
@directory.service.instance_variable_set "@rackspace_cdn_ssl", nil
end
tests('#ios_url').returns(0) do
@instance.ios_url =~ /http:\/\/.+\.iosr\..*#{@instance.key}/
end
tests('#streaming_url').returns(0) do
@instance.streaming_url =~ /http:\/\/.+\.stream\..*#{@instance.key}/
end
end
end
tests('#metadata keys') do

View file

@ -0,0 +1,127 @@
require 'fog/rackspace/models/storage/metadata'
require 'fog/rackspace/models/storage/directory'
require 'fog/rackspace/models/storage/file'
Shindo.tests('Fog::Rackspace::Storage | metadata', ['rackspace']) do
tests('Directory') do
@directory = Fog::Storage::Rackspace::Directory.new
tests('#to_key') do
tests('valid key').returns(:image_size) do
metadata = Fog::Storage::Rackspace::Metadata.new @directory
metadata.send(:to_key, "X-Container-Meta-Image-Size")
end
tests('invalid key').returns(nil) do
metadata = Fog::Storage::Rackspace::Metadata.new @directory
metadata.send(:to_key, "bad-key")
end
end
tests('#to_header_key') do
metadata = Fog::Storage::Rackspace::Metadata.new @directory
tests('key to add').returns("X-Container-Meta-Thumbnail-Image") do
metadata.send(:to_header_key, :thumbnail_image, true)
end
tests('key to remove').returns("X-Remove-Container-Meta-Thumbnail-Image") do
metadata.send(:to_header_key, :thumbnail_image, nil)
end
end
tests('#to_headers').returns({"X-Container-Meta-Preview"=>true, "X-Remove-Container-Meta-Delete-Me"=>1}) do
metadata = Fog::Storage::Rackspace::Metadata.new @directory
metadata[:preview] = true
metadata[:delete_me] = nil
metadata.to_headers
end
tests("#from_headers").returns({:my_boolean=>"true", :my_integer=>"42", :my_string=>"I am a string"}) do
headers = {
"X-Container-Meta-My-Integer"=> "42",
"X-Container-Meta-My-Boolean"=> "true",
"X-Container-Meta-My-String"=> "I am a string"
}
metadata = Fog::Storage::Rackspace::Metadata.from_headers @directory, headers
metadata.data
end
tests("#delete").returns({"X-Remove-Container-Meta-Delete-Me"=>1}) do
metadata = Fog::Storage::Rackspace::Metadata.new @directory
metadata.delete(:delete_me)
metadata.to_headers
end
end
tests('File') do
@file = Fog::Storage::Rackspace::File.new
tests('#to_key') do
tests('valid key').returns(:image_size) do
metadata = Fog::Storage::Rackspace::Metadata.new @file
metadata.send(:to_key, "X-Object-Meta-Image-Size")
end
tests('invalid key').returns(nil) do
metadata = Fog::Storage::Rackspace::Metadata.new @file
metadata.send(:to_key, "bad-key")
end
end
tests('#to_header_key') do
metadata = Fog::Storage::Rackspace::Metadata.new @file
tests('key to add').returns("X-Object-Meta-Thumbnail-Image") do
metadata.send(:to_header_key, :thumbnail_image, true)
end
tests('key to remove').returns("X-Remove-Object-Meta-Thumbnail-Image") do
metadata.send(:to_header_key, :thumbnail_image, nil)
end
end
tests('#to_headers').returns({"X-Object-Meta-Preview"=>true, "X-Remove-Object-Meta-Delete-Me"=>1}) do
metadata = Fog::Storage::Rackspace::Metadata.new @file
metadata[:preview] = true
metadata[:delete_me] = nil
metadata.to_headers
end
tests("#from_headers").returns({:my_boolean=>"true", :my_integer=>"42", :my_string=>"I am a string"}) do
headers = {
"X-Object-Meta-My-Integer"=> "42",
"X-Object-Meta-My-Boolean"=> "true",
"X-Object-Meta-My-String"=> "I am a string"
}
metadata = Fog::Storage::Rackspace::Metadata.from_headers @file, headers
metadata.data
end
tests("#delete").returns({"X-Remove-Object-Meta-Delete-Me"=>1}) do
metadata = Fog::Storage::Rackspace::Metadata.new @file
metadata.delete(:delete_me)
metadata.to_headers
end
end
tests("#respond_to?") do
tests('Should respond to all of the methods in Hash class').returns(true) do
metadata = Fog::Storage::Rackspace::Metadata.new @file
Hash.instance_methods.all? {|method| metadata.respond_to?(method)}
end
tests('Should respond to all of the methods in the Metadata class').returns(true) do
metadata = Fog::Storage::Rackspace::Metadata.new @file
metadata.methods.all? {|method| metadata.respond_to?(method)}
end
end
tests("#method_missing").returns(true) do
metadata = Fog::Storage::Rackspace::Metadata.new @file
metadata[:test] = true
metadata[:test]
end
end

View file

@ -57,12 +57,12 @@ Shindo.tests('Fog::Compute[:rackspace] | image requests', ['rackspace']) do
tests('failure') do
tests('#delete_image(0)').raises(Excon::Errors::BadRequest) do
@service.delete_image(0)
@service.delete_image(Fog::Rackspace::MockData::NOT_FOUND_ID)
end
tests('#get_image_details(0)').raises(Fog::Compute::Rackspace::NotFound) do
pending if Fog.mocking?
@service.get_image_details(0)
@service.get_image_details(Fog::Rackspace::MockData::NOT_FOUND_ID)
end
end

View file

@ -59,11 +59,11 @@ Shindo.tests('Fog::Compute::RackspaceV2 | image_tests', ['rackspace']) do
tests('failure') do
tests('#delete_image').raises(Excon::Errors::BadRequest) do
Fog::Compute[:rackspace].delete_image(0)
Fog::Compute[:rackspace].delete_image(Fog::Rackspace::MockData::NOT_FOUND_ID)
end
tests('#get_image').raises(Fog::Compute::RackspaceV2::NotFound) do
service.get_image(0)
service.get_image(Fog::Rackspace::MockData::NOT_FOUND_ID)
end
end
ensure

View file

@ -10,11 +10,18 @@ Shindo.tests('Fog::Storage[:rackspace] | container requests', ["rackspace"]) do
tests('success') do
tests("#put_container('fogcontainertests')").succeeds do
tests("#put_container('fogcontainertests', {})").succeeds do
pending if Fog.mocking?
Fog::Storage[:rackspace].put_container('fogcontainertests')
end
tests("#put_container('fogcontainertests', 'X-Container-Meta-Color'=>'green')").succeeds do
pending if Fog.mocking?
Fog::Storage[:rackspace].put_container('fogcontainertests', 'X-Container-Meta-Color'=>'green')
response = Fog::Storage[:rackspace].head_container('fogcontainertests')
returns('green') { response.headers['X-Container-Meta-Color'] }
end
tests("#get_container('fogcontainertests')").formats(@container_format) do
pending if Fog.mocking?
Fog::Storage[:rackspace].get_container('fogcontainertests').body

View file

@ -0,0 +1,8 @@
Shindo.tests('Fog::Storage::Rackspace', ['rackspace']) do |variable|
pending if Fog.mocking?
tests('account').succeeds do
Fog::Storage[:rackspace].account
end
end