Mirror of https://github.com/fog/fog.git
Merge remote-tracking branch 'upstream/master'
commit 9f1eba6662
1455 changed files with 9068 additions and 5998 deletions
.travis.yml | 40
@@ -1,43 +1,27 @@
language: ruby

gemfile:
- Gemfile
- Gemfile.1.8.7

rvm:
- 1.8.7
- 1.9.2
- 1.9.3
- 2.0.0
- 2.1.0
- jruby-18mode
- jruby-19mode
- jruby-head

script: bundle exec rake travis

matrix:
fast_finish: true
include:
- rvm: 2.1.0
gemfile: Gemfile
env: COVERAGE=true
exclude:
- rvm: 1.8.7
gemfile: Gemfile
- rvm: 1.9.2
gemfile: Gemfile.1.8.7
- rvm: 1.9.3
gemfile: Gemfile.1.8.7
- rvm: 2.0.0
gemfile: Gemfile.1.8.7
- rvm: 2.1.0
gemfile: Gemfile.1.8.7
- rvm: jruby-18mode
gemfile: Gemfile
- rvm: 2.0.0
gemfile: Gemfile
- rvm: 2.1.0
gemfile: Gemfile
- rvm: 2.1.1
gemfile: Gemfile
env: COVERAGE=true
- rvm: jruby-18mode
gemfile: Gemfile.1.8.7
- rvm: jruby-19mode
gemfile: Gemfile.1.8.7
gemfile: Gemfile
- rvm: jruby-head
gemfile: Gemfile.1.8.7
gemfile: Gemfile

allow_failures:
- rvm: jruby-head

CONTRIBUTORS.md
@@ -3,8 +3,10 @@
* Adam Bozanich <adam.boz@gmail.com>
|
||||
* Adam Greene <adam@sweetspotdiabetes.com>
|
||||
* Adam Heinz <amh@metricwise.net>
|
||||
* Adam Stegman and Zach Robinson <pair+astegman+zrobinson@pivotallabs.com>
|
||||
* Adam Tanner <adam@adamtanner.org>
|
||||
* Adam Tucker <adam.j.tucker@gmail.com>
|
||||
* Adan Saenz <asaenz@momentumsi.com>
|
||||
* Ahmed Al Hafoudh <alhafoudh@freevision.sk>
|
||||
* Akira Matsuda <ronnie@dio.jp>
|
||||
* Akshay Joshi <me@akshayjoshi.com>
|
||||
|
@ -32,6 +34,7 @@
|
|||
* Alvin Garcia <agarcia@exist.com>
|
||||
* Amitava <amitava.bs@gmail.com>
|
||||
* Amos Benari <abenari@redhat.com>
|
||||
* Amy Sutedja <asutedja@biaprotect.com>
|
||||
* Amy Woodward <aswoodward@gmail.com>
|
||||
* Andre Meij <andre@socialreferral.com>
|
||||
* Andreas Gerauer <helle@fraggaz.de>
|
||||
|
@ -59,6 +62,7 @@
|
|||
* Artem Veremey <artem@veremey.net>
|
||||
* Arthur Gunawan <acgun3@gmail.com>
|
||||
* Arvid Andersson <arvid@winstondesign.se>
|
||||
* Ash Wilson <ash.wilson@rackspace.com>
|
||||
* Athir Nuaimi <anuaimi@devfoundry.com>
|
||||
* Avrohom Katz <iambpentameter@gmail.com>
|
||||
* BK Box <bk@theboxes.org>
|
||||
|
@ -66,6 +70,8 @@
|
|||
* Ben Bleything <ben@bleything.net>
|
||||
* Ben Burkert <ben@benburkert.com>
|
||||
* Ben Butler-Cole <ben@bridesmere.com>
|
||||
* Ben Chadwick <bchadwick@mdsol.com>
|
||||
* Ben Hundley <ben.hundley@gmail.com>
|
||||
* Ben Turley <ben@scan.me>
|
||||
* Benjamin Manns <benmanns@gmail.com>
|
||||
* Benton Roberts <benton@bentonroberts.com>
|
||||
|
@ -80,13 +86,16 @@
|
|||
* Brad Gignac <brad.gignac@rackspace.com>
|
||||
* Brad Heller <brad@cloudability.com>
|
||||
* Bradley Schaefer <bradley.schaefer@gmail.com>
|
||||
* Brandon Dunne <bdunne@redhat.com>
|
||||
* Brendan Fosberry <brendan.fosberry@rackspace.com>
|
||||
* Brian D. Burns <iosctr@gmail.com>
|
||||
* Brian Dorry <bdorry@Brian-Dorrys-MacBook-Pro.local>
|
||||
* Brian Hartsock <brian.hartsock@gmail.com>
|
||||
* Brian Hartsock <brian.hartsock@rackspace.com>
|
||||
* Brian Nelson <bnelson@sugarcrm.com>
|
||||
* Brian Palmer <brianp@instructure.com>
|
||||
* Brice Figureau <brice-puppet@daysofwonder.com>
|
||||
* Bruno Enten <bruno@enten-itc.ch>
|
||||
* Bulat Shakirzyanov <mallluhuct@gmail.com>
|
||||
* Caius Durling <dev@caius.name>
|
||||
* Caleb Tennis <caleb.tennis@gmail.com>
|
||||
|
@ -107,6 +116,7 @@
|
|||
* Chris Chiodo <chris@viximo.com>
|
||||
* Chris Frederick <chris@maginatics.com>
|
||||
* Chris Hasenpflug <github@chris.hasenpflug.us>
|
||||
* Chris Howe <howech@infochimps.com>
|
||||
* Chris Mague <github@mague.com>
|
||||
* Chris Roberts <chrisroberts.code@gmail.com>
|
||||
* Chris Wuest <chris.wuest@rackspace.com>
|
||||
|
@ -137,6 +147,7 @@
|
|||
* Daniel Schweighoefer <daniel@netsteward.net>
|
||||
* Danny Garcia <dannygarcia.me@gmail.com>
|
||||
* Darrin Eden <darrin.eden@gmail.com>
|
||||
* Dave Donahue <dave@12spokes.com>
|
||||
* Dave Myron <therealdave.myron@gmail.com>
|
||||
* Dave Ungerer <daveungerer@gmail.com>
|
||||
* David <davidxz@x0-air.gateway.2wire.net>
|
||||
|
@ -155,6 +166,7 @@
|
|||
* Dominic Cleal <dcleal@redhat.com>
|
||||
* DoubleMalt <christoph@web.crofting.com>
|
||||
* Doug Henderson <dhenderson@maestrodev.com>
|
||||
* Doug Henderson <dougforpres@gmail.com>
|
||||
* Doug McInnes <doug@dougmcinnes.com>
|
||||
* Dr Nic Williams <drnicwilliams@gmail.com>
|
||||
* Dusty Jones <dusty@teamsnap.com>
|
||||
|
@ -172,6 +184,7 @@
|
|||
* Eric Boehs <ericboehs@gmail.com>
|
||||
* Eric Chernuka <ericchernuka@gmail.com>
|
||||
* Eric Hankins <ehankins@rednovalabs.com>
|
||||
* Eric Herot <eric.github@herot.com>
|
||||
* Eric Hodel <drbrain@segment7.net>
|
||||
* Eric Lindvall <eric@5stops.com>
|
||||
* Eric Stonfer <ericstonfer@yahoo.com>
|
||||
|
@ -198,6 +211,7 @@
|
|||
* Gabriel Rosendorf <gabriel.rosendorf@weather.com>
|
||||
* Garima Singh <igarimasingh@gmail.com>
|
||||
* Garret Alfert <alfert@wevelop.de>
|
||||
* Gaurish Sharma <contact@gaurishsharma.com>
|
||||
* Gavin Sandie <beach@vicecity.co.uk>
|
||||
* Gavin Sandie <g.sandie@digital-science.com>
|
||||
* Geoff Pado <geoffpado@gmail.com>
|
||||
|
@ -244,6 +258,7 @@
|
|||
* Jason Hansen & Josh Lane <jhansen@engineyard.com>
|
||||
* Jason Montleon <jmontleo@redhat.com>
|
||||
* Jason Roelofs <jameskilton@gmail.com>
|
||||
* Jason Smith <jsmith@gold-sonata.com>
|
||||
* Jay Faulkner <jay.faulkner@rackspace.com>
|
||||
* Jay Perry <jperry@brightcove.com>
|
||||
* Jeff McCune <jeff@puppetlabs.com>
|
||||
|
@ -261,6 +276,7 @@
|
|||
* Joe Yates <joe.g.yates@gmail.com>
|
||||
* John Dyer <john@krumpt.com>
|
||||
* John E. Vincent <lusis.org+github.com@gmail.com>
|
||||
* John F. Douthat <johndouthat@gmail.com>
|
||||
* John Feminella <johnf@fluxcrux.com>
|
||||
* John Ferlito <johnf@inodes.org>
|
||||
* John Hawthorn <john.hawthorn@gmail.com>
|
||||
|
@ -277,6 +293,7 @@
|
|||
* Jon-Erik Schneiderhan <jschneiderhan@gmail.com>
|
||||
* Jonas Pfenniger <jonas@pfenniger.name>
|
||||
* Jonas Pfenniger <zimbatm@zimbatm.com>
|
||||
* Jonathon Scanes <me@jscanes.com>
|
||||
* Joonas Reynders <joonas.reynders@iki.fi>
|
||||
* Jose Diaz-Gonzalez <josegonzalez@users.noreply.github.com>
|
||||
* Jose Luis Salas <josacar@gmail.com>
|
||||
|
@ -287,14 +304,17 @@
|
|||
* Josh Kearney <josh@jk0.org>
|
||||
* Josh Lane & Ines Sombra <jlane@engineyard.com>
|
||||
* Josh Lane & Jason Hansen <jlane@engineyard.com>
|
||||
* Josh Lane <jlane@engineyard.com>
|
||||
* Josh Lane <lane.joshlane@gmail.com>
|
||||
* Josh Lane <me@joshualane.com>
|
||||
* Josh Pasqualetto <josh.pasqualetto@sonian.net>
|
||||
* Josh Yotty <jyotty@bluebox.net>
|
||||
* Joshua Gross <joshua@surfeasy.com>
|
||||
* Joshua Krall <joshuakrall@pobox.com>
|
||||
* Joshua Napoli <jnapoli@swipely-napoli.home>
|
||||
* Joshua Napoli <jnapoli@swipely-napoli.local>
|
||||
* Joshua Nichols <josh@technicalpickles.com>
|
||||
* Joshua Schairbaum <joshua.schairbaum@gmail.com>
|
||||
* Julian Fischer <fischer@enterprise-rails.de>
|
||||
* Julian Weber <jweber@anynines.com>
|
||||
* Julian Weber <jweber@avarteq.de>
|
||||
|
@ -302,6 +322,7 @@
|
|||
* Juris Galang <jurisgalang@gmail.com>
|
||||
* Justin Barry <justin.d.barry@gmail.com>
|
||||
* Justin Clayton <justin.clayton@gettyimages.com>
|
||||
* KATOH Yasufumi <karma@jazz.email.ne.jp>
|
||||
* Kaloyan Kanev <kaloyan.kanev@cloudsigma.com>
|
||||
* Karan Misra <karan@erodov.com>
|
||||
* Karl Freeman <karlfreeman@gmail.com>
|
||||
|
@ -353,6 +374,7 @@
|
|||
* Mark Phillips <mark.phillips2@bskyb.com>
|
||||
* Mark Rushakoff <mark.rushakoff@gmail.com>
|
||||
* Mark Turner <mark@amerine.net>
|
||||
* Marshall Yount <marshall@yountlabs.com>
|
||||
* Martin Emde <martin.emde@gmail.com>
|
||||
* Martin Englund <martin@englund.nu>
|
||||
* Martin Matuska <martin@matuska.org>
|
||||
|
@ -402,6 +424,7 @@
|
|||
* Nat Welch <nat@natwelch.com>
|
||||
* Nathan Sullivan <nsullivan@kixeye.com>
|
||||
* Nathan Sutton <nate@zencoder.com>
|
||||
* Nathan Williams <nwilliams@bluebox.net>
|
||||
* Neill Turner <neillwturner@gmail.com>
|
||||
* Nelvin Driz <NelvinDriz@live.com>
|
||||
* Nelvin Driz <iam@nelv.in>
|
||||
|
@ -444,7 +467,10 @@
|
|||
* Pedro Perez <pedro@bvox.net>
|
||||
* Peter Bonnell <peter@circuitllc.com>
|
||||
* Peter C. Norton <pn@knewton.com>
|
||||
* Peter Drake <peter.drake@acquia.com>
|
||||
* Peter M. Goldstein <peter.m.goldstein@gmail.com>
|
||||
* Peter Meier <peter.meier@immerda.ch>
|
||||
* Peter Vawser <peter@catapult-elearning.com>
|
||||
* Peter Weldon <peter.weldon@null.net>
|
||||
* Peter Weldon <peter@lautus.net>
|
||||
* Phil Cohen <github@phlippers.net>
|
||||
|
@ -456,6 +482,7 @@
|
|||
* Pierre Carrier <pierre@gcarrier.fr>
|
||||
* Pieter van de Bruggen <pieter@puppetlabs.com>
|
||||
* Pieter van de Bruggen <pvande@gmail.com>
|
||||
* Piotr Kedziora <piotr.kedziora27@gmail.com>
|
||||
* Postmodern <postmodern.mod3@gmail.com>
|
||||
* Prashant Nadarajan <prashant.nadarajan@gmail.com>
|
||||
* Pratik Naik <pratiknaik@gmail.com>
|
||||
|
@ -483,11 +510,16 @@
|
|||
* Sam Kottler <shk@redhat.com>
|
||||
* Sam Merritt <spam@andcheese.org>
|
||||
* Sami Samhuri <sami@samhuri.net>
|
||||
* Sammy Larbi <sam@codeodor.com>
|
||||
* Samuel Merritt <spam@andcheese.org>
|
||||
* Sarah Vessels <sarah.vessels@cirrusmio.com>
|
||||
* Sascha Korth <sascha.korth@zweitag.de>
|
||||
* Scott Gonyea <me@aitrus.org>
|
||||
* Sean Caffery <sean.caffery@c3businesssolutions.com>
|
||||
* Sean Handley <sean.handley@gmail.com>
|
||||
* Sean Handley <sean.handley@melbourne.co.uk>
|
||||
* Sean Handley <sean@Seans-MacBook-Air.local>
|
||||
* Sean Handley <seanhandley@users.noreply.github.com>
|
||||
* Sean Hart <boardnutz@blacklight.net>
|
||||
* Sean Hart <sean.hart@gree.co.jp>
|
||||
* Sean Porter <portertech@gmail.com>
|
||||
|
@ -497,11 +529,13 @@
|
|||
* Shai Rosenfeld & Jacob Burkhart <srosenfeld@engineyard.com>
|
||||
* Shai Rosenfeld <shaiguitar@gmail.com>
|
||||
* Shai Rosenfeld <srosenfeld@engineyard.com>
|
||||
* Shaun Davis <davissp14@gmail.com>
|
||||
* Shawn Catanzarite <me@shawncatz.com>
|
||||
* Shay Bergmann <shayb@panaya.com>
|
||||
* Simon Gate <simon@smgt.me>
|
||||
* Simon Josi <me@yokto.net>
|
||||
* Simon Rozet <simon@rozet.name>
|
||||
* Simone Carletti <weppos@weppos.net>
|
||||
* Sjoerd Andringa <sjoerd.andringa@me.com>
|
||||
* Sneha Somwanshi <sneha.vishwas-somwanshi@digital.cabinet-office.gov.uk>
|
||||
* Spencer Dillard <dillards@amazon.com>
|
||||
|
@ -513,6 +547,7 @@
|
|||
* Stephen von Takach <steve@advancedcontrol.com.au>
|
||||
* Steve Agalloco <steve.agalloco@gmail.com>
|
||||
* Steve Frank <lardcanoe@gmail.com>
|
||||
* Steve Meyfroidt <steve.meyfroidt@gmail.com>
|
||||
* Steve Smith <github@scsworld.co.uk>
|
||||
* Steven Danna <steve@opscode.com>
|
||||
* Stuart Eccles <stuart@madebymany.co.uk>
|
||||
|
@ -591,6 +626,7 @@
|
|||
* bmiller <bmiller@handson.com>
|
||||
* bonkydog <bonkydog@bonkydog.com>
|
||||
* brookemckim <brooke.mckim@gmail.com>
|
||||
* bugagazavr <kirik910@gmail.com>
|
||||
* ccloes <chad_cloes@intuit.com>
|
||||
* coliver <coliver@datapipe.com>
|
||||
* crazed <cr4z3d@gmail.com>
|
||||
|
@ -625,6 +661,7 @@
|
|||
* jschneiderhan <jon-erik.schneiderhan@meyouhealth.com>
|
||||
* kanetann <kanetann@gmail.com>
|
||||
* kbockmanrs <kevin@rightscale.com>
|
||||
* kfafel <keith.fafel@gmail.com>
|
||||
* leehuffman <lhuffman@bluebox.net>
|
||||
* lostboy <paul.crabtree@gmail.com>
|
||||
* marios <marios@marios.(none)>
|
||||
|
@ -634,6 +671,7 @@
|
|||
* mlincoln <mlincoln@thoughtworks.com>
|
||||
* mriley <wdperson@hotmail.com>
|
||||
* msa <marios@marios.(none)>
|
||||
* neillturner <neillwturner@gmail.com>
|
||||
* nightshade427 <nightshade427@gmail.com>
|
||||
* phiggins <pete@peterhiggins.org>
|
||||
* phillc <spyyderz@gmail.com>
|
||||
|
@ -648,6 +686,7 @@
|
|||
* thattommyhall <thattommyhall@gmail.com>
|
||||
* tipt0e <topo-2@charter.net>
|
||||
* torake.fransson <torake.fransson@klarna.com>
|
||||
* unknown <bturner_2@pibuk-lp71.pibenchmark.com>
|
||||
* vkhatri <vir.khatri@gmail.com>
|
||||
* watsonian <watsonian@gmail.com>
|
||||
* wenlock <edward.raigosa@gmail.com>
|
||||

@@ -1,6 +1,6 @@
The MIT License (MIT)

Copyright (c) 2009-2013 [CONTRIBUTORS.md](https://github.com/fog/fog/blob/master/CONTRIBUTORS.md)
Copyright (c) 2009-2014 [CONTRIBUTORS.md](https://github.com/fog/fog/blob/master/CONTRIBUTORS.md)

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in

Rakefile | 8
@@ -1,4 +1,5 @@
require 'bundler/setup'
require 'rake/testtask'
require 'date'
require 'rubygems'
require 'rubygems/package_task'
@@ -47,10 +48,11 @@ end

GEM_NAME = "#{name}"
task :default => :test
task :travis => ['test:travis', 'coveralls_push_workaround']
task :travis => ['test', 'test:travis', 'coveralls_push_workaround']

require "tasks/test_task"
Fog::Rake::TestTask.new
Rake::TestTask.new do |t|
  t.pattern = File.join("**", "test", "**", "*_test.rb")
end

namespace :test do
  mock = 'true' || ENV['FOG_MOCK']
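
As a point of reference, here is a minimal, self-contained Rakefile sketch of the kind of Rake::TestTask wired up above; the :unit task name and the test/ directory layout are illustrative assumptions, not part of the diff.

    # Illustrative Rakefile snippet; mirrors the glob used in the diff above.
    require 'rake/testtask'

    Rake::TestTask.new(:unit) do |t|
      t.libs << 'test'                                       # add test/ to the load path
      t.pattern = File.join('**', 'test', '**', '*_test.rb') # same pattern as the diff
      t.verbose = true
    end

    # Run with: rake unit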

changelog.txt | 312
@@ -1,3 +1,315 @@
1.20.0 02/07/2014 76846bb4bd2d94ec169757f6f4125dc173359140
|
||||
==========================================================
|
||||
|
||||
Stats! { 'collaborators' => 54, 'downloads' => 4366478, 'forks' => 1074, 'open_issues' => 166, 'watchers' => 2904 }
|
||||
|
||||
MVP! Ash Wilson
|
||||
|
||||
[AWS|EC2]
|
||||
request_spot_instances.rb requires that date parameters be iso8601. thanks Frederick Cheung
|
||||
fix sporadically failing network_acl_tests.rb. thanks Frederick Cheung
|
||||
|
||||
[Brightbox]
|
||||
Sort schema collections. thanks Paul Thornthwaite
|
||||
Add CloudIp#destination_id. thanks Paul Thornthwaite
|
||||
Add support for Cloud SQL service. thanks Paul Thornthwaite
|
||||
Remove old #destroy request. thanks Paul Thornthwaite
|
||||
|
||||
[OpenStack|Network]
|
||||
Add CRUD for SecurityGroup and SecurityGroupRules. thanks Brandon Dunne
|
||||
Display subnets as a child of Network. thanks Brandon Dunne
|
||||
Add security_groups and security_group_rules hashes to the base Mock data. thanks Brandon Dunne
|
||||
Add tests for security_groups and security_group_rules methods. thanks Brandon Dunne
|
||||
|
||||
[aws]
|
||||
align hashrockets, remove whitespace. thanks Eric Stonfer
|
||||
mock block device mapping on run_instances. thanks Josh Lane
|
||||
mock setup block device deleteOnTermination. thanks Josh Lane
|
||||
|
||||
[aws|elb]
|
||||
Mimic create_load_balancer mock. thanks Jose Luis Salas
|
||||
compact possible nil. thanks geemus
|
||||
|
||||
[aws|fog]
|
||||
Don't pass :host to Excon request. thanks Jason Smith
|
||||
|
||||
[aws|iam]
|
||||
Mock delete_server_certificate raises NotFound appropriately. thanks Dan Peterson
|
||||
UploadServerCertificate parser respects CertificateBody and CertificateChain. thanks Dan Peterson
|
||||
|
||||
[core]
|
||||
Adds `ssh_ip_address=` so users can override the ssh address per issue #2584. thanks Kyle Rames
|
||||
updating Server models to use ssh_ip_address rather than public_ip_address. thanks Kyle Rames
|
||||
updating ssh_ip_address to take a block in order to defer address specification to execution time. thanks Kyle Rames
|
||||
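
To illustrate the [core] entries above, a hedged usage sketch (not taken from the changelog); the provider, credentials and instance id are placeholders.

    # Hypothetical use of the ssh_ip_address override added in this release.
    require 'fog'

    compute = Fog::Compute.new(:provider => 'AWS')   # credentials assumed to come from ~/.fog
    server  = compute.servers.get('i-0123abcd')      # placeholder instance id

    # Override the address fog uses for SSH instead of the default public_ip_address.
    server.ssh_ip_address = '10.0.0.5'
    server.ssh('uptime')

    # Per the entry above, ssh_ip_address can also take a block so the address is
    # resolved at execution time (see issue #2584 for the exact form).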
|
||||
[digital ocean|compute]
|
||||
pass '1' instead of true for scrub. thanks geemus
|
||||
|
||||
[digitalocean|compute]
|
||||
add created_at timestamp as attribute of server. thanks Dave Donahue
|
||||
additional test coverage and some maintenance. thanks Dave Donahue
|
||||
allow bootstrapping with keys rather than paths. thanks Dave Donahue
|
||||
sync with latest master for new fog_test_server_attributes. thanks Dave Donahue
|
||||
fix merge conflict in fog_test_server_attributes. thanks Dave Donahue
|
||||
|
||||
[google]
|
||||
Handle 500 errors from GCE. thanks Carlos Sanchez
|
||||
Excon::Errors::NotFound never reaches the models. thanks Carlos Sanchez
|
||||
Disk.ready? should not reload the data. thanks Carlos Sanchez
|
||||
Expose Google API client, compute and api_url for easier debugging. thanks Carlos Sanchez
|
||||
|
||||
[google|compute]
|
||||
Readme update. thanks Nat Welch
|
||||
Remove RHEL from Global Project list. thanks Nat Welch
|
||||
|
||||
[hp|compute_v2]
|
||||
added security group support. thanks Kyle Rames
|
||||
|
||||
[libvirt]
|
||||
fix readme gem reference. thanks geemus
|
||||
|
||||
[libvirt|compute]
|
||||
Allow volumes to have backing volumes. thanks Dominic Cleal
|
||||
|
||||
[misc]
|
||||
Use endpoint, port, and path_style options in AWS storage Mock. thanks Adam Stegman and Zach Robinson
|
||||
Added Openstack compute support for add_security_group and remove_security_group. thanks Adan Saenz
|
||||
Fixed method names for Mock objects for add/remove_security_group. thanks Adan Saenz
|
||||
Add support for AWS VPC Network ACLs. thanks Alex Coomans
|
||||
Fix subnet mocking, related to #2510. thanks Alex Coomans
|
||||
Fix DhcpOption#associate. thanks Alex Coomans
|
||||
reject unnecessary methods creation and clean up remove_method. thanks Alexander Lomov
|
||||
make test output more verbose if provider service is unavailable. thanks Alexander Lomov
|
||||
mock tests doesn't have to rely on provider availability. thanks Alexander Lomov
|
||||
fix get_bucket_acl request method in Google Cloud Storage. thanks Alexander Lomov
|
||||
fix put_bucket_acl request for Google Cloud Storage service. thanks Alexander Lomov
|
||||
remove unnecessary duplication in put_bucket_acl request in Google storage. thanks Alexander Lomov
|
||||
add put_object_acl request to Google Cloud Storage service. thanks Alexander Lomov
|
||||
Removed host from SQS connection request arguments. thanks Amy Sutedja
|
||||
Revert "[aws|sqs] remove host/port from request". thanks Amy Sutedja
|
||||
Create a realistic, but fake, service catalog. thanks Ash Wilson
|
||||
The fake service catalog matches format. thanks Ash Wilson
|
||||
DRY up some of that service catalog generation. thanks Ash Wilson
|
||||
Handle failed logins. thanks Ash Wilson
|
||||
Rackspace identity_tests now pass in mocking mode. thanks Ash Wilson
|
||||
Mock the #get_containers Storage call. thanks Ash Wilson
|
||||
Mock the #head_container Storage call. thanks Ash Wilson
|
||||
Mock the #put_container Storage request. thanks Ash Wilson
|
||||
Don't count the :meta entry as an object. thanks Ash Wilson
|
||||
Another pair of tests that already work with mocks. thanks Ash Wilson
|
||||
Some speculative mocking of #get_container. thanks Ash Wilson
|
||||
Refactor to use some utility classes. thanks Ash Wilson
|
||||
Already took care of these three. thanks Ash Wilson
|
||||
Don't call methods on a non-enabled CDN. thanks Ash Wilson
|
||||
Mock delete_object and put_object. thanks Ash Wilson
|
||||
Mock get_object. thanks Ash Wilson
|
||||
Mock HEAD request for objects. thanks Ash Wilson
|
||||
Refactor into Fog::Storage::Rackspace::Common. thanks Ash Wilson
|
||||
Support "chunking" in #put_object. thanks Ash Wilson
|
||||
Implement bulk deletion. thanks Ash Wilson
|
||||
Test failure cases for mocks, too. thanks Ash Wilson
|
||||
Mock #post_set_meta_temp_url_key with a no-op. thanks Ash Wilson
|
||||
Eliminate a ton of redundancy. thanks Ash Wilson
|
||||
Large object tests now pass in mock mode. thanks Ash Wilson
|
||||
Account tests now pass. thanks Ash Wilson
|
||||
directories_tests now pass with mocks. thanks Ash Wilson
|
||||
First half of directory_tests now pass. thanks Ash Wilson
|
||||
The rest of the directory_tests now pass too. thanks Ash Wilson
|
||||
file_tests now all pass under mocking. thanks Ash Wilson
|
||||
files_tests already works with the mocks. Huzzah!. thanks Ash Wilson
|
||||
storage_tests now all pass under mocking, too. thanks Ash Wilson
|
||||
Refactoring: #each_part in MockObject. thanks Ash Wilson
|
||||
Documentation for the storage mock utility classes. thanks Ash Wilson
|
||||
Split a multiline statement with trailing .'s. thanks Ash Wilson
|
||||
Parenthesize method parameters. thanks Ash Wilson
|
||||
Create the mock Queues service. thanks Ash Wilson
|
||||
Enable Queues service tests for mocking. thanks Ash Wilson
|
||||
Enable Cloud Queues request tests. thanks Ash Wilson
|
||||
Mock the create_queue call. thanks Ash Wilson
|
||||
Mock the list_queues call. thanks Ash Wilson
|
||||
Mock the delete_queue call. thanks Ash Wilson
|
||||
Mock the get_queue call. thanks Ash Wilson
|
||||
No need to map! here, we're modifying the hashes. thanks Ash Wilson
|
||||
Mock the queue_stats request. thanks Ash Wilson
|
||||
Handle a corner case in the create_queue mock. thanks Ash Wilson
|
||||
On to the messages_tests. thanks Ash Wilson
|
||||
Mock the create_message call. thanks Ash Wilson
|
||||
get_message and list_messages mocks. thanks Ash Wilson
|
||||
Initial support for mocking delete_message. thanks Ash Wilson
|
||||
Enable claim_tests in mocking mode. thanks Ash Wilson
|
||||
Mock the create_claim request. thanks Ash Wilson
|
||||
create_claim returns a 204 for empty claims. thanks Ash Wilson
|
||||
Mock the get_claim request. thanks Ash Wilson
|
||||
Mock the update_claim request. thanks Ash Wilson
|
||||
Mock the delete_claim request. thanks Ash Wilson
|
||||
Refactor out some common error checking. thanks Ash Wilson
|
||||
Actually compute #claimed and #free. thanks Ash Wilson
|
||||
Similar refactoring for accessing MockClaims. thanks Ash Wilson
|
||||
Some documentation. thanks Ash Wilson
|
||||
Completely untested ageoff code. thanks Ash Wilson
|
||||
Enable model tests for Claims. thanks Ash Wilson
|
||||
Enable the claims_tests in mocking mode. thanks Ash Wilson
|
||||
Enable the message_tests in mocking mode. thanks Ash Wilson
|
||||
Enable the messages_tests in mocking mode. thanks Ash Wilson
|
||||
Enable the queue_tests in mocking mode. thanks Ash Wilson
|
||||
Er, *actually* enable the messages_tests for mocks. thanks Ash Wilson
|
||||
Enable the queues_tests in mocking mode. thanks Ash Wilson
|
||||
Refactor PATH_BASE into a constant. thanks Ash Wilson
|
||||
Er, actually enable queues_tests, too. thanks Ash Wilson
|
||||
Yep, just did that. thanks Ash Wilson
|
||||
Make the delete_message mock consistent. thanks Ash Wilson
|
||||
Only extend the TTL of a MockMessage. thanks Ash Wilson
|
||||
Don't use `&:to_h` style enumerations. thanks Ash Wilson
|
||||
I guess there isn't really a better place. thanks Ash Wilson
|
||||
Include the oldest and newest message in stats. thanks Ash Wilson
|
||||
Missed a chance to use queue.claim!. thanks Ash Wilson
|
||||
Replace the JSON round-trip with #stringify. thanks Ash Wilson
|
||||
A hack to fix the Claim#messages= hack on 1.8.7. thanks Ash Wilson
|
||||
Use case-insensitive header access for Location. thanks Ash Wilson
|
||||
Fix case sensitivity of the Content-type header. thanks Ash Wilson
|
||||
extended IOPS support. thanks Ben Chadwick
|
||||
add Iops to snapshot model. thanks Ben Chadwick
|
||||
adding new HVM-based instance types to AWS in lib/fog/aws/models/compute/flavors.rb. thanks Ben Hundley
|
||||
White space cleanup. thanks Brandon Dunne
|
||||
use current region for subnet checks when creating DB and Cache subnet groups. thanks Brian Nelson
|
||||
add aws storage multipart upload mocks. thanks Brian Palmer
|
||||
enable the relevant tests for multipart mocks. thanks Brian Palmer
|
||||
Write logger output to stderr to conform to convention. thanks Bruno Enten
|
||||
[google][compute] Update to API v1. thanks Carlos Sanchez
|
||||
Update rubygems to fix travis in ruby 1.8. thanks Carlos Sanchez
|
||||
Fix typos that make ruby 1.8 break. thanks Carlos Sanchez
|
||||
Changed openstack server model to build security group objects without generating deprecation warning messages. thanks Chris Howe
|
||||
[google][compute] Add support for blank disks (i.e. remove code that required only image or snapshot based disks to be created). thanks Doug Henderson
|
||||
Fixes for AWS Mocking. thanks Doug Henderson
|
||||
Implemented Replace Route. thanks Eric Herot
|
||||
Add replace_route failure tests. thanks Eric Herot
|
||||
Undo date change. thanks Eric Herot
|
||||
Add a test for passing a nonexistent route table and an existing internet gateway to the replace_route failures section. thanks Eric Herot
|
||||
Switch to hashed parameters method for handling replace_route arguments. thanks Eric Herot
|
||||
Remove commented code. thanks Eric Herot
|
||||
making destination_cidr_block a required parameter for replace_route. thanks Eric Herot
|
||||
Switch to or-equals for DestinationCidrBlock and instanceOwnerId. thanks Eric Herot
|
||||
Moved #compact to a separate line to make it a little more obvious. thanks Evan Light
|
||||
Servers with a password locked root user have a nil @password. thanks Evan Light
|
||||
Issues a deprecation warning if the Rackspace Fog user is relying on region to be provided by default. thanks Evan Light
|
||||
Closes #2469. thanks Evan Light
|
||||
Ported fog rackspace storage docs for OpenStack. thanks Evan Light
|
||||
Oops. Missed a couple of deletions of CDN stuff. thanks Evan Light
|
||||
No, we don't want people hitting up Rackspace specifically about OpenStack docs. It's a joint effort!. thanks Evan Light
|
||||
Adds getting started guide for OpenStack fog. thanks Evan Light
|
||||
Added Ruby-specific code blocks. thanks Evan Light
|
||||
Formatting fixes. thanks Evan Light
|
||||
Fixed another formatting error. thanks Evan Light
|
||||
Fixes #2586. thanks Evan Light
|
||||
Check if security group is nil, fixes #2507. thanks Gaurish Sharma
|
||||
Update Flavours.rb with new M3 Instance Types. thanks Gaurish Sharma
|
||||
Fixing defect with handling of multiple <item> elements in response to describe-reservations. thanks Joe Kinsella
|
||||
Fix typo puplic -> public. thanks John F. Douthat
|
||||
Tests and fixes for Elasticache VPC subnet groups. thanks Jon Topper
|
||||
Rackspace/examples; cloudfiles directory is set to public, therefore file is accessible. thanks Jonathon Scanes
|
||||
DNSimple get_domain also accepts domain name. thanks Jose Luis Salas
|
||||
rm rspec dependency. thanks Joseph Anthony Pasquale Holsten
|
||||
adding Rage4 module file. thanks Joshua Gross
|
||||
all request types support by the rage4 api without mocks or tests. thanks Joshua Gross
|
||||
setting up testing library to start writing tests. thanks Joshua Gross
|
||||
Wrote shindo tests for all supported requests, which resulted in a lot of debugging of request methods. Also updated zone/record models to work in simple cases. thanks Joshua Gross
|
||||
using proper hash syntax for ruby 1.8.7. thanks Joshua Gross
|
||||
when a zone doesn't exist return nil for rage4. thanks Joshua Gross
|
||||
previous change was for records, duplicating nil return for zones now. thanks Joshua Gross
|
||||
changing handling of zone returns after testing that data was different on errors and success. thanks Joshua Gross
|
||||
adding domain alias to record to match zerigo api in rage4. thanks Joshua Gross
|
||||
quick syntax fix for an attribute alias. thanks Joshua Gross
|
||||
making a reader for domain to duplicate name attribute. thanks Joshua Gross
|
||||
adding more attributes to rage4 records. thanks Joshua Gross
|
||||
updating documentation for list records in rage4. thanks Joshua Gross
|
||||
fixing a typo in rage4 record. thanks Joshua Gross
|
||||
minor fixes for record and domain destroying. thanks Joshua Gross
|
||||
fixing rage4 structure to follow recent fog changes. thanks Joshua Gross
|
||||
fixing service creation in rage4. thanks Joshua Gross
|
||||
Pass params necessary to upload key pairs. thanks Joshua Schairbaum
|
||||
Fix logic bug in data structure creation. thanks Joshua Schairbaum
|
||||
Revert "[hp|compute_v2] added security group support". thanks Kyle Rames
|
||||
normalize requires syntax. thanks Lance Ivy
|
||||
ensure that each service requires its provider. thanks Lance Ivy
|
||||
openstack orchestration no longer depends on cloud formation. thanks Lance Ivy
|
||||
ensure that all services require their provider. thanks Lance Ivy
|
||||
create core for each provider. keep load hook for provider. thanks Lance Ivy
|
||||
extend load time benchmarks for new load targets. thanks Lance Ivy
|
||||
don't require service when registering it. thanks Lance Ivy
|
||||
add benchmark scripts to load each provider and service independently. thanks Lance Ivy
|
||||
support rackspace storage delete_at and delete_after headers. thanks Marshall Yount
|
||||
fixed misspelling. thanks Matheus Mina
|
||||
refactor DataPipeline format conversion, allowing for arrays of refs. thanks Matt Gillooly
|
||||
make disassociate_address mock idempotent, by not requiring instance data. thanks Michael Hale
|
||||
ignore more Ruby version manager files. thanks Mike Fiedler
|
||||
drop dependency on deprecated ruby-hmac gem, fixes #2034. thanks Mike Fiedler
|
||||
Always scrub data when deleting a server from DO. thanks Nat Welch
|
||||
Hardcode some responses to tests. thanks Nat Welch
|
||||
Cleanup some whitespace in the Google dir. thanks Nat Welch
|
||||
fix error - invalid excon request keys: :host. thanks Nathan Williams
|
||||
Make Coveralls opt-in. thanks Paul Thornthwaite
|
||||
Reduce size of Travis matrix. thanks Paul Thornthwaite
|
||||
Revert to original .travis.yml and include one case. thanks Paul Thornthwaite
|
||||
Record and Zone put requests are idempotent. thanks Peter Drake
|
||||
Add Ruby 2.1.0 to the test matrix. thanks Peter M. Goldstein
|
||||
Fixed error when accessing files via atmos where keys contain spaces. thanks Peter Vawser
|
||||
There is a bug here, or maybe I'm using the gem wrong: you should merge the ACLs after merging the meta hash, otherwise the new permission is overwritten by the old one (e.g. if the directory previously had X-Container-Read: .r:*,.rlistings). thanks Piotr Kedziora
|
||||
Add support for Rackspace's Extract Archive API call See http://docs.rackspace.com/files/api/v1/cf-devguide/content/Extract_Archive-d1e2338.html for documentation on the API call. thanks Sammy Larbi
|
||||
Set the Content-Type of extract_archive requests to '' The documentation for extract archive (http://docs.rackspace.com/files/api/v1/cf-devguide/content/Extract_Archive-d1e2338.html) says if a Content-Type is sent, every object in the archive will have its Content-Type set to that value. However, if a blank Content-Type is sent, CloudFiles will determine it based on each individual file. Since we don't want every file to be interpreted as an archive (which would happen if we let Fog determine the Content-Type), we set it explicitly to a blank string. thanks Sammy Larbi
|
||||
Use @ in comment for YARD docs. thanks Sammy Larbi
|
||||
Update Nokogiri version. thanks Sascha Korth
|
||||
Fix version. thanks Sascha Korth
|
||||
Undo last nokogiri version setting. thanks Sascha Korth
|
||||
Remove duplicates. thanks Sean Handley
|
||||
Formatting flavor data and updating documentation to include i2 instances. thanks Shaun Davis
|
||||
Renaming ebs_optimized -> ebs_optimized_available. thanks Shaun Davis
|
||||
Removing unnecessary comments. thanks Shaun Davis
|
||||
Display number of instance store volumes per instance flavor. thanks Shaun Davis
|
||||
Switch to DNSimple versioned API. thanks Simone Carletti
|
||||
Cleanup documentation and resource representations. thanks Simone Carletti
|
||||
Remove :host key in SQS request method to eliminate excon error. thanks Steve Meyfroidt
|
||||
get non capitalized content-type. thanks bugagazavr
|
||||
fix MVP exclude list. thanks geemus
|
||||
add CONTRIBUTORS, assign copyright. thanks geemus
|
||||
fix link in license. thanks geemus
|
||||
update contributors/license files. thanks geemus
|
||||
Update network parser to add private ips to an array. Previously the network parser would overwrite the private ip addresses if there were more than one; these are now added to an array. thanks joe
|
||||
Update documentation of return values. thanks joe
|
||||
[google][compute] Add rhel-cloud to project search list. thanks kbockmanrs
|
||||
Implement Rackspace Monitoring Agent Host information. thanks kfafel
|
||||
minor fix for get_filesystems_info. thanks kfafel
|
||||
fix mock output hash. thanks kfafel
|
||||
tighten up agent info mocks. thanks kfafel
|
||||
Implement agent_info tests; better mocks. thanks kfafel
|
||||
Cleaned up agent_tests. thanks kfafel
|
||||
add missing get_agent and list_agents. thanks kfafel
|
||||
addresses and settags. thanks neillturner
|
||||
delete snapshots and address requests for google. thanks unknown
|
||||
fix a couple of bugs. thanks unknown
|
||||
add attach and detach disk. thanks unknown
|
||||
|
||||
[openstack]
|
||||
image.update_image_members expects are incorrect #2627. thanks radekg
|
||||
Fix for OpenStack flavor id calculation. thanks radekg
|
||||
|
||||
[openstack|compute]
|
||||
Allow to use Symbol when specifying the hash of NICs. thanks KATOH Yasufumi
|
||||
Adding docs for OpenStack Compute. thanks Kyle Rames
|
||||
|
||||
[rackspace|compute]
|
||||
updating Rackspace compute docs. thanks Kyle Rames
|
||||
|
||||
[rackspace|compute_v2]
|
||||
updates key_pair model to pass additional attributes onto compute service. (You can now pass public and private keys via the model). thanks Kyle Rames
|
||||
added key_name and modified key_pair= to take KeyPair objects as well as strings in order to be more compatible with other fog providers. thanks Kyle Rames
|
||||
|
||||
[storm_on_demand]
|
||||
don't pass host to request. thanks Josh Blancett
|
||||
|
||||
[vcloud_director]
|
||||
fix typo as per #2621. thanks Mike Pountney
|
||||
|
||||
|
||||
1.19.0 12/19/2013 15180fd7c0993f7fe6cfdc861a4db7ada14825ad
|
||||
==========================================================
|
||||
|
||||
|
|

fog.gemspec | 29
@@ -6,8 +6,8 @@ Gem::Specification.new do |s|
  ## If your rubyforge_project name is different, then edit it and comment out
  ## the sub! line in the Rakefile
  s.name = 'fog'
  s.version = '1.19.0'
  s.date = '2013-12-19'
  s.version = '1.20.0'
  s.date = '2014-03-14'
  s.rubyforge_project = 'fog'

  ## Make sure your summary is short. The description may be as long
@@ -41,30 +41,31 @@ Gem::Specification.new do |s|

  ## List your runtime dependencies here. Runtime dependencies are those
  ## that are needed for an end user to actually USE your code.
  s.add_dependency('builder')
  s.add_dependency('excon', '~>0.31.0')
  s.add_dependency('formatador', '~>0.2.0')
  s.add_dependency('multi_json', '~>1.0')
  s.add_dependency('mime-types')
  s.add_dependency('net-scp', '~>1.1')
  s.add_dependency('net-ssh', '>=2.1.3')
  s.add_dependency('nokogiri', '>=1.5.11')
  s.add_dependency("fog-core", "~> 1.21", ">= 1.21.1")
  s.add_dependency("fog-json")

  s.add_dependency('nokogiri', '~> 1.5', '>= 1.5.11')

  # Modular providers
  s.add_dependency("fog-brightbox")

  ## List your development dependencies here. Development dependencies are
  ## those that are only needed during development
  s.add_development_dependency('minitest')
  s.add_development_dependency('jekyll') unless RUBY_PLATFORM == 'java'
  s.add_development_dependency('rake')
  s.add_development_dependency('rbvmomi')
  s.add_development_dependency('yard')
  s.add_development_dependency('thor')
  s.add_development_dependency('rbovirt', '>=0.0.11')
  s.add_development_dependency('rbovirt', '>= 0.0.24')
  s.add_development_dependency('shindo', '~> 0.3.4')
  s.add_development_dependency('fission')
  s.add_development_dependency('pry')
  s.add_development_dependency('google-api-client', '~>0.6.2')
  s.add_development_dependency('google-api-client', '~> 0.6', '>= 0.6.2')
  s.add_development_dependency('unf')
  if ENV["FOG_USE_LIBVIRT"] && RUBY_PLATFORM != 'java'
    s.add_development_dependency('ruby-libvirt','~>0.4.0')

  if ENV["FOG_USE_LIBVIRT"]
    s.add_development_dependency('ruby-libvirt','~> 0.5.0')
  end

  s.files = `git ls-files`.split("\n")
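
For readers unfamiliar with the pessimistic operator used in the tightened constraints above, a small RubyGems check (illustrative only, not part of the gemspec):

    # What the new nokogiri constraint ('~> 1.5', '>= 1.5.11') accepts.
    require 'rubygems'

    req = Gem::Requirement.new('~> 1.5', '>= 1.5.11')
    req.satisfied_by?(Gem::Version.new('1.5.11')) # => true
    req.satisfied_by?(Gem::Version.new('1.6.1'))  # => true  ('~> 1.5' allows any 1.x >= 1.5)
    req.satisfied_by?(Gem::Version.new('1.5.10')) # => false (below the explicit floor)
    req.satisfied_by?(Gem::Version.new('2.0.0'))  # => false (outside '~> 1.5')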

lib/fog.rb | 15
@@ -3,6 +3,19 @@
__LIB_DIR__ = File.expand_path(File.dirname(__FILE__))
$LOAD_PATH.unshift __LIB_DIR__ unless $LOAD_PATH.include?(__LIB_DIR__)

# Use core
require 'fog/core'

# Previously treated as "core"
# data exchange specific (to be extracted and used on a per provider basis)
require 'fog/xml'
require 'fog/json'
require 'fog/core/parser'

# deprecation wrappers (XML wrapped version)
require 'fog/core/deprecated/connection'
require 'fog/core/deprecated_connection_accessors'

# any one of these can be required separately.
# they all depend on fog/core for shared functionality.
require 'fog/atmos'
@@ -14,6 +27,7 @@ require 'fog/clodo'
require 'fog/digitalocean'
require 'fog/dnsimple'
require 'fog/dnsmadeeasy'
require 'fog/fogdocker'
require 'fog/dreamhost'
require 'fog/dynect'
require 'fog/ecloud'
@@ -34,6 +48,7 @@ require 'fog/rage4'
require 'fog/riakcs'
require 'fog/openstack'
require 'fog/ovirt'
require 'fog/sakuracloud'
require 'fog/serverlove'
require 'fog/storm_on_demand'
require 'fog/terremark'
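
The comment block above notes that each provider can be required separately on top of fog/core; a hedged sketch of what that looks like in practice (the provider choice and credential handling are illustrative):

    # Load a single provider instead of the whole 'fog' meta-require.
    require 'fog/aws'

    storage = Fog::Storage.new(
      :provider              => 'AWS',
      :aws_access_key_id     => ENV['AWS_ACCESS_KEY_ID'],
      :aws_secret_access_key => ENV['AWS_SECRET_ACCESS_KEY']
    )
    storage.directories.each { |d| puts d.key }   # list S3 buckets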

@@ -1,25 +0,0 @@
module Fog
  module Account

    def self.[](provider)
      self.new(:provider => provider)
    end

    def self.new(attributes)
      attributes = attributes.dup
      provider = attributes.delete(:provider).to_s.downcase.to_sym

      if provider == :stormondemand
        require 'fog/storm_on_demand/account'
        Fog::Account::StormOnDemand.new(attributes)
      else
        raise ArgumentError.new("#{provider} has no account service")
      end
    end

    def self.providers
      Fog.services[:account]
    end

  end
end
@ -1,5 +1,4 @@
|
|||
require 'fog/atmos/core'
|
||||
require 'fog/storage'
|
||||
|
||||
module Fog
|
||||
module Storage
|
||||
|
@ -84,7 +83,7 @@ module Fog
|
|||
@hmac = Fog::HMAC.new('sha1', @storage_secret_decoded)
|
||||
@persistent = options.fetch(:persistent, false)
|
||||
|
||||
@connection = Fog::Connection.new("#{@prefix}://#{@storage_host}:#{@storage_port}",
|
||||
@connection = Fog::XML::Connection.new("#{@prefix}://#{@storage_host}:#{@storage_port}",
|
||||
@persistent, @connection_options)
|
||||
end
|
||||
|
||||
|
|
|
@ -104,7 +104,7 @@ module Fog
|
|||
@port = options[:port] || 443
|
||||
@persistent = options[:persistent] || false
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
def reload
|
||||
|
|
|
@ -78,7 +78,7 @@ module Fog
|
|||
@persistent = options[:persistent] || false
|
||||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
def reload
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
require 'fog/aws/core'
|
||||
require 'fog/cdn'
|
||||
|
||||
module Fog
|
||||
module CDN
|
||||
|
@ -155,7 +154,7 @@ EOF
|
|||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@version = options[:version] || '2010-11-01'
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
def reload
|
||||
|
|
|
@ -60,7 +60,7 @@ module Fog
|
|||
@persistent = options[:persistent] || false
|
||||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
def reload
|
||||
|
|
|
@ -104,7 +104,7 @@ module Fog
|
|||
@persistent = options[:persistent] || false
|
||||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
def reload
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
require 'fog/aws/core'
|
||||
require 'fog/compute'
|
||||
|
||||
module Fog
|
||||
module Compute
|
||||
|
@ -140,6 +139,7 @@ module Fog
|
|||
request :release_address
|
||||
request :replace_network_acl_association
|
||||
request :replace_network_acl_entry
|
||||
request :replace_route
|
||||
request :register_image
|
||||
request :request_spot_instances
|
||||
request :reset_network_interface_attribute
|
||||
|
@ -163,6 +163,7 @@ module Fog
|
|||
|
||||
class Mock
|
||||
include Fog::AWS::CredentialFetcher::ConnectionMethods
|
||||
include Fog::AWS::RegionMethods
|
||||
|
||||
def self.data
|
||||
@data ||= Hash.new do |hash, region|
|
||||
|
@ -278,10 +279,7 @@ module Fog
|
|||
@aws_credentials_expire_at = Time::now + 20
|
||||
setup_credentials(options)
|
||||
@region = options[:region] || 'us-east-1'
|
||||
|
||||
unless ['ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-1', 'us-east-1', 'us-west-1', 'us-west-2', 'sa-east-1'].include?(@region)
|
||||
raise ArgumentError, "Unknown region: #{@region.inspect}"
|
||||
end
|
||||
validate_aws_region @region
|
||||
end
|
||||
|
||||
def region_data
|
||||
|
@ -351,6 +349,7 @@ module Fog
|
|||
|
||||
class Real
|
||||
include Fog::AWS::CredentialFetcher::ConnectionMethods
|
||||
include Fog::AWS::RegionMethods
|
||||
# Initialize connection to EC2
|
||||
#
|
||||
# ==== Notes
|
||||
|
@ -385,6 +384,8 @@ module Fog
|
|||
@instrumentor_name = options[:instrumentor_name] || 'fog.aws.compute'
|
||||
@version = options[:version] || '2013-10-01'
|
||||
|
||||
validate_aws_region @region
|
||||
|
||||
if @endpoint = options[:endpoint]
|
||||
endpoint = URI.parse(@endpoint)
|
||||
@host = endpoint.host
|
||||
|
@ -398,7 +399,7 @@ module Fog
|
|||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
end
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
def reload
|
||||
|
|
|
@ -1,5 +1,8 @@
|
|||
require 'fog/core'
|
||||
require 'fog/xml'
|
||||
require 'fog/json'
|
||||
require 'fog/aws/credential_fetcher'
|
||||
require 'fog/aws/region_methods'
|
||||
require 'fog/aws/signaturev4'
|
||||
|
||||
module Fog
|
||||
|
|
|
@ -62,7 +62,7 @@ module Fog
|
|||
@persistent = options[:persistent] || false
|
||||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
|
||||
setup_credentials(options)
|
||||
end
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
require 'fog/aws/core'
|
||||
require 'fog/dns'
|
||||
|
||||
module Fog
|
||||
module DNS
|
||||
|
@ -100,7 +99,7 @@ module Fog
|
|||
@scheme = options[:scheme] || 'https'
|
||||
@version = options[:version] || '2012-02-29'
|
||||
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
def reload
|
||||
|
|
|
@ -88,7 +88,7 @@ module Fog
|
|||
@port = options[:port] || '443'
|
||||
@scheme = options[:scheme] || 'https'
|
||||
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
private
|
||||
|
|
|
@ -62,7 +62,7 @@ module Fog
|
|||
@path = options[:path] || '/'
|
||||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@connection = Fog::Connection.new(
|
||||
@connection = Fog::XML::Connection.new(
|
||||
"#{@scheme}://#{@host}:#{@port}#{@path}", options[:persistent]
|
||||
)
|
||||
end
|
||||
|
@ -139,6 +139,15 @@ module Fog
|
|||
:clusters => {}, # cache cluster data, indexed by cluster ID
|
||||
:security_groups => {}, # security groups
|
||||
:subnet_groups => {},
|
||||
:parameter_groups => {"default.memcached1.4" => { "CacheParameterGroupFamily"=>"memcached1.4",
|
||||
"Description"=>"Default parameter group for memcached1.4",
|
||||
"CacheParameterGroupName"=>"default.memcached1.4"
|
||||
},
|
||||
"default.redis2.6" => {"CacheParameterGroupFamily"=>"redis2.6",
|
||||
"Description"=>"Default parameter group for redis2.6",
|
||||
"CacheParameterGroupName"=>"default.redis2.6"
|
||||
}
|
||||
}
|
||||
}
|
||||
end
|
||||
end
|
||||
|
|
|
@ -139,7 +139,7 @@ module Fog
|
|||
@persistent = options[:persistent] || false
|
||||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
def reload
|
||||
|
|
|
@ -73,7 +73,7 @@ module Fog
|
|||
@persistent = options[:persistent] || false
|
||||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@region = options[:region]
|
||||
end
|
||||
|
||||
|
|
|
@ -134,7 +134,7 @@ module Fog
|
|||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
|
||||
|
|
|
@ -35,6 +35,7 @@ module Fog
|
|||
request :delete_signing_certificate
|
||||
request :delete_user
|
||||
request :delete_user_policy
|
||||
request :get_account_summary
|
||||
request :get_group
|
||||
request :get_group_policy
|
||||
request :get_instance_profile
|
||||
|
@ -169,7 +170,7 @@ module Fog
|
|||
@persistent = options[:persistent] || false
|
||||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
def reload
|
||||
|
|
|
@ -49,7 +49,7 @@ module Fog
|
|||
# >> g = AWS.network_interfaces.new(:subnet_id => "subnet-someId", options)
|
||||
# >> g.save
|
||||
#
|
||||
# options is an optional hash which may contain 'PrivateIpAddress', 'Description', 'groupSet'
|
||||
# options is an optional hash which may contain 'PrivateIpAddress', 'Description', 'GroupSet'
|
||||
#
|
||||
# == Returns:
|
||||
#
|
||||
|
@ -58,7 +58,13 @@ module Fog
|
|||
|
||||
def save
|
||||
requires :subnet_id
|
||||
data = service.create_network_interface(subnet_id).body['networkInterface']
|
||||
options = {
|
||||
'PrivateIpAddress' => private_ip_address,
|
||||
'Description' => description,
|
||||
'GroupSet' => group_set,
|
||||
}
|
||||
options.delete_if {|key, value| value.nil?}
|
||||
data = service.create_network_interface(subnet_id, options).body['networkInterface']
|
||||
new_attributes = data.reject {|key,value| key == 'requestId'}
|
||||
merge_attributes(new_attributes)
|
||||
true
|
||||
|
|
|
@ -58,7 +58,7 @@ module Fog
|
|||
|
||||
|
||||
def initialize(attributes={})
|
||||
self.groups ||= ["default"] unless (attributes[:subnet_id] || attributes[:security_group_ids])
|
||||
self.groups ||= ["default"] unless (attributes[:subnet_id] || attributes[:security_group_ids] || attributes[:network_interfaces])
|
||||
self.flavor_id ||= 't1.micro'
|
||||
|
||||
# Old 'connection' is renamed as service and should be used instead
|
||||
|
@ -146,6 +146,7 @@ module Fog
|
|||
|
||||
options = {
|
||||
'BlockDeviceMapping' => block_device_mapping,
|
||||
'NetworkInterfaces' => network_interfaces,
|
||||
'ClientToken' => client_token,
|
||||
'EbsOptimized' => ebs_optimized,
|
||||
'IamInstanceProfile.Arn' => @iam_instance_profile_arn,
|
||||
|
|
|
@ -25,6 +25,7 @@ module Fog
|
|||
# ami_launch_index=nil,
|
||||
# availability_zone=nil,
|
||||
# block_device_mapping=nil,
|
||||
# network_interfaces=nil,
|
||||
# client_token=nil,
|
||||
# dns_name=nil,
|
||||
# groups=["default"],
|
||||
|
|
|
@ -37,7 +37,9 @@ module Fog
|
|||
end
|
||||
|
||||
def save
|
||||
unless self.alias_target
|
||||
self.ttl ||= 3600
|
||||
end
|
||||
options = attributes_to_options('CREATE')
|
||||
data = service.change_resource_record_sets(zone.id, [options]).body
|
||||
merge_attributes(data)
|
||||
|
@ -84,9 +86,9 @@ module Fog
|
|||
end
|
||||
|
||||
def attributes_to_options(action)
|
||||
requires :name, :ttl, :type, :zone
|
||||
requires :name, :type, :zone
|
||||
requires_one :value, :alias_target
|
||||
{
|
||||
options = {
|
||||
:action => action,
|
||||
:name => name,
|
||||
:resource_records => [*value],
|
||||
|
@ -97,6 +99,11 @@ module Fog
|
|||
:set_identifier => set_identifier,
|
||||
:region => region
|
||||
}
|
||||
unless self.alias_target
|
||||
requires :ttl
|
||||
options[:ttl] = ttl
|
||||
end
|
||||
options
|
||||
end
|
||||
|
||||
end
|
||||
|
|
|
@ -32,6 +32,8 @@ module Fog
|
|||
:aliases => 'CacheSubnetGroupName'
|
||||
attribute :vpc_security_groups,
|
||||
:aliases => 'VpcSecurityGroups', :type => :array
|
||||
attribute :s3_snapshot_location,
|
||||
:aliases => 'SnapshotArns', :type => :array
|
||||
|
||||
attr_accessor :parameter_group_name
|
||||
|
||||
|
@ -63,6 +65,7 @@ module Fog
|
|||
:port => port,
|
||||
:preferred_availablility_zone => zone,
|
||||
:preferred_maintenance_window => maintenance_window,
|
||||
:s3_snapshot_location => s3_snapshot_location,
|
||||
:parameter_group_name => parameter_group_name || parameter_group['CacheParameterGroupName'],
|
||||
:cache_subnet_group_name => cache_subnet_group_name,
|
||||
:vpc_security_groups => vpc_security_groups,
|
||||
|
|
|
@ -7,9 +7,10 @@ module Fog
|
|||
|
||||
def reset
|
||||
@block_device_mapping = {}
|
||||
@network_interfaces = {}
|
||||
@context = []
|
||||
@contexts = ['blockDeviceMapping', 'groupSet', 'placement', 'productCodes']
|
||||
@instance = { 'blockDeviceMapping' => [], 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [] }
|
||||
@contexts = ['networkInterfaces', 'blockDeviceMapping', 'groupSet', 'placement', 'productCodes']
|
||||
@instance = { 'networkInterfaces' => [], 'blockDeviceMapping' => [], 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [] }
|
||||
@response = { 'groupSet' => [], 'instancesSet' => [] }
|
||||
end
|
||||
|
||||
|
@ -40,8 +41,11 @@ module Fog
|
|||
@instance['instanceState'][name] = value.to_i
|
||||
when 'deleteOnTermination'
|
||||
@block_device_mapping[name] = (value == 'true')
|
||||
@network_interfaces[name] = (value == 'true')
|
||||
when 'deviceName', 'status', 'volumeId'
|
||||
@block_device_mapping[name] = value
|
||||
when 'networkInterfaceId'
|
||||
@network_interfaces[name] = value
|
||||
when 'groupId'
|
||||
@response['groupSet'] << value
|
||||
when 'groupName'
|
||||
|
@ -56,9 +60,12 @@ module Fog
|
|||
when 'blockDeviceMapping'
|
||||
@instance['blockDeviceMapping'] << @block_device_mapping
|
||||
@block_device_mapping = {}
|
||||
when 'networkInterfaces'
|
||||
@instance['networkInterfaces'] << @network_interfaces
|
||||
@network_interfaces = {}
|
||||
when nil
|
||||
@response['instancesSet'] << @instance
|
||||
@instance = { 'blockDeviceMapping' => [], 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [] }
|
||||
@instance = { 'networkInterfaces' => [], 'blockDeviceMapping' => [], 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [] }
|
||||
end
|
||||
when 'launchTime'
|
||||
@instance[name] = Time.parse(value)
|
||||
|
|

lib/fog/aws/parsers/iam/get_account_summary.rb | 46 (new file)
@@ -0,0 +1,46 @@
module Fog
  module Parsers
    module AWS
      module IAM

        class GetAccountSummary < Fog::Parsers::Base

          def reset
            super
            @stack = []
            @response = {'Summary' => {}}
          end

          def start_element(name, attrs = [])
            super
            case name
            when 'SummaryMap'
              @stack << name
            end
          end

          def end_element(name)
            case name
            when 'SummaryMap'
              @stack.pop
            when 'key'
              if @stack.last == 'SummaryMap'
                @key = value
              end
            when 'value'
              if @stack.last == 'SummaryMap'
                @response['Summary'][@key] = value.strip.to_i
              end
            when 'RequestId'
              if @stack.empty?
                @response['RequestId'] = value.strip
              end
            end
          end

        end

      end
    end
  end
end
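
A hedged usage sketch for the new parser above; the credentials are placeholders, and 'Users' is one example of the counters AWS returns in its SummaryMap.

    # Calling the new IAM GetAccountSummary request and reading the parsed body.
    require 'fog'

    iam = Fog::AWS::IAM.new(
      :aws_access_key_id     => ENV['AWS_ACCESS_KEY_ID'],
      :aws_secret_access_key => ENV['AWS_SECRET_ACCESS_KEY']
    )

    response = iam.get_account_summary
    summary  = response.body['Summary']   # Hash of counter name => Integer, per the parser above
    puts summary['Users']                 # e.g. number of IAM users in the account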
@ -11,7 +11,7 @@ module Fog
|
|||
|
||||
def end_element(name)
|
||||
case name
|
||||
when 'Arn', 'Path', 'ServerCertificateId', 'ServerCertificateName'
|
||||
when 'Arn', 'Path', 'ServerCertificateId', 'ServerCertificateName', 'CertificateBody', 'CertificateChain'
|
||||
@response['Certificate'][name] = value
|
||||
when 'UploadDate'
|
||||
@response['Certificate'][name] = Time.parse(value)
|
||||
|
|
|
@ -168,7 +168,7 @@ module Fog
|
|||
@persistent = options[:persistent] || false
|
||||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@version = options[:version] || '2013-05-15'
|
||||
end
|
||||
|
||||
|
|
|
@ -90,7 +90,7 @@ module Fog
|
|||
@port = options[:port] || 443
|
||||
@scheme = options[:scheme] || 'https'
|
||||
|
||||
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
|
||||
end
|
||||
|
||||
|
||||
|
|
13
lib/fog/aws/region_methods.rb
Normal file

@@ -0,0 +1,13 @@
module Fog
module AWS
module RegionMethods

def validate_aws_region region
unless ['ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-1', 'us-east-1', 'us-west-1', 'us-west-2', 'sa-east-1'].include?(region)
raise ArgumentError, "Unknown region: #{region.inspect}"
end
end

end
end
end
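Fog::AWS::RegionMethods centralises region validation for the AWS services touched later in this commit. A minimal sketch of how a service might consume it is below; the FakeService class and the way it is mixed in are illustrative assumptions, not code from this change.

    require 'fog/aws/region_methods'

    # Hypothetical consumer, shown only to illustrate the mixin.
    class FakeService
      include Fog::AWS::RegionMethods

      def initialize(options = {})
        @region = options[:region] || 'us-east-1'
        validate_aws_region(@region) # raises ArgumentError for regions outside the allowed list
      end
    end

    FakeService.new(:region => 'eu-west-1')    # passes validation
    FakeService.new(:region => 'eu-central-1') # raises ArgumentError: Unknown region: "eu-central-1"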
@@ -12,6 +12,8 @@ module Fog
# * public_ip<~String> - Public ip to assign to instance (conditional)
# * network_interface_id<~String> - Id of a nic to associate address with (required in a vpc instance with more than one nic) (conditional)
# * allocation_id<~String> - Allocation Id to associate address with (vpc only) (conditional)
# * private_ip_address<~String> - Private Ip Address to associate address with (vpc only)
# * allow_reassociation<~Boolean> - Allows an elastic ip address to be reassigned (vpc only) (conditional)
#
# ==== Returns
# * response<~Excon::Response>:

@@ -21,34 +23,62 @@ module Fog
# * 'associationId'<~String> - association Id for eip to node (vpc only)
#
# {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-AssociateAddress.html]
def associate_address(instance_id=nil, public_ip=nil, network_interface_id=nil, allocation_id=nil)
def associate_address(*args)
if args.first.kind_of? Hash
params = args.first
else
params = {
:instance_id => args[0],
:public_ip => args[1],
:network_interface_id => args[2],
:allocation_id => args[3],
:private_ip_address => args[4],
:allow_reassociation => args[5],
}
end
# Cannot specify an allocation ip and a public IP at the same time. If you have an allocation Id presumably you are in a VPC
# so we will null out the public IP
public_ip = allocation_id.nil? ? public_ip : nil
params[:public_ip] = params[:allocation_id].nil? ? params[:public_ip] : nil

request(
'Action' => 'AssociateAddress',
'AllocationId' => allocation_id,
'InstanceId' => instance_id,
'NetworkInterfaceId' => network_interface_id,
'PublicIp' => public_ip,
'AllocationId' => params[:allocation_id],
'InstanceId' => params[:instance_id],
'NetworkInterfaceId' => params[:network_interface_id],
'PublicIp' => params[:public_ip],
'PrivateIpAddress' => params[:private_ip_address],
'AllowReassociation' => params[:allow_reassociation],
:idempotent => true,
:parser => Fog::Parsers::Compute::AWS::AssociateAddress.new
)
end

end

class Mock

def associate_address(instance_id=nil, public_ip=nil, network_interface_id=nil, allocation_id=nil)
public_ip = allocation_id.nil? ? public_ip : nil
def associate_address(*args)
if args.first.kind_of? Hash
params = args.first
else
params = {
:instance_id => args[0],
:public_ip => args[1],
:network_interface_id => args[2],
:allocation_id => args[3],
:private_ip_address => args[4],
:allow_reassociation => args[5],
}
end
params[:public_ip] = params[:allocation_id].nil? ? params[:public_ip] : nil
response = Excon::Response.new
response.status = 200
instance = self.data[:instances][instance_id]
address = public_ip.nil? ? nil : self.data[:addresses][public_ip]
if ((instance && address) || (instance && !allocation_id.nil?) || (!allocation_id.nil? && !network_interface_id.nil?))
if !allocation_id.nil?
allocation_ip = describe_addresses( 'allocation-id' => "#{allocation_id}").body['addressesSet'].first
instance = self.data[:instances][params[:instance_id]]
# address = self.data[:addresses][params[:public_ip]]
address = params[:public_ip].nil? ? nil : self.data[:addresses][params[:public_ip]]
# This is a classic server, a VPC with a single network interface id or a VPC with multiple network interfaces one of which is specified
if ((instance && address) || (instance && !params[:allocation_id].nil?) || (!params[:allocation_id].nil? && !network_interface_id.nil?))
if !params[:allocation_id].nil?
allocation_ip = describe_addresses( 'allocation-id' => "#{params[:allocation_id]}").body['addressesSet'].first
if !allocation_ip.nil?
public_ip = allocation_ip['publicIp']
address = public_ip.nil? ? nil : self.data[:addresses][public_ip]

@@ -58,23 +88,23 @@ module Fog
if current_instance = self.data[:instances][address['instanceId']]
current_instance['ipAddress'] = current_instance['originalIpAddress']
end
address['instanceId'] = instance_id
address['instanceId'] = params[:instance_id]
end
# detach other address (if any)
if self.data[:addresses][instance['ipAddress']]
self.data[:addresses][instance['ipAddress']]['instanceId'] = nil
end
if !public_ip.nil?
instance['ipAddress'] = public_ip
instance['dnsName'] = Fog::AWS::Mock.dns_name_for(public_ip)
if !params[:public_ip].nil?
instance['ipAddress'] = params[:public_ip]
instance['dnsName'] = Fog::AWS::Mock.dns_name_for(params[:public_ip])
end
response.status = 200
if !instance_id.nil? && !public_ip.nil?
if !params[:instance_id].nil? && !params[:public_ip].nil?
response.body = {
'requestId' => Fog::AWS::Mock.request_id,
'return' => true
}
elsif !allocation_id.nil?
elsif !params[:allocation_id].nil?
response.body = {
'requestId' => Fog::AWS::Mock.request_id,
'return' => true,

@@ -82,16 +112,14 @@ module Fog
}
end
response
#elsif ! network_interface_id.nil? && allocation_id.nil?
# raise Fog::Compute::AWS::NotFound.new("You must specify an AllocationId when specifying a NetworkInterfaceID")
#elsif instance.nil? && network_interface_id.nil?
# raise Fog::Compute::AWS::Error.new("You must specify either an InstanceId or a NetworkInterfaceID")
#elsif !instance && !network_interface_id
# raise Fog::Compute::AWS::Error.new(" 2 You must specify either an InstanceId or a NetworkInterfaceID")
elsif !instance
raise Fog::Compute::AWS::NotFound.new("You must specify either an InstanceId or a NetworkInterfaceID")
elsif !address
raise Fog::Compute::AWS::Error.new("AuthFailure => The address '#{public_ip}' does not belong to you.")
elsif params[:network_interface_id].nil? && params[:allocation_id].nil?
raise Fog::Compute::AWS::NotFound.new("You must specify an AllocationId when specifying a NetworkInterfaceID")
else (!instance.nil? && params[:network_interface_id].nil?) || (params[:instance_id].nil? && !params[:network_interface_id].nil?)
raise Fog::Compute::AWS::Error.new("You must specify either an InstanceId or a NetworkInterfaceID")
end
end
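With the change above, associate_address accepts either the old positional arguments or a single options hash; the hash form is the only practical way to pass the new :private_ip_address and :allow_reassociation fields. A hedged sketch of both calling styles (credentials and ids are placeholders, not values from this commit):

    compute = Fog::Compute.new(:provider => 'AWS', :aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')

    # Old positional style still works:
    compute.associate_address('i-12345678', '203.0.113.10')

    # New hash style, needed for the VPC-only options:
    compute.associate_address(
      :instance_id         => 'i-12345678',
      :allocation_id       => 'eipalloc-aabbccdd',
      :allow_reassociation => true
    )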
@@ -184,13 +184,29 @@ module Fog
'ipProtocol' => permission['IpProtocol'],
'fromPort' => Integer(permission['FromPort']),
'toPort' => Integer(permission['ToPort']),
'groups' => (permission['Groups'] || []).map {|g| {'groupName' => g['GroupName'], 'userId' => g['UserId'] || self.data[:owner_id], 'groupId' => self.data[:security_groups][g['GroupName']] && self.data[:security_groups][g['GroupName']]['groupId']} },
'groups' => (permission['Groups'] || []).map do |authorized_group|
security_group = if group_name = authorized_group['GroupName']
self.data[:security_groups][group_name] || {}
elsif group_id = authorized_group['GroupId']
self.data[:security_groups].values.find { |sg| sg['groupId'] == group_id }
end

{'groupName' => authorized_group['GroupName'] || security_group["groupName"], 'userId' => authorized_group['UserId'] || self.data[:owner_id], 'groupId' => authorized_group["GroupId"] || security_group['groupId']}
end,
'ipRanges' => (permission['IpRanges'] || []).map {|r| { 'cidrIp' => r['CidrIp'] } }
}
else
normalized_permissions << {
'ipProtocol' => permission['IpProtocol'],
'groups' => (permission['Groups'] || []).map {|g| {'groupName' => g['GroupName'], 'userId' => g['UserId'] || self.data[:owner_id], 'groupId' => self.data[:security_groups][g['GroupName']]['groupId']} },
'groups' => (permission['Groups'] || []).map do |authorized_group|
security_group = if group_name = authorized_group['GroupName']
self.data[:security_groups][group_name] || {}
elsif group_id = authorized_group['GroupId']
self.data[:security_groups].values.find { |sg| sg['groupId'] == group_id }
end

{'groupName' => authorized_group['GroupName'] || security_group["groupName"], 'userId' => authorized_group['UserId'] || self.data[:owner_id], 'groupId' => authorized_group["GroupId"] || security_group['groupId']}
end,
'ipRanges' => (permission['IpRanges'] || []).map {|r| { 'cidrIp' => r['CidrIp'] } }
}
end
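The mock above now resolves authorized groups by either GroupName or GroupId, so mocked ingress rules that reference a group id behave like the real API. A hedged example under Fog.mock! (names and credentials are placeholders):

    Fog.mock!
    compute = Fog::Compute.new(:provider => 'AWS', :aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')

    web = compute.security_groups.create(:name => 'web', :description => 'web tier')
    compute.security_groups.create(:name => 'db', :description => 'db tier')

    # Reference the source group by id rather than by name.
    compute.authorize_security_group_ingress('db',
      'IpPermissions' => [{
        'IpProtocol' => 'tcp', 'FromPort' => 5432, 'ToPort' => 5432,
        'Groups'     => [{ 'GroupId' => web.group_id }]
      }]
    )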
@@ -12,7 +12,7 @@ module Fog
# * options<~Hash>:
# * PrivateIpAddress<~String> - The private IP address of the network interface
# * Description<~String> - The description of the network interface
# * groupSet<~Array> - The security group IDs for use by the network interface
# * GroupSet<~Array> - The security group IDs for use by the network interface
#
# === Returns
# * response<~Excon::Response>:

@@ -70,7 +70,7 @@ module Fog
groups = {}
if options['GroupSet']
options['GroupSet'].each do |group_id|
name = self.data[:security_groups].select { |k,v| v['groupId'] == group_id } .first.first
name = self.data[:security_groups].select { |k,v| v['groupId'] == group_id }.first
if name.nil?
raise Fog::Compute::AWS::Error.new("Unknown security group '#{group_id}' specified")
end
@@ -44,14 +44,14 @@ module Fog
if cidrBlock
response.status = 200
vpc_id = Fog::AWS::Mock.vpc_id
self.data[:vpcs].push({
vpc = {
'vpcId' => vpc_id,
'state' => 'pending',
'cidrBlock' => cidrBlock,
'dhcpOptionsId' => Fog::AWS::Mock.request_id,
'tagSet' => {}

})
}
self.data[:vpcs].push(vpc)

#Creates a default route for the subnet
default_route = self.route_tables.new(:vpc_id => vpc_id)

@@ -74,7 +74,7 @@ module Fog

response.body = {
'requestId' => Fog::AWS::Mock.request_id,
'vpcSet' => self.data[:vpcs]
'vpcSet' => [vpc]
}
else
response.status = 400
@@ -71,10 +71,20 @@ module Fog
end
end

active_instances = self.data[:instances].values.select do |instance|
if instance['groupSet'].include?(name) && instance['instanceState'] != "terminated"
instance
end
end

unless used_by_groups.empty?
raise Fog::Compute::AWS::Error.new("InvalidGroup.InUse => Group #{self.data[:owner_id]}:#{name} is used by groups: #{used_by_groups.uniq.join(" ")}")
end

if active_instances.any?
raise Fog::Compute::AWS::Error.new("InUse => There are active instances using security group '#{name}'")
end

self.data[:security_groups].delete(name)
response.status = 200
response.body = {
@@ -151,6 +151,10 @@ module Fog
instance_set = instance_set.reject{|instance| ![*filter_value].include?(instance['stateReason'][aliased_key])}
elsif filter_key == "group-name"
instance_set = instance_set.reject {|instance| !instance['groupSet'].include?(filter_value)}
elsif filter_key == "group-id"
group_ids = [*filter_value]
security_group_names = self.data[:security_groups].values.select { |sg| group_ids.include?(sg['groupId']) }.map { |sg| sg['groupName'] }
instance_set = instance_set.reject {|instance| (security_group_names & instance['groupSet']).empty?}
else
aliased_key = aliases[filter_key]
instance_set = instance_set.reject {|instance| ![*filter_value].include?(instance[aliased_key])}
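The group-id filter branch above maps security group ids back to group names before matching, so mocked describe_instances calls can now filter by id. A hedged example (credentials are placeholders):

    Fog.mock!
    compute = Fog::Compute.new(:provider => 'AWS', :aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')

    sg_id = compute.security_groups.first.group_id
    # Returns only instances whose groupSet includes the group with this id.
    compute.describe_instances('group-id' => sg_id)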
@@ -205,6 +209,19 @@ module Fog

if self.data[:instances][instance['instanceId']]

instance['networkInterfaces'] = self.data[:network_interfaces].select{|ni,ni_conf|
ni_conf['attachment']['instanceId'] == instance['instanceId']
}.map{|ni,ni_conf|
{
'ownerId' => ni_conf['ownerId'],
'subnetId' => ni_conf['subnetId'],
'vpcId' => ni_conf['vpcId'],
'networkInterfaceId' => ni_conf['networkInterfaceId'],
'groupSet' => ni_conf['groupSet'],
'attachmentId' => ni_conf['attachment']['attachmentId']
}
}

reservation_set[instance['reservationId']] ||= {
'groupSet' => instance['groupSet'],
'groupIds' => instance['groupIds'],
@@ -69,14 +69,15 @@ module Fog
'protocol' => 'ipProtocol',
'to-port' => 'toPort'
}
security_group_groups = lambda { |security_group| (security_group['ipPermissions'] || []).map { |permission| permission["groups"] }.flatten.compact.uniq }
for filter_key, filter_value in filters
if permission_key = filter_key.split('ip-permission.')[1]
if permission_key == 'group-name'
security_group_info = security_group_info.reject{|security_group| !security_group['ipPermissions']['groups'].detect {|group| [*filter_value].include?(group['groupName'])}}
security_group_info = security_group_info.reject{|security_group| !security_group_groups.call(security_group).detect {|group| [*filter_value].include?(group['groupName'])}}
elsif permission_key == 'group-id'
security_group_info = security_group_info.reject{|security_group| !security_group['ipPermissions']['groups'].detect {|group| [*filter_value].include?(group['groupId'])}}
security_group_info = security_group_info.reject{|security_group| !security_group_groups.call(security_group).detect {|group| [*filter_value].include?(group['groupId'])}}
elsif permission_key == 'user-id'
security_group_info = security_group_info.reject{|security_group| !security_group['ipPermissions']['groups'].detect {|group| [*filter_value].include?(group['userId'])}}
security_group_info = security_group_info.reject{|security_group| !security_group_groups.call(security_group).detect {|group| [*filter_value].include?(group['userId'])}}
else
aliased_key = permission_aliases[filter_key]
security_group_info = security_group_info.reject{|security_group| !security_group['ipPermissions'].detect {|permission| [*filter_value].include?(permission[aliased_key])}}
84
lib/fog/aws/requests/compute/replace_route.rb
Executable file

@@ -0,0 +1,84 @@
module Fog
module Compute
class AWS
class Real

require 'fog/aws/parsers/compute/basic'

# Replaces a route in a route table within a VPC.
#
# ==== Parameters
# * RouteTableId<~String> - The ID of the route table for the route.
# * options<~Hash>:
# * DestinationCidrBlock<~String> - The CIDR address block used for the destination match. Routing decisions are based on the most specific match.
# * GatewayId<~String> - The ID of an Internet gateway attached to your VPC.
# * InstanceId<~String> - The ID of a NAT instance in your VPC. The operation fails if you specify an instance ID unless exactly one network interface is attached.
# * NetworkInterfaceId<~String> - The ID of a network interface.
#
# === Returns
# * response<~Excon::Response>:
# * body<~Hash>:
# * 'requestId'<~String> - Id of the request
# * 'return'<~Boolean> - Returns true if the request succeeds. Otherwise, returns an error.
#
# {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-ReplaceRoute.html]
def replace_route(route_table_id, destination_cidr_block, options = {})
options['DestinationCidrBlock'] ||= destination_cidr_block

request({
'Action' => 'ReplaceRoute',
'RouteTableId' => route_table_id,
:idempotent => true,
:parser => Fog::Parsers::Compute::AWS::Basic.new
}.merge!(options))

end
end

class Mock

def replace_route(route_table_id, destination_cidr_block, options = {})
options['instanceOwnerId'] ||= nil
options['DestinationCidrBlock'] ||= destination_cidr_block

route_table = self.data[:route_tables].find { |routetable| routetable["routeTableId"].eql? route_table_id }
if !route_table.nil? && destination_cidr_block
if !options['gatewayId'].nil? || !options['instanceId'].nil? || !options['networkInterfaceId'].nil?
if !options['gatewayId'].nil? && self.internet_gateways.all('internet-gateway-id'=>options['gatewayId']).first.nil?
raise Fog::Compute::AWS::NotFound.new("The gateway ID '#{options['gatewayId']}' does not exist")
elsif !options['instanceId'].nil? && self.servers.all('instance-id'=>options['instanceId']).first.nil?
raise Fog::Compute::AWS::NotFound.new("The instance ID '#{options['instanceId']}' does not exist")
elsif !options['networkInterfaceId'].nil? && self.network_interfaces.all('networkInterfaceId'=>options['networkInterfaceId']).first.nil?
raise Fog::Compute::AWS::NotFound.new("The networkInterface ID '#{options['networkInterfaceId']}' does not exist")
elsif route_table['routeSet'].find { |route| route['destinationCidrBlock'].eql? destination_cidr_block }.nil?
raise Fog::Compute::AWS::Error, "RouteAlreadyExists => The route identified by #{destination_cidr_block} doesn't exist."
else
response = Excon::Response.new
route_set = route_table['routeSet'].find { |routeset| routeset['destinationCidrBlock'].eql? destination_cidr_block }
route_set.merge!(options)
route_set['state'] = 'pending'
route_set['origin'] = 'ReplaceRoute'

response.status = 200
response.body = {
'requestId'=> Fog::AWS::Mock.request_id,
'return' => true
}
response
end
else
message = 'MissingParameter => '
message << 'The request must contain either a gateway id, a network interface id, or an instance id'
raise Fog::Compute::AWS::Error.new(message)
end
elsif route_table.nil?
raise Fog::Compute::AWS::NotFound.new("The routeTable ID '#{route_table_id}' does not exist")
elsif destination_cidr_block.empty?
raise Fog::Compute::AWS::InvalidParameterValue.new("Value () for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.")
end
end

end
end
end
end
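A hedged example of the new ReplaceRoute request; the route table, CIDR block, and gateway id below are placeholders:

    compute = Fog::Compute.new(:provider => 'AWS', :aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')

    # Point an existing 0.0.0.0/0 route at a different internet gateway.
    compute.replace_route('rtb-11223344', '0.0.0.0/0', 'GatewayId' => 'igw-55667788')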
@@ -30,6 +30,18 @@ module Fog
# * 'Ebs.DeleteOnTermination'<~String> - specifies whether or not to delete the volume on instance termination
# * 'Ebs.VolumeType'<~String> - Type of EBS volue. Valid options in ['standard', 'io1'] default is 'standard'.
# * 'Ebs.Iops'<~String> - The number of I/O operations per second (IOPS) that the volume supports. Required when VolumeType is 'io1'
# * 'NetworkInterfaces'<~Array>: array of hashes
# * 'NetworkInterfaceId'<~String> - An existing interface to attach to a single instance
# * 'DeviceIndex'<~String> - The device index. Applies both to attaching an existing network interface and creating a network interface
# * 'SubnetId'<~String> - The subnet ID. Applies only when creating a network interface
# * 'Description'<~String> - A description. Applies only when creating a network interface
# * 'PrivateIpAddress'<~String> - The primary private IP address. Applies only when creating a network interface
# * 'SecurityGroupId'<~String> - The ID of the security group. Applies only when creating a network interface.
# * 'DeleteOnTermination'<~String> - Indicates whether to delete the network interface on instance termination.
# * 'PrivateIpAddresses.PrivateIpAddress'<~String> - The private IP address. This parameter can be used multiple times to specify explicit private IP addresses for a network interface, but only one private IP address can be designated as primary.
# * 'PrivateIpAddresses.Primary'<~Bool> - Indicates whether the private IP address is the primary private IP address.
# * 'SecondaryPrivateIpAddressCount'<~Bool> - The number of private IP addresses to assign to the network interface.
# * 'AssociatePublicIpAddress'<~String> - Indicates whether to assign a public IP address to an instance in a VPC. The public IP address is assigned to a specific network interface
# * 'ClientToken'<~String> - unique case-sensitive token for ensuring idempotency
# * 'DisableApiTermination'<~Boolean> - specifies whether or not to allow termination of the instance from the api
# * 'SecurityGroup'<~Array> or <~String> - Name of security group(s) for instances (not supported for VPC)

@@ -108,6 +120,13 @@ module Fog
if options['UserData']
options['UserData'] = Base64.encode64(options['UserData'])
end
if network_interfaces = options.delete('NetworkInterfaces')
network_interfaces.each_with_index do |mapping, index|
for key, value in mapping
options.merge!({ format("NetworkInterface.%d.#{key}", index) => value })
end
end
end

idempotent = !(options['ClientToken'].nil? || options['ClientToken'].empty?)
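The loop above flattens each NetworkInterfaces hash into the indexed query parameters EC2 expects. A hedged illustration of the transformation (values are placeholders):

    options = {
      'NetworkInterfaces' => [
        { 'DeviceIndex' => '0', 'SubnetId' => 'subnet-aabbccdd', 'AssociatePublicIpAddress' => 'true' }
      ]
    }

    # After the flattening loop, the request carries:
    #   'NetworkInterface.0.DeviceIndex'              => '0'
    #   'NetworkInterface.0.SubnetId'                 => 'subnet-aabbccdd'
    #   'NetworkInterface.0.AssociatePublicIpAddress' => 'true'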
@@ -159,11 +178,36 @@ module Fog
}
end

network_interfaces = (options['NetworkInterfaces'] || []).inject([]) do |mapping, device|
device_index = device.fetch("DeviceIndex", 0)
subnet_id = device.fetch("SubnetId", options[:subnet_id] || Fog::AWS::Mock.subnet_id)
private_ip_address = device.fetch("PrivateIpAddress", options[:private_ip_address] || Fog::AWS::Mock.private_ip_address)
delete_on_termination = device.fetch("DeleteOnTermination", true)
description = device.fetch("Description", "mock_network_interface")
security_group_id = device.fetch("SecurityGroupId", self.data[:security_groups]['default']['groupId'])
interface_options = {
"PrivateIpAddress" => private_ip_address,
"GroupSet" => device.fetch("GroupSet", [security_group_id]),
"Description" => description
}

interface_id = device.fetch("NetworkInterfaceId", create_network_interface(subnet_id, interface_options))

mapping << {
"networkInterfaceId" => interface_id,
"subnetId" => subnet_id,
"status" => "attached",
"attachTime" => Time.now,
"deleteOnTermination" => delete_on_termination,
}
end

instance = {
'amiLaunchIndex' => i,
'associatePublicIP' => options['associatePublicIP'] || false,
'architecture' => 'i386',
'blockDeviceMapping' => block_device_mapping,
'networkInterfaces' => network_interfaces,
'clientToken' => options['clientToken'],
'dnsName' => nil,
'ebsOptimized' => options['EbsOptimized'] || false,
@@ -10,14 +10,16 @@ module Fog
# * PipelineId <~String> - The ID of the pipeline
# * Sphere <~String> - Specifies whether the query applies to components or instances.
# Allowable values: COMPONENT, INSTANCE, ATTEMPT.
# * Marker <~String> - The starting point for the results to be returned.
# ==== Returns
# * response<~Excon::Response>:
# * body<~Hash>:
def query_objects(id, sphere)
def query_objects(id, sphere, options={})
params = {
'pipelineId' => id,
'sphere' => sphere,
}
params['marker'] = options[:marker] if options[:marker]

response = request({
:body => Fog::JSON.encode(params),

@@ -30,7 +32,7 @@ module Fog
end

class Mock
def query_objects(id, objects)
def query_objects(id, sphere, options={})
Fog::Mock.not_implemented
end
end
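query_objects now takes an options hash so callers can pass a paging marker. A hedged sketch follows; the pipeline id is a placeholder, and the assumption that the call yields a decoded hash with a 'marker' key comes from the DataPipeline QueryObjects API rather than from this hunk:

    data_pipeline = Fog::AWS::DataPipeline.new(:aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')

    first_page = data_pipeline.query_objects('df-0123456789ABCDEF', 'INSTANCE')
    # Feed the returned marker back in to fetch the next page.
    next_page  = data_pipeline.query_objects('df-0123456789ABCDEF', 'INSTANCE', :marker => first_page['marker'])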
@@ -75,12 +75,15 @@ module Fog
raise(Excon::Errors.status_error({:expects => 200}, response))
end

if options[:type]
records = zone[:records][options[:type]].values
records = if options[:type]
records_type = zone[:records][options[:type]]
records_type.values if records_type
else
records = zone[:records].values.map{|r| r.values}.flatten
zone[:records].values.map{|r| r.values}.flatten
end

records ||= []

# sort for pagination
records.sort! { |a,b| a[:name].gsub(zone[:name],"") <=> b[:name].gsub(zone[:name],"") }
@@ -23,6 +23,7 @@ module Fog
# * :preferred_availablility_zone <~String>
# * :preferred_maintenance_window <~String>
# * :cache_subnet_group_name <~String>
# * :s3_snapshot_location <~String> - Amazon resource location for snapshot
# === Returns
# * response <~Excon::Response>:
# * body <~Hash>

@@ -45,6 +46,10 @@ module Fog
:parser => Fog::Parsers::AWS::Elasticache::SingleCacheCluster.new
}

if s3_snapshot_location = options.delete(:s3_snapshot_location)
req_options.merge!(Fog::AWS.indexed_param('SnapshotArns.member.%d', [*s3_snapshot_location]))
end

if cache_security_groups = options.delete(:security_group_names)
req_options.merge!(Fog::AWS.indexed_param('CacheSecurityGroupNames.member.%d', [*cache_security_groups]))
end
@@ -27,9 +27,25 @@ module Fog
end

class Mock
def create_cache_parameter_group(name, description = name,
family = 'memcached1.4')
Fog::Mock.not_implemented
def create_cache_parameter_group(name, description = name, family = 'memcached1.4')
response = Excon::Response.new
if self.data[:parameter_groups] and self.data[:parameter_groups][name]
raise Fog::AWS::Elasticache::IdentifierTaken.new("Parameter group #{name} already exists")
end

data = {
'CacheParameterGroupName' => name,
'CacheParameterGroupFamily' => family.downcase,
'Description' => description
}
self.data[:parameter_groups][name] = data

response.body = {
"ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id },
"CreateCacheParameterGroupResult"=> {"CacheParameterGroup"=> data}
}
response.status = 200
response
end
end
end
@@ -23,7 +23,17 @@ module Fog

class Mock
def delete_cache_parameter_group(name)
Fog::Mock.not_implemented
response = Excon::Response.new

if self.data[:parameter_groups].delete(name)
response.status = 200
response.body = {
"ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id },
}
response
else
raise Fog::AWS::Elasticache::NotFound.new("CacheParameterGroup not found: #{name}")
end
end
end
end
@@ -26,7 +26,23 @@ module Fog

class Mock
def describe_cache_parameter_groups(name = nil, options = {})
Fog::Mock.not_implemented
response = Excon::Response.new
parameter_set = []
if name
if server = self.data[:parameter_groups][name]
parameter_set << server
else
raise Fog::AWS::Elasticache::NotFound.new("CacheParameterGroup #{name} not found")
end
else
parameter_set = self.data[:parameter_groups].values
end

response.status = 200

response.body = { "CacheParameterGroups" => parameter_set }
response
end
end
end
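With create, describe, and delete now mocked for cache parameter groups, the full round trip can be exercised offline. A hedged sketch under Fog.mock! (credentials are placeholders):

    Fog.mock!
    elasticache = Fog::AWS::Elasticache.new(:aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')

    elasticache.create_cache_parameter_group('my-params', 'tuned settings', 'memcached1.4')
    elasticache.describe_cache_parameter_groups('my-params').body['CacheParameterGroups']
    # => [{"CacheParameterGroupName"=>"my-params", "CacheParameterGroupFamily"=>"memcached1.4", "Description"=>"tuned settings"}]
    elasticache.delete_cache_parameter_group('my-params')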
80
lib/fog/aws/requests/iam/get_account_summary.rb
Normal file

@@ -0,0 +1,80 @@
module Fog
module AWS
class IAM
class Real

require 'fog/aws/parsers/iam/get_account_summary'

# Retrieve account level information about account entity usage and IAM quotas
#
# ==== Returns
# * response<~Excon::Response>:
# * body<~Hash>:
# * 'Summary'<~Hash>:
# * 'AccessKeysPerUserQuota'<~Integer> - Maximum number of access keys that can be created per user
# * 'AccountMFAEnabled'<~Integer> - 1 if the root account has an MFA device assigned to it, 0 otherwise
# * 'AssumeRolePolicySizeQuota'<~Integer> - Maximum allowed size for assume role policy documents (in kilobytes)
# * 'GroupPolicySizeQuota'<~Integer> - Maximum allowed size for Group policy documents (in kilobytes)
# * 'Groups'<~Integer> - Number of Groups for the AWS account
# * 'GroupsPerUserQuota'<~Integer> - Maximum number of groups a user can belong to
# * 'GroupsQuota'<~Integer> - Maximum groups allowed for the AWS account
# * 'InstanceProfiles'<~Integer> - Number of instance profiles for the AWS account
# * 'InstanceProfilesQuota'<~Integer> - Maximum instance profiles allowed for the AWS account
# * 'MFADevices'<~Integer> - Number of MFA devices, either assigned or unassigned
# * 'MFADevicesInUse'<~Integer> - Number of MFA devices that have been assigned to an IAM user or to the root account
# * 'Providers'<~Integer> -
# * 'RolePolicySizeQuota'<~Integer> - Maximum allowed size for role policy documents (in kilobytes)
# * 'Roles'<~Integer> - Number of roles for the AWS account
# * 'RolesQuota'<~Integer> - Maximum roles allowed for the AWS account
# * 'ServerCertificates'<~Integer> - Number of server certificates for the AWS account
# * 'ServerCertificatesQuota'<~Integer> - Maximum server certificates allowed for the AWS account
# * 'SigningCertificatesPerUserQuota'<~Integer> - Maximum number of X509 certificates allowed for a user
# * 'UserPolicySizeQuota'<~Integer> - Maximum allowed size for user policy documents (in kilobytes)
# * 'Users'<~Integer> - Number of users for the AWS account
# * 'UsersQuota'<~Integer> - Maximum users allowed for the AWS account
# * 'RequestId'<~String> - Id of the request
#
# ==== See Also
# http://docs.amazonwebservices.com/IAM/latest/APIReference/API_CreateAccessKey.html
#
def get_account_summary
request(
'Action' => 'GetAccountSummary',
:parser => Fog::Parsers::AWS::IAM::GetAccountSummary.new
)
end

end

class Mock

def get_account_summary
Excon::Response.new.tap do |response|
response.status = 200
response.body = {
'Summary' => {
'AccessKeysPerUserQuota' => 2,
'AccountMFAEnabled' => 0,
'GroupPolicySizeQuota' => 10240,
'Groups' => 31,
'GroupsPerUserQuota' => 10,
'GroupsQuota' => 50,
'MFADevices' => 20,
'MFADevicesInUse' => 10,
'ServerCertificates' => 5,
'ServerCertificatesQuota' => 10,
'SigningCertificatesPerUserQuota' => 2,
'UserPolicySizeQuota' => 10240,
'Users' => 35,
'UsersQuota' => 150,
},
'RequestId' => Fog::AWS::Mock.request_id
}
end
end

end

end
end
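A hedged example of the new IAM request; credentials are placeholders and the numbers shown are the mock's canned values:

    iam = Fog::AWS::IAM.new(:aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')

    summary = iam.get_account_summary.body['Summary']
    summary['Users']      # => 35 in mock mode
    summary['UsersQuota'] # => 150 in mock mode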
@@ -24,6 +24,7 @@ module Fog
# * MultiAZ <~Boolean> Specifies if the DB Instance is a Multi-AZ deployment
# * PreferredBackupWindow <~String> The daily time range during which automated backups are created if automated backups are enabled
# * PreferredMaintenanceWindow <~String> The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage
# * VpcSecurityGroups <~Array> A list of VPC Security Group IDs to authorize on this DB instance
# ==== Returns
# * response<~Excon::Response>:
# * body<~Hash>:

@@ -33,6 +34,10 @@ module Fog
options.merge!(Fog::AWS.indexed_param('DBSecurityGroups.member.%d', [*security_groups]))
end

if vpc_security_groups = options.delete('VpcSecurityGroups')
options.merge!(Fog::AWS.indexed_param('VpcSecurityGroupIds.member.%d', [*vpc_security_groups]))
end

request({
'Action' => 'ModifyDBInstance',
'DBInstanceIdentifier' => db_name,
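The new VpcSecurityGroups option is expanded into VpcSecurityGroupIds.member.N query parameters. A hedged example; the positional apply-immediately argument is assumed from this fog version's modify_db_instance signature, and the ids and credentials are placeholders:

    rds = Fog::AWS::RDS.new(:aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')

    rds.modify_db_instance('mydb', true, 'VpcSecurityGroups' => ['sg-11111111', 'sg-22222222'])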
@@ -62,7 +62,7 @@ module Fog
@persistent = options[:persistent] || false
@port = options[:port] || 443
@scheme = options[:scheme] || 'https'
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
end

def reload

@@ -1,5 +1,6 @@
require 'fog/aws/core'

# See http://docs.amazonwebservices.com/general/latest/gr/signature-version-4.html
#
module Fog
module AWS
class SignatureV4

@@ -89,7 +89,7 @@ module Fog
@persistent = options[:persistent] || false
@port = options[:port] || 443
@scheme = options[:scheme] || 'https'
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
end

private

@@ -61,7 +61,7 @@ module Fog
@persistent = options[:persistent] || false
@port = options[:port] || 443
@scheme = options[:scheme] || 'https'
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
end

def reload

@@ -89,7 +89,7 @@ module Fog
@persistent = options[:persistent] || false
@port = options[:port] || 443
@scheme = options[:scheme] || 'https'
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
end

def reload
@@ -1,5 +1,4 @@
require 'fog/aws/core'
require 'fog/storage'

module Fog
module Storage

@@ -524,7 +523,7 @@ DATA
else
@connection = nil
end
@connection ||= Fog::Connection.new(uri, @persistent, @connection_options)
@connection ||= Fog::XML::Connection.new(uri, @persistent, @connection_options)
end

def request(params, &block)

@@ -551,7 +550,7 @@ DATA
headers = (error.response.is_a?(Hash) ? error.response[:headers] : error.response.headers)
uri = URI.parse(headers['Location'])
Fog::Logger.warning("fog: followed redirect to #{uri.host}, connecting to the matching region will be more performant")
response = Fog::Connection.new("#{uri.scheme}://#{uri.host}:#{uri.port}", false, @connection_options).request(original_params, &block)
response = Fog::XML::Connection.new("#{uri.scheme}://#{uri.host}:#{uri.port}", false, @connection_options).request(original_params, &block)
end

response

@@ -83,7 +83,7 @@ module Fog
@persistent = options[:persistent] || false
@port = options[:port] || 443
@scheme = options[:scheme] || 'https'
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
end

def reload

@@ -123,7 +123,6 @@ module Fog
:expects => 200,
:idempotent => idempotent,
:headers => { 'Content-Type' => 'application/x-www-form-urlencoded' },
:host => @host,
:method => 'POST',
:parser => parser
})

@@ -1,5 +1,4 @@
require 'fog/bare_metal_cloud/core'
require 'fog/compute'

module Fog
module Compute

@@ -59,7 +58,7 @@ module Fog
@persistent = options[:persistent] || false
@port = options[:port] || 443
@scheme = options[:scheme] || 'https'
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
end

def reload

@@ -1,4 +1,5 @@
require 'fog/core'
require 'fog/xml'

module Fog
module BareMetalCloud
@@ -1,23 +0,0 @@
module Fog
module Billing

def self.[](provider)
self.new(:provider => provider)
end

def self.new(attributes)
attributes = attributes.dup
provider = attributes.delete(:provider).to_s.downcase.to_sym
if provider == :stormondemand
require 'fog/storm_on_demand/billing'
Fog::Billing::StormOnDemand.new(attributes)
else
raise ArgumentError.new("#{provider} has no billing service")
end
end

def self.providers
Fog.services[:billing]
end
end
end

@@ -65,6 +65,7 @@ require 'fog/bin/clodo'
require 'fog/bin/digitalocean'
require 'fog/bin/dnsimple'
require 'fog/bin/dnsmadeeasy'
require 'fog/bin/fogdocker'
require 'fog/bin/dreamhost'
require 'fog/bin/dynect'
require 'fog/bin/ecloud'

@@ -85,6 +86,7 @@ require 'fog/bin/rage4'
require 'fog/bin/riakcs'
require 'fog/bin/openstack'
require 'fog/bin/ovirt'
require 'fog/bin/sakuracloud'
require 'fog/bin/serverlove'
require 'fog/bin/stormondemand'
require 'fog/bin/terremark'
30
lib/fog/bin/fogdocker.rb
Normal file

@@ -0,0 +1,30 @@
class Fogdocker < Fog::Bin
class << self

def class_for(key)
case key
when :compute
Fog::Compute::Fogdocker
else
raise ArgumentError, "Unrecognized service: #{key}"
end
end

def [](service)
@@connections ||= Hash.new do |hash, key|
hash[key] = case key
when :compute
Fog::Compute.new(:provider => 'Fogdocker')
else
raise ArgumentError, "Unrecognized service: #{key.inspect}"
end
end
@@connections[service]
end

def services
Fog::Fogdocker.services
end

end
end
@@ -5,6 +5,8 @@ class Joyent < Fog::Bin
case key
when :compute
Fog::Compute::Joyent
when :analytics
Fog::Joyent::Analytics
else
raise ArgumentError, "Unrecognized service: #{key}"
end

@@ -16,6 +18,8 @@ class Joyent < Fog::Bin
when :compute
Fog::Logger.warning("Joyent[:compute] is not recommended, use Compute[:joyent] for portability")
Fog::Compute.new(:provider => 'Joyent')
when :analytics
Fog::Joyent::Analytics.new
else
raise ArgumentError, "Unrecognized service: #{key.inspect}"
end
36
lib/fog/bin/sakuracloud.rb
Normal file

@@ -0,0 +1,36 @@
class SakuraCloud < Fog::Bin
class << self

def class_for(key)
case key
when :compute
Fog::Compute::SakuraCloud
when :volume
Fog::Volume::SakuraCloud
else
raise ArgumentError, "Unrecognized service: #{key}"
end
end

def [](service)
@@connections ||= Hash.new do |hash, key|
hash[key] = case key
when :compute
Fog::Logger.warning("SakuraCloud[:compute] is not recommended, use Compute[:sakuracloud] for portability")
Fog::Compute.new(:provider => 'SakuraCloud')
when :volume
Fog::Logger.warning("SakuraCloud[:compute] is not recommended, use Compute[:SakuraCloud] for portability")
Fog::Compute.new(:provider => 'SakuraCloud')
else
raise ArgumentError, "Unrecognized service: #{key.inspect}"
end
end
@@connections[service]
end

def services
Fog::SakuraCloud.services
end

end
end
@@ -50,7 +50,7 @@ module Fog
@persistent = options[:persistent] || false
@port = options[:bluebox_port] || 443
@scheme = options[:bluebox_scheme] || 'https'
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
end

def reload

@@ -1,5 +1,4 @@
require 'fog/bluebox/core'
require 'fog/compute'

module Fog
module Compute

@@ -69,7 +68,7 @@ module Fog
@persistent = options[:persistent] || false
@port = options[:bluebox_port] || 443
@scheme = options[:bluebox_scheme] || 'https'
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
end

def reload

@@ -1,4 +1,5 @@
require 'fog/core'
require 'fog/json'

module Fog
module Bluebox

@@ -1,5 +1,4 @@
require 'fog/bluebox/core'
require 'fog/dns'

module Fog
module DNS

@@ -60,7 +59,7 @@ module Fog
@persistent = options[:persistent] || false
@port = options[:bluebox_port] || 443
@scheme = options[:bluebox_scheme] || 'https'
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
end

def reload

@@ -21,6 +21,7 @@ module Fog
attribute :state, :aliases => "status"
attribute :storage
attribute :template
attribute :ipv6_only

attr_accessor :hostname, :password, :lb_applications, :lb_services, :lb_backends

@@ -93,6 +94,7 @@ module Fog

options['username'] = username
options['hostname'] = hostname if @hostname
options['ipv6_only'] = ipv6_only if ipv6_only
data = service.create_block(flavor_id, image_id, location_id, options)
merge_attributes(data.body)
true

@@ -30,6 +30,8 @@ module Fog
'location' => location_id
}

query['ipv6_only'] = options.delete('ipv6_only') if options['ipv6_only']

request(
:expects => 200,
:method => 'POST',
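Bluebox blocks can now be created IPv6-only; the model attribute above is passed straight through to the create_block query. A hedged example, with credentials and the flavor/image/location ids shown as placeholders:

    compute = Fog::Compute.new(:provider => 'Bluebox', :bluebox_customer_id => 'CUSTOMER_ID', :bluebox_api_key => 'API_KEY')

    compute.servers.create(
      :flavor_id   => 'FLAVOR_UUID',
      :image_id    => 'IMAGE_UUID',
      :location_id => 'LOCATION_UUID',
      :ipv6_only   => true
    )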
@@ -1,23 +0,0 @@
module Fog
module CDN

def self.[](provider)
self.new(:provider => provider)
end

def self.new(attributes)
attributes = attributes.dup # prevent delete from having side effects
provider = attributes.delete(:provider).to_s.downcase.to_sym
if self.providers.include?(provider)
require "fog/#{provider}/cdn"
return Fog::CDN.const_get(Fog.providers[provider]).new(attributes)
end
raise ArgumentError.new("#{provider} is not a recognized cdn provider")
end

def self.providers
Fog.services[:cdn]
end

end
end
@@ -84,7 +84,7 @@ module Fog
@clodo_must_reauthenticate = false
authenticate
Excon.ssl_verify_peer = false if options[:clodo_servicenet] == true
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}", options[:persistent])
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}", options[:persistent])
end

def reload

@@ -1,4 +1,5 @@
require 'fog/core'
require 'fog/json'

module Fog
module Clodo

@@ -12,7 +13,7 @@ module Fog
url = clodo_auth_url.match(/^https?:/) ? \
clodo_auth_url : 'https://' + clodo_auth_url
uri = URI.parse(url)
connection = Fog::Connection.new(url)
connection = Fog::XML::Connection.new(url)
@clodo_api_key = options[:clodo_api_key]
@clodo_username = options[:clodo_username]
response = connection.request({
@@ -1,6 +1,5 @@
require 'fog/cloudsigma/core'
require 'fog/cloudsigma/connection'
require 'fog/compute'

module Fog
module Compute

@@ -37,7 +37,7 @@ module Fog
@api_version = options[:cloudsigma_api_version] || '2.0'
@path_prefix = "#{@api_path_prefix}/#{@api_version}/"

@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
end

def request(params)

@@ -1,4 +1,5 @@
require 'fog/core'
require 'fog/json'

module Fog
module CloudSigma

@@ -13,6 +13,7 @@ module Fog
attribute :mac, :type => :string
attribute :model, :type => :string
attribute :vlan
attribute :firewall_policy
model_attribute :ip_v4_conf, IPConf
model_attribute :ip_v6_conf, IPConf
@@ -76,6 +76,10 @@ module Fog
service.stop_server(identity)
end

def ready?
status == "running"
end

def open_vnc
requires :identity
service.open_vnc(identity)
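The new ready? predicate lets fog's generic wait_for helper poll a CloudSigma server until it reports running. A hedged sketch; credentials are placeholders and the start call is assumed from the surrounding server model rather than shown in this hunk:

    compute = Fog::Compute.new(:provider => 'CloudSigma', :cloudsigma_username => 'user@example.com', :cloudsigma_password => 'secret')

    server = compute.servers.first
    server.start
    server.wait_for { ready? } # returns once status == "running"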
@@ -1,5 +1,4 @@
require 'fog/cloudstack/core'
require 'fog/compute'
require 'digest/md5'

module Fog

@@ -150,7 +149,7 @@ module Fog
@path = options[:cloudstack_path] || '/client/api'
@port = options[:cloudstack_port] || 443
@scheme = options[:cloudstack_scheme] || 'https'
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", options[:cloudstack_persistent], {:ssl_verify_peer => false})
@connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", options[:cloudstack_persistent], {:ssl_verify_peer => false})
end

def reload

@@ -1,4 +1,5 @@
require 'fog/core'
require 'fog/json'
require 'uri'

module Fog
@@ -1,80 +0,0 @@
module Fog
module Compute

def self.[](provider)
self.new(:provider => provider)
end

def self.new(attributes)
attributes = attributes.dup # prevent delete from having side effects
provider = attributes.delete(:provider).to_s.downcase.to_sym

case provider
when :gogrid
require 'fog/go_grid/compute'
Fog::Compute::GoGrid.new(attributes)
when :hp
version = attributes.delete(:version)
version = version.to_s.downcase.to_sym unless version.nil?
if version == :v2
require 'fog/hp/compute_v2'
Fog::Compute::HPV2.new(attributes)
else
Fog::Logger.deprecation "HP Cloud Compute V1 service will be soon deprecated. Please use `:version => v2` attribute to use HP Cloud Compute V2 service."
require 'fog/hp/compute'
Fog::Compute::HP.new(attributes)
end
when :new_servers
require 'fog/bare_metal_cloud/compute'
Fog::Logger.deprecation "`new_servers` is deprecated. Please use `bare_metal_cloud` instead."
Fog::Compute::BareMetalCloud.new(attributes)
when :baremetalcloud
require 'fog/bare_metal_cloud/compute'
Fog::Compute::BareMetalCloud.new(attributes)
when :rackspace
version = attributes.delete(:version)
version = version.to_s.downcase.to_sym unless version.nil?
if version == :v1
Fog::Logger.deprecation "First Gen Cloud Servers are deprecated. Please use `:version => :v2` attribute to use Next Gen Cloud Servers."
require 'fog/rackspace/compute'
Fog::Compute::Rackspace.new(attributes)
else
require 'fog/rackspace/compute_v2'
Fog::Compute::RackspaceV2.new(attributes)
end
when :stormondemand
require 'fog/storm_on_demand/compute'
Fog::Compute::StormOnDemand.new(attributes)
when :vcloud
require 'fog/vcloud/compute'
Fog::Vcloud::Compute.new(attributes)
when :vclouddirector
require 'fog/vcloud_director/compute'
Fog::Compute::VcloudDirector.new(attributes)
else
if self.providers.include?(provider)
require "fog/#{provider}/compute"
return Fog::Compute.const_get(Fog.providers[provider]).new(attributes)
end
raise ArgumentError.new("#{provider} is not a recognized compute provider")
end
end

def self.providers
Fog.services[:compute]
end

def self.servers
servers = []
for provider in self.providers
begin
servers.concat(self[provider].servers)
rescue # ignore any missing credentials/etc
end
end
servers
end

end
end
@@ -1,104 +0,0 @@
require 'fog/core/model'

module Fog
module Compute
class Server < Fog::Model

attr_writer :username, :private_key, :private_key_path, :public_key, :public_key_path, :ssh_port, :ssh_options

def username
@username ||= 'root'
end

def private_key_path
@private_key_path ||= Fog.credentials[:private_key_path]
@private_key_path &&= File.expand_path(@private_key_path)
end

def private_key
@private_key ||= private_key_path && File.read(private_key_path)
end

def public_key_path
@public_key_path ||= Fog.credentials[:public_key_path]
@public_key_path &&= File.expand_path(@public_key_path)
end

def public_key
@public_key ||= public_key_path && File.read(public_key_path)
end

# Port used for ssh/scp interactions with server.
# @return [Integer] IP port
# @note By default this returns 22
def ssh_port
@ssh_port ||= 22
end

# Sets the proc used to determine the IP Address used for ssh/scp interactions.
# @example
# service.servers.bootstrap :name => 'bootstrap-server',
# :flavor_id => service.flavors.first.id,
# :image_id => service.images.find {|img| img.name =~ /Ubuntu/}.id,
# :public_key_path => '~/.ssh/fog_rsa.pub',
# :private_key_path => '~/.ssh/fog_rsa',
# :ssh_ip_address => Proc.new {|server| server.private_ip_address }
#
# @note By default scp/ssh will use the public_ip_address if this proc is not set.
def ssh_ip_address=(proc)
@ssh_ip_address = proc
end

# IP Address used for ssh/scp interactions with server.
# @return [String] IP Address
# @note By default this returns the public_ip_address
def ssh_ip_address
return public_ip_address unless @ssh_ip_address
return @ssh_ip_address.call(self) if @ssh_ip_address.is_a?(Proc)
@ssh_ip_address
end

def ssh_options
@ssh_options ||= {}
ssh_options = @ssh_options.merge({:port => ssh_port})
if private_key
ssh_options[:key_data] = [private_key]
ssh_options[:auth_methods] = ["publickey"]
end
ssh_options
end

def scp(local_path, remote_path, upload_options = {})
require 'net/scp'
requires :ssh_ip_address, :username

Fog::SCP.new(ssh_ip_address, username, ssh_options).upload(local_path, remote_path, upload_options)
end

alias_method :scp_upload, :scp

def scp_download(remote_path, local_path, download_options = {})
require 'net/scp'
requires :ssh_ip_address, :username

Fog::SCP.new(ssh_ip_address, username, ssh_options).download(remote_path, local_path, download_options)
end

def ssh(commands, options={}, &blk)
require 'net/ssh'
requires :ssh_ip_address, :username

options = ssh_options.merge(options)

Fog::SSH.new(ssh_ip_address, username, options).run(commands, &blk)
end

def sshable?(options={})
ready? && !ssh_ip_address.nil? && !!Timeout::timeout(8) { ssh('pwd', options) }
rescue SystemCallError, Net::SSH::AuthenticationFailed, Net::SSH::Disconnect, Timeout::Error
false
end

end
end
end
@@ -1,52 +0,0 @@
# external core dependencies
require 'base64'
require 'cgi'
require 'uri'
require 'excon'
require 'fileutils'
require 'formatador'
require 'openssl'
require 'time'
require 'timeout'
require 'ipaddr'

# internal core dependencies
require "fog/version"
require 'fog/core/attributes'
require 'fog/core/collection'
require 'fog/core/connection'
require 'fog/core/credentials'
require 'fog/core/current_machine'
require 'fog/core/deprecation'
require 'fog/core/errors'
require 'fog/core/hmac'
require 'fog/core/logger'
require 'fog/core/model'
require 'fog/core/mock'
require 'fog/core/provider'
require 'fog/core/service'
require 'fog/core/ssh'
require 'fog/core/scp'
require 'fog/core/time'
require 'fog/core/wait_for'
require 'fog/core/wait_for_defaults'
require 'fog/core/class_from_string'
require 'fog/core/uuid'

# data exchange specific (to be extracted and used on a per provider basis)
require 'fog/xml'
require 'fog/json'

# deprecation wrappers
require 'fog/core/deprecated/connection'

# service wrappers
require 'fog/compute'
require 'fog/identity'
require 'fog/image'
require 'fog/volume'
require 'fog/cdn'
require 'fog/dns'
require 'fog/network'
require 'fog/storage'
require 'fog/orchestration'
@@ -1,221 +0,0 @@
module Fog
  module Attributes
    module ClassMethods

      def _load(marshalled)
        new(Marshal.load(marshalled))
      end

      def aliases
        @aliases ||= {}
      end

      def attributes
        @attributes ||= []
      end

      def attribute(name, options = {})
        class_eval <<-EOS, __FILE__, __LINE__
          def #{name}
            attributes[:#{name}]
          end
        EOS
        case options[:type]
        when :boolean
          class_eval <<-EOS, __FILE__, __LINE__
            def #{name}=(new_#{name})
              attributes[:#{name}] = case new_#{name}
              when true, 'true'
                true
              when false, 'false'
                false
              end
            end
          EOS
        when :float
          class_eval <<-EOS, __FILE__, __LINE__
            def #{name}=(new_#{name})
              attributes[:#{name}] = new_#{name}.to_f
            end
          EOS
        when :integer
          class_eval <<-EOS, __FILE__, __LINE__
            def #{name}=(new_#{name})
              attributes[:#{name}] = new_#{name}.to_i
            end
          EOS
        when :string
          class_eval <<-EOS, __FILE__, __LINE__
            def #{name}=(new_#{name})
              attributes[:#{name}] = new_#{name}.to_s
            end
          EOS
        when :time
          class_eval <<-EOS, __FILE__, __LINE__
            def #{name}=(new_#{name})
              attributes[:#{name}] = if new_#{name}.nil? || new_#{name} == "" || new_#{name}.is_a?(Time)
                new_#{name}
              else
                Time.parse(new_#{name})
              end
            end
          EOS
        when :array
          class_eval <<-EOS, __FILE__, __LINE__
            def #{name}=(new_#{name})
              attributes[:#{name}] = [*new_#{name}]
            end
          EOS
        else
          if squash = options[:squash]
            class_eval <<-EOS, __FILE__, __LINE__
              def #{name}=(new_data)
                if new_data.is_a?(Hash)
                  if new_data.has_key?(:'#{squash}')
                    attributes[:#{name}] = new_data[:'#{squash}']
                  elsif new_data.has_key?("#{squash}")
                    attributes[:#{name}] = new_data["#{squash}"]
                  else
                    attributes[:#{name}] = [ new_data ]
                  end
                else
                  attributes[:#{name}] = new_data
                end
              end
            EOS
          else
            class_eval <<-EOS, __FILE__, __LINE__
              def #{name}=(new_#{name})
                attributes[:#{name}] = new_#{name}
              end
            EOS
          end
        end
        @attributes ||= []
        @attributes |= [name]
        for new_alias in [*options[:aliases]]
          aliases[new_alias] = name
        end
      end

      def identity(name, options = {})
        @identity = name
        self.attribute(name, options)
      end

      def ignore_attributes(*args)
        @ignored_attributes = args.collect {|attr| attr.to_s }
      end

      def ignored_attributes
        @ignored_attributes ||= []
      end

    end

    module InstanceMethods

      def _dump(level)
        Marshal.dump(attributes)
      end

      def attributes
        @attributes ||= {}
      end

      def dup
        copy = super
        copy.dup_attributes!
        copy
      end

      def identity
        send(self.class.instance_variable_get('@identity'))
      end

      def identity=(new_identity)
        send("#{self.class.instance_variable_get('@identity')}=", new_identity)
      end

      def merge_attributes(new_attributes = {})
        for key, value in new_attributes
          unless self.class.ignored_attributes.include?(key)
            if aliased_key = self.class.aliases[key]
              send("#{aliased_key}=", value)
            elsif self.respond_to?("#{key}=", true)
              send("#{key}=", value)
            else
              attributes[key] = value
            end
          end
        end
        self
      end

      # Returns true if a remote resource has been assigned an
      # identity and we can assume it has been persisted.
      #
      # @return [Boolean]
      def persisted?
        !!identity
      end

      # Returns true if a remote resource has not been assigned an
      # identity.
      #
      # This was added for an ActiveRecord-like feel but has been
      # outdated by the ActiveModel API using {#persisted?}
      #
      # @deprecated Use inverted form of {#persisted?}
      # @return [Boolean]
      def new_record?
        Fog::Logger.deprecation("#new_record? is deprecated, use !persisted? instead [light_black](#{caller.first})[/]")
        !persisted?
      end

      # Check that the attributes specified in args exist and are not nil
      def requires(*args)
        missing = missing_attributes(args)
        if missing.length == 1
          raise(ArgumentError, "#{missing.first} is required for this operation")
        elsif missing.any?
          raise(ArgumentError, "#{missing[0...-1].join(", ")} and #{missing[-1]} are required for this operation")
        end
      end

      def requires_one(*args)
        missing = missing_attributes(args)
        if missing.length == args.length
          raise(ArgumentError, "#{missing[0...-1].join(", ")} or #{missing[-1]} are required for this operation")
        end
      end

      protected

      def missing_attributes(args)
        missing = []
        for arg in [:service] | args
          unless send("#{arg}") || attributes.has_key?(arg)
            missing << arg
          end
        end
        missing
      end

      def dup_attributes!
        @attributes = @attributes.dup if @attributes
      end

      private

      def remap_attributes(attributes, mapping)
        for key, value in mapping
          if attributes.key?(key)
            attributes[value] = attributes.delete(key)
          end
        end
      end

    end
  end
end
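As a rough illustration of the attribute DSL above, a provider model might declare typed and aliased attributes as in the sketch below. ExampleServer and its attribute names are made up; the sketch assumes Fog::Model (defined later in this diff) is available, since it already mixes in ClassMethods and InstanceMethods.

class ExampleServer < Fog::Model
  identity  :id                                   # generates #id / #id= and marks it as the identity
  attribute :name,       :type => :string
  attribute :created_at, :type => :time
  attribute :flavor,     :aliases => 'flavorId'   # provider key 'flavorId' is routed to #flavor=
end

server = ExampleServer.new(:name => 'web-1', 'flavorId' => '2',
                           :created_at => '2014-01-01T00:00:00Z')
server.flavor      # => "2"  (remapped through the alias by merge_attributes)
server.created_at  # => 2014-01-01 00:00:00 UTC  (coerced by :type => :time)
server.persisted?  # => false until an identity is assigned
server.identity = 'srv-123'
server.persisted?  # => true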
@@ -1,26 +0,0 @@
module Fog
  # get class by string or nil
  def self.class_from_string classname, defaultpath=""
    if classname and classname.is_a? String then
      chain = classname.split("::")
      klass = Kernel
      chain.each do |klass_string|
        klass = klass.const_get klass_string
      end
      if klass.is_a? Class then
        klass
      elsif defaultpath != nil then
        Fog.class_from_string((defaultpath.split("::")+chain).join("::"), nil)
      else
        nil
      end
    elsif classname and classname.is_a? Class then
      classname
    else
      nil
    end
  rescue NameError
    defaultpath != nil ? Fog.class_from_string((defaultpath.split("::")+chain).join("::"), nil) : nil
  end
end
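A small sketch of how the helper above resolves names, assuming fog's core has been loaded so that Fog::HMAC (defined further down in this diff) exists:

Fog.class_from_string("Fog::HMAC")           # => Fog::HMAC
Fog.class_from_string("HMAC", "Fog")         # retried as "Fog::HMAC" => Fog::HMAC
Fog.class_from_string("NoSuchThing", "Fog")  # => nil (the NameError is rescued)
Fog.class_from_string(Fog::HMAC)             # => Fog::HMAC (classes pass straight through)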
@@ -1,161 +0,0 @@
require "fog/core/deprecated_connection_accessors"

module Fog
  class Collection < Array

    extend Fog::Attributes::ClassMethods
    include Fog::Attributes::InstanceMethods
    include Fog::Core::DeprecatedConnectionAccessors

    attr_reader :service

    Array.public_instance_methods(false).each do |method|
      unless [:reject, :select, :slice, :clear, :inspect].include?(method.to_sym)
        class_eval <<-EOS, __FILE__, __LINE__
          def #{method}(*args)
            unless @loaded
              lazy_load
            end
            super
          end
        EOS
      end
    end

    %w[reject select slice].each do |method|
      class_eval <<-EOS, __FILE__, __LINE__
        def #{method}(*args)
          unless @loaded
            lazy_load
          end
          data = super
          self.clone.clear.concat(data)
        end
      EOS
    end

    def self.model(new_model=nil)
      if new_model == nil
        @model
      else
        @model = new_model
      end
    end

    def clear
      @loaded = true
      super
    end

    def create(attributes = {})
      object = new(attributes)
      object.save
      object
    end

    def destroy(identity)
      object = new(:identity => identity)
      object.destroy
    end

    # Creates a new Fog::Collection based around the passed service
    #
    # @param [Hash] attributes
    # @option attributes [Fog::Service] service Instance of a service
    #
    # @return [Fog::Collection]
    #
    def initialize(attributes = {})
      @service = attributes.delete(:service)
      @loaded = false
      merge_attributes(attributes)
    end

    def inspect
      Thread.current[:formatador] ||= Formatador.new
      data = "#{Thread.current[:formatador].indentation}<#{self.class.name}\n"
      Thread.current[:formatador].indent do
        unless self.class.attributes.empty?
          data << "#{Thread.current[:formatador].indentation}"
          data << self.class.attributes.map {|attribute| "#{attribute}=#{send(attribute).inspect}"}.join(",\n#{Thread.current[:formatador].indentation}")
          data << "\n"
        end
        data << "#{Thread.current[:formatador].indentation}["
        unless self.empty?
          data << "\n"
          Thread.current[:formatador].indent do
            data << self.map {|member| member.inspect}.join(",\n")
            data << "\n"
          end
          data << Thread.current[:formatador].indentation
        end
        data << "]\n"
      end
      data << "#{Thread.current[:formatador].indentation}>"
      data
    end

    def load(objects)
      clear
      for object in objects
        self << new(object)
      end
      self
    end

    def model
      self.class.instance_variable_get('@model')
    end

    def new(attributes = {})
      unless attributes.is_a?(::Hash)
        raise(ArgumentError.new("Initialization parameters must be an attributes hash, got #{attributes.class} #{attributes.inspect}"))
      end
      model.new(
        {
          :collection => self,
          :service => service
        }.merge(attributes)
      )
    end

    def reload
      clear
      lazy_load
      self
    end

    def table(attributes = nil)
      Formatador.display_table(self.map {|instance| instance.attributes}, attributes)
    end

    def to_json(options = {})
      Fog::JSON.encode(self.map {|member| member.attributes})
    end

    private

    def lazy_load
      self.all
    end

  end

  # Base class for collection classes whose 'all' method returns only a single page of results and passes the
  # 'Marker' option along as self.filters[:marker]
  class PagedCollection < Collection

    def each(filters=filters)
      if block_given?
        begin
          page = self.all(filters)
          # We need to explicitly use the base 'each' method here on the page, otherwise we get infinite recursion
          base_each = Fog::Collection.instance_method(:each)
          base_each.bind(page).call { |item| yield item }
        end while self.filters[:marker]
      end
      self
    end

  end
end
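To show how a provider collection plugs into the class above, here is a minimal sketch. ExampleServers reuses the hypothetical ExampleServer model from the attributes sketch, and `service.list_servers` is an assumed service call returning an array of attribute hashes; real providers name this differently.

class ExampleServers < Fog::Collection
  model ExampleServer

  # Collections are expected to define #all; lazy_load (and therefore most
  # wrapped Array methods) call it the first time the collection is touched.
  def all
    data = service.list_servers   # hypothetical request returning [{'id' => ..., 'name' => ...}, ...]
    load(data)                    # clears, wraps each hash in the model, and marks the collection loaded
  end
end

servers = ExampleServers.new(:service => service)   # `service` is a provider service instance
servers.first                       # triggers lazy_load -> #all on first access
servers.create(:name => 'web-1')    # new + save in one step (assumes the model defines #save)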
@@ -1,72 +0,0 @@
module Fog
  module Core

    # Fog::Core::Connection is a generic class to contain an HTTP link to an API.
    #
    # It is intended to be subclassed by providers who can then add their own
    # modifications such as authentication or response object.
    #
    class Connection
      # Prepares the connection and sets defaults for any future requests.
      #
      # @param [String] url The destination URL
      # @param persistent [Boolean]
      # @param [Hash] params
      # @option params [String] :body Default text to be sent over a socket. Only used if :body absent in Connection#request params
      # @option params [Hash<Symbol, String>] :headers The default headers to supply in a request. Only used if params[:headers] is not supplied to Connection#request
      # @option params [String] :host The destination host's reachable DNS name or IP, in the form of a String
      # @option params [String] :path Default path; appears after 'scheme://host:port/'. Only used if params[:path] is not supplied to Connection#request
      # @option params [Fixnum] :port The port on which to connect, to the destination host
      # @option params [Hash] :query Default query; appended to the 'scheme://host:port/path/' in the form of '?key=value'. Will only be used if params[:query] is not supplied to Connection#request
      # @option params [String] :scheme The protocol; 'https' causes OpenSSL to be used
      # @option params [String] :proxy Proxy server; e.g. 'http://myproxy.com:8888'
      # @option params [Fixnum] :retry_limit Set how many times we'll retry a failed request. (Default 4)
      # @option params [Class] :instrumentor Responds to #instrument as in ActiveSupport::Notifications
      # @option params [String] :instrumentor_name Name prefix for #instrument events. Defaults to 'excon'
      #
      def initialize(url, persistent=false, params={})
        unless params.has_key?(:debug_response)
          params[:debug_response] = true
        end
        params[:headers] ||= {}
        params[:headers]['User-Agent'] ||= "fog/#{Fog::VERSION}"
        params.merge!(:persistent => params.fetch(:persistent, persistent))
        @excon = Excon.new(url, params)
      end

      # Makes a request over the connection using Excon
      #
      # @param [Hash] params
      # @option params [String] :body text to be sent over a socket
      # @option params [Hash<Symbol, String>] :headers The default headers to supply in a request
      # @option params [String] :host The destination host's reachable DNS name or IP, in the form of a String
      # @option params [String] :path appears after 'scheme://host:port/'
      # @option params [Fixnum] :port The port on which to connect, to the destination host
      # @option params [Hash] :query appended to the 'scheme://host:port/path/' in the form of '?key=value'
      # @option params [String] :scheme The protocol; 'https' causes OpenSSL to be used
      # @option params [Proc] :response_block
      #
      # @return [Excon::Response]
      #
      # @raise [Excon::Errors::StubNotFound]
      # @raise [Excon::Errors::Timeout]
      # @raise [Excon::Errors::SocketError]
      #
      def request(params, &block)
        @excon.request(params, &block)
      end

      # Make {#request} available even when it has been overridden by a subclass
      # to allow backwards compatibility.
      #
      alias_method :original_request, :request
      protected :original_request

      # Closes the connection
      #
      def reset
        @excon.reset
      end
    end
  end
end
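A minimal sketch of driving Fog::Core::Connection directly; the endpoint, path, and query values are placeholders rather than a real service, and the request options shown are standard Excon options that the class passes straight through.

# Build a non-persistent connection with a default header, then issue a GET.
connection = Fog::Core::Connection.new(
  'https://api.example.com',     # placeholder endpoint
  false,                         # persistent
  :headers => { 'Accept' => 'application/json' }
)

response = connection.request(
  :expects => 200,               # Excon raises unless the response status matches
  :method  => 'GET',
  :path    => '/servers',
  :query   => { 'limit' => 10 }
)
response.status  # => 200
response.body    # => raw response body as a String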
@@ -1,70 +0,0 @@
require 'yaml'

module Fog
  require 'fog/core/deprecation'

  # Assign a new credential to use from configuration file
  # @param [String, Symbol] new_credential name of new credential to use
  # @return [Symbol] name of the new credential
  def self.credential=(new_credential)
    @credentials = nil
    @credential = new_credential && new_credential.to_sym
  end

  # @return [String, Symbol] The credential to use in Fog
  def self.credential
    @credential ||= ( ENV["FOG_CREDENTIAL"] && ENV["FOG_CREDENTIAL"].to_sym ) || :default
  end

  # @return [String] The path for the configuration file
  def self.credentials_path
    @credential_path ||= begin
      path = ENV["FOG_RC"] || (ENV['HOME'] && File.directory?(ENV['HOME']) && '~/.fog')
      File.expand_path(path) if path
    rescue
      nil
    end
  end

  # @return [String] The new path for credentials file
  def self.credentials_path=(new_credentials_path)
    @credentials = nil
    @credential_path = new_credentials_path
  end

  # @return [Hash] The credentials pulled from the configuration file
  # @raise [LoadError] Configuration unavailable in configuration file
  def self.credentials
    @credentials ||= begin
      if credentials_path && File.exists?(credentials_path)
        credentials = self.symbolize_credentials(YAML.load_file(credentials_path))
        (credentials && credentials[credential]) || Fog::Errors.missing_credentials
      else
        {}
      end
    end
  end

  # @return [Hash] The newly assigned credentials
  def self.credentials=(new_credentials)
    @credentials = new_credentials
  end

  def self.symbolize_credential?(key)
    ![:headers].include?(key)
  end

  def self.symbolize_credentials(args)
    if args.is_a? Hash
      copy = Array.new
      args.each do |key, value|
        obj = symbolize_credential?(key) ? self.symbolize_credentials(value) : value
        copy.push(key.to_sym, obj)
      end
      Hash[*copy]
    else
      args
    end
  end

end
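A short sketch of how these helpers are typically exercised; the ~/.fog contents below are placeholder values, and the key names follow the template printed by Fog::Errors.missing_credentials further down.

# ~/.fog (or the file pointed at by FOG_RC) is plain YAML keyed by credential set:
#
#   :default:
#     :aws_access_key_id:     REPLACE_ME
#     :aws_secret_access_key: REPLACE_ME
#   :staging:
#     :rackspace_username:    REPLACE_ME
#     :rackspace_api_key:     REPLACE_ME
#
Fog.credential = :staging                # or export FOG_CREDENTIAL=staging
Fog.credentials[:rackspace_username]     # => "REPLACE_ME" (keys are symbolized on load)
Fog.credentials_path                     # => expanded path of FOG_RC or ~/.fog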
@@ -1,34 +0,0 @@
require 'thread'

module Fog
  class CurrentMachine
    @@lock = Mutex.new
    AMAZON_AWS_CHECK_IP = 'http://checkip.amazonaws.com'

    def self.ip_address= ip_address
      @@lock.synchronize do
        @@ip_address = ip_address
      end
    end

    # Get the ip address of the machine from which this command is run. It is
    # recommended that you surround calls to this function with a timeout block
    # to ensure optimum performance in the case where the amazonaws checkip
    # service is unavailable.
    #
    # @example Get the current ip address
    #   begin
    #     Timeout::timeout(5) do
    #       puts "Your ip address is #{Fog::CurrentMachine.ip_address}"
    #     end
    #   rescue Timeout::Error
    #     puts "Service timeout"
    #   end
    #
    # @raise [Excon::Errors::Error] if the net/http request fails.
    def self.ip_address
      @@lock.synchronize do
        @@ip_address ||= Excon.get(AMAZON_AWS_CHECK_IP).body.chomp
      end
    end
  end
end
@@ -12,13 +12,14 @@ module Fog
   #
   # @see https://github.com/geemus/excon/blob/master/lib/excon/connection.rb
   #
-  class Connection < Fog::XML::SAXParserConnection
+  class Connection < Fog::XML::Connection
     def request(params, &block)
-      if (parser = params.delete(:parser))
-        super(parser, params)
+      if params.key?(:parser)
+        Fog::Logger.deprecation("Fog::XML::Connection is deprecated use Fog::XML::Connection instead [light_black](#{caller.first})[/]")
       else
-        original_request(params)
-      end
+        Fog::Logger.deprecation("Fog::XML::Connection is deprecated use Fog::Core::Connection instead [light_black](#{caller.first})[/]")
+      end
+      super(params)
     end
   end
 end
@@ -1,23 +0,0 @@
module Fog
  module Deprecation

    def deprecate(older, newer)
      module_eval <<-EOS, __FILE__, __LINE__
        def #{older}(*args)
          Fog::Logger.deprecation("#{self} => ##{older} is deprecated, use ##{newer} instead [light_black](#{caller.first})[/]")
          send(:#{newer}, *args)
        end
      EOS
    end

    def self_deprecate(older, newer)
      module_eval <<-EOS, __FILE__, __LINE__
        def self.#{older}(*args)
          Fog::Logger.deprecation("#{self} => ##{older} is deprecated, use ##{newer} instead [light_black](#{caller.first})[/]")
          send(:#{newer}, *args)
        end
      EOS
    end

  end
end
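A sketch of how a class wires these helpers up; ExampleClient and its method names are invented for the illustration.

class ExampleClient
  extend Fog::Deprecation

  def list_servers
    # real implementation would live here
  end

  # Defines #servers, which logs a deprecation warning and forwards to #list_servers.
  deprecate(:servers, :list_servers)
end

ExampleClient.new.servers   # logs "[fog][DEPRECATION] ... #servers is deprecated, use #list_servers instead"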
@@ -1,118 +0,0 @@
module Fog
  module Errors

    class Error < StandardError
      attr_accessor :verbose

      def self.slurp(error, message = nil)
        new_error = new(message || error.message)
        new_error.set_backtrace(error.backtrace)
        new_error.verbose = error.message
        new_error
      end
    end

    class MockNotImplemented < Fog::Errors::Error; end

    class NotFound < Fog::Errors::Error; end

    class LoadError < LoadError; end

    class TimeoutError < Fog::Errors::Error; end

    class NotImplemented < Fog::Errors::Error; end

    # @return [String] The error message that will be raised, if credentials cannot be found
    def self.missing_credentials
      missing_credentials_message = <<-YML
Missing Credentials

To run as '#{Fog.credential}', add the following to your resource config file: #{Fog.credentials_path}
An alternate file may be used by placing its path in the FOG_RC environment variable

#######################################################
# Fog Credentials File
#
# Key-value pairs should look like:
# :aws_access_key_id: 022QF06E7MXBSAMPLE
:#{Fog.credential}:
  :aws_access_key_id:
  :aws_secret_access_key:
  :bluebox_api_key:
  :bluebox_customer_id:
  :brightbox_client_id:
  :brightbox_secret:
  :clodo_api_key:
  :clodo_username:
  :go_grid_api_key:
  :go_grid_shared_secret:
  :google_client_email:
  :google_key_location:
  :google_project:
  :google_storage_access_key_id:
  :google_storage_secret_access_key:
  :hp_access_key:
  :hp_secret_key:
  :hp_tenant_id:
  :hp_avl_zone:
  :linode_api_key:
  :local_root:
  :bare_metal_cloud_password:
  :bare_metal_cloud_username:
  :public_key_path:
  :private_key_path:
  :openstack_api_key:
  :openstack_username:
  :openstack_auth_url:
  :openstack_tenant:
  :openstack_region:
  :ovirt_username:
  :ovirt_password:
  :ovirt_url:
  :libvirt_uri:
  :rackspace_api_key:
  :rackspace_username:
  :rackspace_servicenet:
  :rackspace_cdn_ssl:
  :rage4_email:
  :rage4_password:
  :riakcs_access_key_id:
  :riakcs_secret_access_key:
  :stormondemand_username:
  :stormondemand_password:
  :terremark_username:
  :terremark_password:
  :voxel_api_key:
  :voxel_api_secret:
  :zerigo_email:
  :zerigo_token:
  :dnsimple_email:
  :dnsimple_password:
  :dnsmadeeasy_api_key:
  :dnsmadeeasy_secret_key:
  :dreamhost_api_key:
  :cloudstack_host:
  :cloudstack_api_key:
  :cloudstack_secret_access_key:
  :vsphere_server:
  :vsphere_username:
  :vsphere_password:
  :libvirt_username:
  :libvirt_password:
  :libvirt_uri:
  :libvirt_ip_command:
  :ibm_username:
  :ibm_password:
  :vcloud_director_host:
  :vcloud_director_username:
  :vcloud_director_password:
#
# End of Fog Credentials File
#######################################################

      YML
      raise(Fog::Errors::LoadError.new(missing_credentials_message))
    end

  end
end
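The slurp helper above is typically used to re-raise a lower-level exception as a fog error while keeping its backtrace and full message in #verbose. A hedged sketch, reusing the hypothetical `connection` from the Fog::Core::Connection example earlier:

begin
  connection.request(:expects => 200, :method => 'GET', :path => '/missing')
rescue Excon::Errors::NotFound => error
  raise Fog::Errors::NotFound.slurp(error, 'resource not found')   # #verbose keeps the original Excon message
end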
@@ -1,35 +0,0 @@
module Fog
  class HMAC

    def initialize(type, key)
      @key = key
      case type
      when 'sha1'
        setup_sha1
      when 'sha256'
        setup_sha256
      end
    end

    def sign(data)
      @signer.call(data)
    end

    private

    def setup_sha1
      @digest = OpenSSL::Digest.new('sha1')
      @signer = lambda do |data|
        OpenSSL::HMAC.digest(@digest, @key, data)
      end
    end

    def setup_sha256
      @digest = OpenSSL::Digest.new('sha256')
      @signer = lambda do |data|
        OpenSSL::HMAC.digest(@digest, @key, data)
      end
    end

  end
end
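A quick usage sketch; the key and string-to-sign are placeholders, and the Base64 encoding step is just one common way to present the raw digest bytes when signing requests.

signer    = Fog::HMAC.new('sha256', 'placeholder-secret-key')
signature = signer.sign("GET\n/servers\n20140101T000000Z")   # raw digest bytes
Base64.encode64(signature).chomp                             # base64-encode for use in a request header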
@@ -1,44 +0,0 @@
module Fog
  class Logger

    @channels = {
      :deprecation => ::STDERR,
      :warning => ::STDERR
    }

    @channels[:debug] = ::STDERR if ENV['DEBUG']

    def self.[](channel)
      @channels[channel]
    end

    def self.[]=(channel, value)
      @channels[channel] = value
    end

    def self.debug(message)
      self.write(:debug, "[light_black][fog][DEBUG] #{message}[/]\n")
    end

    def self.deprecation(message)
      self.write(:deprecation, "[yellow][fog][DEPRECATION] #{message}[/]\n")
    end

    def self.warning(message)
      self.write(:warning, "[yellow][fog][WARNING] #{message}[/]\n")
    end

    def self.write(key, value)
      if channel = @channels[key]
        message = if channel.tty?
          value.gsub(Formatador::PARSE_REGEX) { "\e[#{Formatador::STYLES[$1.to_sym]}m" }.gsub(Formatador::INDENT_REGEX, '')
        else
          value.gsub(Formatador::PARSE_REGEX, '').gsub(Formatador::INDENT_REGEX, '')
        end
        channel.write(message)
      end
      nil
    end

  end
end
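A small sketch of redirecting and silencing channels; the messages are placeholders.

Fog::Logger[:warning] = STDOUT                  # redirect a channel
Fog::Logger[:deprecation] = nil                 # or silence it entirely
Fog::Logger.warning("quota almost exhausted")   # prints "[fog][WARNING] quota almost exhausted"

# Debug output is only wired up when the DEBUG environment variable is set,
# but a channel can also be attached by hand:
Fog::Logger[:debug] = STDERR
Fog::Logger.debug("request took 120ms")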
@@ -1,115 +0,0 @@
module Fog

  @mocking = false

  def self.mock!
    @mocking = true
  end

  def self.unmock!
    @mocking = false
  end

  def self.mock?
    @mocking
  end

  def self.mocking?
    @mocking
  end

  module Mock

    @delay = 1
    def self.delay
      @delay
    end

    def self.delay=(new_delay)
      raise ArgumentError, "delay must be non-negative" unless new_delay >= 0
      @delay = new_delay
    end

    def self.not_implemented(message = 'Contributions welcome!')
      raise Fog::Errors::MockNotImplemented.new(message)
    end

    def self.random_ip(opts = {:version => :v4})
      version = opts[:version]
      if version == :v6
        bit_length = 128
        family = Socket::AF_INET6
      elsif version == :v4
        bit_length = 32
        family = Socket::AF_INET
      else
        raise ArgumentError, "Unknown IP version: #{version}"
      end

      seed = 1 + rand((2**bit_length)-1)
      IPAddr.new(seed, family).to_s
    end

    def self.random_base64(length)
      random_selection(
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
        length
      )
    end

    def self.random_hex(length)
      max = ('f' * length).to_i(16)
      rand(max).to_s(16).rjust(length, '0')
    end

    def self.random_letters(length)
      random_selection(
        'abcdefghijklmnopqrstuvwxyz',
        length
      )
    end

    def self.random_numbers(length)
      max = ('9' * length).to_i
      rand(max).to_s
    end

    def self.random_letters_and_numbers(length)
      random_selection(
        'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
        length
      )
    end

    def self.random_selection(characters, length)
      selection = ''
      length.times do
        position = rand(characters.length)
        selection << characters[position..position]
      end
      selection
    end

    def self.reset
      mocked_services = []
      Fog.constants.map do |x|
        x_const = Fog.const_get(x)
        x_const.respond_to?(:constants) && x_const.constants.map do |y|
          y_const = x_const.const_get(y)
          y_const.respond_to?(:constants) && y_const.constants.map do |z|
            if z.to_sym == :Mock
              mocked_services << y_const.const_get(z)
            end
          end
        end
      end

      for mocked_service in mocked_services
        next unless mocked_service.respond_to?(:reset)
        mocked_service.reset
      end
    end

  end

end
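A sketch of the mock switches and random-data helpers in use; the literal values in the comments are only examples of what the helpers might return.

Fog.mock!                      # providers that ship a Mock implementation run in-memory
Fog.mock?                      # => true
Fog::Mock.delay = 0            # skip simulated waits in tests

Fog::Mock.random_hex(8)        # => e.g. "3f9a0c1d"
Fog::Mock.random_letters(6)    # => e.g. "qwerty"
Fog::Mock.random_ip            # => e.g. "203.0.113.7"
Fog::Mock.random_ip(:version => :v6)

Fog::Mock.reset                # wipe the in-memory state of every loaded Mock class
Fog.unmock!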
@@ -1,80 +0,0 @@
require "fog/core/deprecated_connection_accessors"

module Fog
  class Model

    extend Fog::Attributes::ClassMethods
    include Fog::Attributes::InstanceMethods
    include Fog::Core::DeprecatedConnectionAccessors

    attr_accessor :collection
    attr_reader :service

    def initialize(new_attributes = {})
      # TODO Remove compatibility with old connection option
      @service = new_attributes.delete(:service)
      if @service.nil? && new_attributes[:connection]
        Fog::Logger.deprecation("Passing :connection option is deprecated, use :service instead [light_black](#{caller.first})[/]")
        @service = new_attributes[:connection]
      end
      merge_attributes(new_attributes)
    end

    def inspect
      Thread.current[:formatador] ||= Formatador.new
      data = "#{Thread.current[:formatador].indentation}<#{self.class.name}"
      Thread.current[:formatador].indent do
        unless self.class.attributes.empty?
          data << "\n#{Thread.current[:formatador].indentation}"
          data << self.class.attributes.map {|attribute| "#{attribute}=#{send(attribute).inspect}"}.join(",\n#{Thread.current[:formatador].indentation}")
        end
      end
      data << "\n#{Thread.current[:formatador].indentation}>"
      data
    end

    def reload
      requires :identity

      return unless data = begin
        collection.get(identity)
      rescue Excon::Errors::SocketError
        nil
      end

      new_attributes = data.attributes
      merge_attributes(new_attributes)
      self
    end

    def to_json(options = {})
      Fog::JSON.encode(attributes)
    end

    def symbolize_keys(hash)
      return nil if hash.nil?
      hash.inject({}) do |options, (key, value)|
        options[(key.to_sym rescue key) || key] = value
        options
      end
    end

    def wait_for(timeout=Fog.timeout, interval=1, &block)
      reload_has_succeeded = false
      duration = Fog.wait_for(timeout, interval) do # Note that duration = false if it times out
        if reload
          reload_has_succeeded = true
          instance_eval(&block)
        else
          false
        end
      end
      if reload_has_succeeded
        return duration # false if timeout; otherwise {:duration => elapsed time }
      else
        raise Fog::Errors::Error.new("Reload failed, #{self.class} #{self.identity} not present.")
      end
    end

  end
end
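The wait_for method above is the usual way callers block until a resource settles. A hedged sketch: `server` is assumed to come from a provider collection (for example something like compute.servers.get(id)), and `ready?` is assumed to be an attribute or predicate defined by that provider's model.

server.wait_for(120, 5) { ready? }   # reload every 5 seconds for up to 120 seconds
server.wait_for { ready? }           # defaults: Fog.timeout and a 1 second interval
# => {:duration => elapsed seconds} on success, false if the timeout elapsed first;
#    raises Fog::Errors::Error if the resource can no longer be reloaded.

# The block is instance_eval'd against the freshly reloaded model, so bare
# calls like `ready?` refer to the model itself.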
@@ -1,34 +0,0 @@
module Fog

  def self.providers
    @providers ||= {}
  end

  def self.providers=(new_providers)
    @providers = new_providers
  end

  module Provider

    def self.extended(base)
      provider = base.to_s.split('::').last
      Fog.providers[provider.downcase.to_sym] = provider
    end

    def [](service_key)
      eval(@services_registry[service_key]).new
    end

    def service(new_service, constant_string)
      Fog.services[new_service] ||= []
      Fog.services[new_service] |= [self.to_s.split('::').last.downcase.to_sym]
      @services_registry ||= {}
      @services_registry[new_service] = [self.to_s, constant_string].join('::')
    end

    def services
      @services_registry.keys
    end

  end
end
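A sketch of how a provider namespace registers itself with the module above; MyCloud is a hypothetical provider, and the final lookup assumes a Fog::MyCloud::Compute service class exists somewhere else.

module Fog
  module MyCloud             # hypothetical provider namespace
    extend Fog::Provider     # self.extended registers :mycloud in Fog.providers

    service(:compute, 'Compute')   # registry maps :compute to "Fog::MyCloud::Compute"
  end
end

Fog.providers          # => includes :mycloud => "MyCloud"
Fog::MyCloud.services  # => [:compute]
Fog::MyCloud[:compute] # evals "Fog::MyCloud::Compute" and calls .new on it (must be defined elsewhere)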
@@ -1,96 +0,0 @@
module Fog
  module SCP

    def self.new(address, username, options = {})
      if Fog.mocking?
        Fog::SCP::Mock.new(address, username, options)
      else
        Fog::SCP::Real.new(address, username, options)
      end
    end

    class Mock

      def self.data
        @data ||= Hash.new do |hash, key|
          hash[key] = []
        end
      end

      def initialize(address, username, options)
        @address = address
        @username = username
        @options = options
      end

      def upload(local_path, remote_path, upload_options = {})
        self.class.data[@address] << { :username => @username,
                                       :options => @options,
                                       :local_path => local_path,
                                       :remote_path => remote_path,
                                       :upload_options => upload_options }
      end

      def download(remote_path, local_path, download_options = {})
        self.class.data[@address] << { :username => @username,
                                       :options => @options,
                                       :remote_path => remote_path,
                                       :local_path => local_path,
                                       :download_options => download_options }
      end

    end

    class Real

      def initialize(address, username, options)
        require 'net/scp'

        key_manager = Net::SSH::Authentication::KeyManager.new(nil, options)

        unless options[:key_data] || options[:keys] || options[:password] || key_manager.agent
          raise ArgumentError.new(':key_data, :keys, :password or a loaded ssh-agent is required to initialize SSH')
        end

        options[:timeout] = 30
        if options[:key_data] || options[:keys]
          options[:keys_only] = true
          # Explicitly set these so net-ssh doesn't add the default keys
          # as seen at https://github.com/net-ssh/net-ssh/blob/master/lib/net/ssh/authentication/session.rb#L131-146
          options[:keys] = [] unless options[:keys]
          options[:key_data] = [] unless options[:key_data]
        end

        @address = address
        @username = username
        @options = { :paranoid => false }.merge(options)
      end

      def upload(local_path, remote_path, upload_options = {}, &block)
        begin
          Net::SCP.start(@address, @username, @options) do |scp|
            scp.upload!(local_path, remote_path, upload_options) do |ch, name, sent, total|
              block.call(ch, name, sent, total) if block
            end
          end
        rescue Exception => error
          raise error
        end
      end

      def download(remote_path, local_path, download_options = {}, &block)
        begin
          Net::SCP.start(@address, @username, @options) do |scp|
            scp.download!(remote_path, local_path, download_options) do |ch, name, sent, total|
              block.call(ch, name, sent, total) if block
            end
          end
        rescue Exception => error
          raise error
        end
      end

    end

  end
end
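A hedged usage sketch; the host, username, key path, and file paths are placeholders.

scp = Fog::SCP.new('203.0.113.10', 'deploy', :keys => ['~/.ssh/deploy_key'])

scp.upload('site.tar.gz', '/tmp/site.tar.gz') do |ch, name, sent, total|
  print "\r#{name}: #{sent}/#{total} bytes"
end

scp.download('/var/log/app.log', 'app.log')

# Under Fog.mock! the same calls only record their arguments:
Fog.mock!
Fog::SCP.new('203.0.113.10', 'deploy', {}).upload('a', 'b')
Fog::SCP::Mock.data['203.0.113.10'].last   # => {:username => "deploy", :local_path => "a", ...}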