incubator-deltacloud-dev mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mfoj...@redhat.com
Subject [PATCH core 01/10] Revamped EC2 driver using 'aws' gem
Date Wed, 15 Dec 2010 20:14:59 GMT
From: Michal Fojtik <mfojtik@redhat.com>

---
 server/lib/deltacloud/drivers/ec2/ec2_driver.rb | 1042 ++++++++++-------------
 server/lib/deltacloud/models/storage_volume.rb  |    2 +
 server/server.rb                                |   90 ++
 server/views/keys/index.html.haml               |    2 +-
 4 files changed, 529 insertions(+), 607 deletions(-)

diff --git a/server/lib/deltacloud/drivers/ec2/ec2_driver.rb b/server/lib/deltacloud/drivers/ec2/ec2_driver.rb
index c309287..f532bcf 100644
--- a/server/lib/deltacloud/drivers/ec2/ec2_driver.rb
+++ b/server/lib/deltacloud/drivers/ec2/ec2_driver.rb
@@ -1,4 +1,3 @@
-#
 # Copyright (C) 2009, 2010  Red Hat, Inc.
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
@@ -15,671 +14,502 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 # License for the specific language governing permissions and limitations
 # under the License.
-
+#
 
 require 'deltacloud/base_driver'
-require 'active_support'
-require 'AWS'
-require 'right_aws'
-
-module Deltacloud
-  module Drivers
-    module EC2
-class EC2Driver < Deltacloud::BaseDriver
-
-  def supported_collections
-    DEFAULT_COLLECTIONS + [ :keys, :buckets, :load_balancers ]
-  end
-  
-  feature :instances, :user_data
-  feature :instances, :authentication_key
-  feature :instances, :security_group
-  feature :images, :owner_id
-  feature :buckets, :bucket_location
-  feature :instances, :register_to_load_balancer
-
-  define_hardware_profile('m1.small') do
-    cpu                1
-    memory             1.7 * 1024
-    storage            160
-    architecture       'i386'
-  end
+require 'aws'
 
-  define_hardware_profile('m1.large') do
-    cpu                4
-    memory             7.5 * 1024
-    storage            850
-    architecture       'x86_64'
-  end
-
-  define_hardware_profile('m1.xlarge') do
-    cpu                8
-    memory             15 * 1024
-    storage            1690
-    architecture       'x86_64'
-  end
-
-  define_hardware_profile('c1.medium') do
-    cpu                5
-    memory             1.7 * 1024
-    storage            350
-    architecture       'i386'
-  end
+class Instance
+  attr_accessor :keyname
+  attr_accessor :authn_error
 
-  define_hardware_profile('c1.xlarge') do
-    cpu                20
-    memory             7 * 1024
-    storage            1690
-    architecture       'x86_64'
+  def authn_feature_failed?
+    return true unless authn_error.nil?
   end
 
-  define_hardware_profile('m2.xlarge') do
-    cpu                6.5
-    memory             17.1 * 1024
-    storage            420
-    architecture       'x86_64'
-  end
-
-  define_hardware_profile('m2.2xlarge') do
-    cpu                13
-    memory             34.2 * 1024
-    storage            850
-    architecture       'x86_64'
-  end
-
-  define_hardware_profile('m2.4xlarge') do
-    cpu                26
-    memory             68.4 * 1024
-    storage            1690
-    architecture       'x86_64'
-  end
+end
 
-  define_instance_states do
-    start.to( :pending )          .automatically
-    pending.to( :running )        .automatically
-    pending.to( :stopping )       .on( :stop )
-    pending.to( :stopped )        .automatically
-    stopped.to( :running )        .on( :start )
-    running.to( :running )        .on( :reboot )
-    running.to( :stopping )       .on( :stop )
-    shutting_down.to( :stopped )  .automatically
-    stopped.to( :finish )         .automatically
-  end
+module Deltacloud
+  module Drivers
+    module EC2
+      class EC2Driver < Deltacloud::BaseDriver
 
-  DEFAULT_REGION = 'us-east-1'
-  
-  #
-  # Images
-  #
-
-  def images(credentials, opts={} )
-    ec2 = new_client(credentials)
-    img_arr = []
-    # if we know the image_id, we don't want to limit by owner_id, since this
-    # will exclude public images
-    if (opts and opts[:id])
-      config = { :image_id => opts[:id] }
-    else
-      config = { :owner_id => "amazon" }
-      config.merge!({ :owner_id => opts[:owner_id] }) if opts and opts[:owner_id]
-    end
-    safely do
-      image_set = ec2.describe_images(config).imagesSet
-      unless image_set.nil?
-        image_set.item.each do |image|
-          img_arr << convert_image(image)
+        def supported_collections
+          DEFAULT_COLLECTIONS + [ :keys, :buckets ]
         end
-      end
-    end
-    img_arr = filter_on( img_arr, :architecture, opts )
-    img_arr.sort_by{|e| [e.owner_id, e.name]}
-  end
-
-  #
-  # Realms
-  #
 
-  def realms(credentials, opts=nil)
-    ec2 = new_client(credentials)
-    realms = []
-    safely do
-      ec2.describe_availability_zones.availabilityZoneInfo.item.each do |ec2_realm|
-        realms << convert_realm( ec2_realm )
-      end
-    end
-    realms
-  end
-
-  #
-  # Instances
-  #
-  def instances(credentials, opts=nil)
-    ec2 = new_client(credentials)
-    instances = []
-    safely do
-      param = opts.nil? ? nil : opts[:id]
-      ec2_instances = ec2.describe_instances.reservationSet
-      return [] unless ec2_instances
-      ec2_instances.item.each do |item|
-        item.instancesSet.item.each do |ec2_instance|
-          instances << convert_instance( ec2_instance, item.ownerId )
+        feature :instances, :user_data
+        feature :instances, :authentication_key
+        feature :instances, :security_group
+        feature :images, :owner_id
+        feature :buckets, :bucket_location
+
+        define_hardware_profile('t1.micro') do
+          cpu                1
+          memory             0.63 * 1024
+          storage            160
+          architecture       'i386'
         end
-      end
-    end
-    instances = filter_on( instances, :id, opts )
-    instances = filter_on( instances, :state, opts )
-    instances
-  end
 
+        define_hardware_profile('m1.small') do
+          cpu                1
+          memory             1.7 * 1024
+          storage            160
+          architecture       'i386'
+        end
 
-  def create_instance(credentials, image_id, opts)
-    ec2 = new_client( credentials )
-    realm_id = opts[:realm_id]
-    safely do
-      image = image(credentials, :id => image_id )
-      hwp = find_hardware_profile(credentials, opts[:hwp_id], image.id)
-      ec2_instances = ec2.run_instances(
-        :image_id => image.id,
-        :user_data => opts[:user_data],
-        :key_name => opts[:keyname],
-        :availability_zone => realm_id,
-        :monitoring_enabled => true,
-        :instance_type => hwp.name,
-        :disable_api_termination => false,
-        :instance_initiated_shutdown_behavior => 'terminate',
-        :security_group => opts[:security_group]
-      )
-      new_instance = convert_instance( ec2_instances.instancesSet.item.first, 'pending' )
-      if opts[:load_balancer_id] and opts[:load_balancer_id]!=""
-        elb = new_client(credentials, :elb)
-        elb.register_instances_with_load_balancer({
-          :instances => [new_instance.id],
-          :load_balancer_name => opts[:load_balancer_id]
-        })
-      end
-      return new_instance
-    end
-  end
-
-  def generate_instance(ec2, id, backup)
-    begin
-      this_instance = ec2.describe_instances( :instance_id => id ).reservationSet.item.first.instancesSet.item.first
-      convert_instance(this_instance, this_instance.ownerId)
-    rescue Exception => e
-      puts "WARNING: ignored error during instance refresh: #{e.message}"
-      # at this point, the action has succeeded but our follow-up
-      # "describe_instances" failed for some reason.  Create a simple Instance
-      # object with only the ID and new state in place
-      state = convert_state(backup.instancesSet.item.first.currentState.name)
-      Instance.new( {
-        :id => id,
-        :state => state,
-        :actions => instance_actions_for( state ),
-      } )
-    end
-  end
+        define_hardware_profile('m1.large') do
+          cpu                4
+          memory             7.5 * 1024
+          storage            850
+          architecture       'x86_64'
+        end
 
-  def reboot_instance(credentials, id)
-    ec2 = new_client(credentials)
-    backup = ec2.reboot_instances( :instance_id => id )
+        define_hardware_profile('m1.xlarge') do
+          cpu                8
+          memory             15 * 1024
+          storage            1690
+          architecture       'x86_64'
+        end
 
-    generate_instance(ec2, id, backup)
-  end
+        define_hardware_profile('c1.medium') do
+          cpu                5
+          memory             1.7 * 1024
+          storage            350
+          architecture       'i386'
+        end
 
-  def stop_instance(credentials, id)
-    ec2 = new_client(credentials)
-    backup = ec2.terminate_instances( :instance_id => id )
+        define_hardware_profile('c1.xlarge') do
+          cpu                20
+          memory             7 * 1024
+          storage            1690
+          architecture       'x86_64'
+        end
 
-    generate_instance(ec2, id, backup)
-  end
+        define_hardware_profile('m2.xlarge') do
+          cpu                6.5
+          memory             17.1 * 1024
+          storage            420
+          architecture       'x86_64'
+        end
 
-  def destroy_instance(credentials, id)
-    ec2 = new_client(credentials)
-    backup = ec2.terminate_instances( :instance_id => id )
+        define_hardware_profile('m2.2xlarge') do
+          cpu                13
+          memory             34.2 * 1024
+          storage            850
+          architecture       'x86_64'
+        end
 
-    generate_instance(ec2, id, backup)
-  end
+        define_hardware_profile('m2.4xlarge') do
+          cpu                26
+          memory             68.4 * 1024
+          storage            1690
+          architecture       'x86_64'
+        end
 
-  #
-  # Storage Volumes
-  #
+        define_instance_states do
+          start.to( :pending )          .automatically
+          pending.to( :running )        .automatically
+          pending.to( :stopping )       .on( :stop )
+          pending.to( :stopped )        .automatically
+          stopped.to( :running )        .on( :start )
+          running.to( :running )        .on( :reboot )
+          running.to( :stopping )       .on( :stop )
+          shutting_down.to( :stopped )  .automatically
+          stopped.to( :finish )         .automatically
+        end
 
-  def storage_volumes(credentials, opts=nil)
-    ec2 = new_client( credentials )
-    volumes = []
-    safely do
-      if (opts)
-        ec2.describe_volumes(:volume_id => opts[:id]).volumeSet.item.each do |ec2_volume|
-          volumes << convert_volume( ec2_volume )
+        def images(credentials, opts={})
+          ec2 = new_client(credentials)
+          img_arr = []
+          opts ||= {}
+          if opts[:id]
+            safely do
+              img_arr = ec2.describe_images(opts[:id]).collect do |image|
+                convert_image(image)
+              end
+            end
+          else
+            owner_id = opts[:owner_id] || "amazon"
+            safely do
+              img_arr = ec2.describe_images_by_owner(owner_id, "machine").collect do |image|
+                convert_image(image)
+              end
+            end
+          end
+          img_arr = filter_on( img_arr, :architecture, opts )
+          img_arr.sort_by { |e| [e.owner_id, e.name] }
         end
-      else
-        ec2_volumes = ec2.describe_volumes.volumeSet
-        return [] unless ec2_volumes
-        ec2_volumes.item.each do |ec2_volume|
-          volumes << convert_volume( ec2_volume )
+
+        def realms(credentials, opts={})
+          ec2 = new_client(credentials)
+          zone_id = opts ? opts[:id] : nil
+          safely do
+            return ec2.describe_availability_zones(zone_id).collect do |realm|
+              convert_realm(realm)
+            end
+          end
         end
-      end
-    end
-    volumes
-  end
 
-  #
-  # Storage Snapshots
-  #
+        def instances(credentials, opts={})
+          ec2 = new_client(credentials)
+          inst_arr = []
+          safely do
+            inst_arr = ec2.describe_instances.collect do |instance| 
+              convert_instance(instance) if instance
+            end.flatten
+          end
+          inst_arr = filter_on( inst_arr, :id, opts )
+          filter_on( inst_arr, :state, opts )
+        end
 
-  def storage_snapshots(credentials, opts=nil)
-    ec2 = new_client( credentials )
-    snapshots = []
-    safely do
-      if (opts)
-        ec2.describe_snapshots(:owner => 'self', :snapshot_id => opts[:id]).snapshotSet.item.each do |ec2_snapshot|
-          snapshots << convert_snapshot( ec2_snapshot )
+        def create_instance(credentials, image_id, opts={})
+          ec2 = new_client(credentials)
+          instance_options = {}
+          instance_options.merge!(:user_data => opts[:user_data]) if opts[:user_data]
+          instance_options.merge!(:key_name => opts[:key_name]) if opts[:key_name]
+          instance_options.merge!(:availability_zone => opts[:availability_zone]) if opts[:availability_zone]
+          instance_options.merge!(:instance_type => opts[:hwp_id]) if opts[:hwp_id]
+          instance_options.merge!(:group_ids => opts[:security_group]) if opts[:security_group]
+          safely do
+            new_instance = convert_instance(ec2.launch_instances(image_id, instance_options).first)
+            if opts[:public_ip]
+              ec2.associate_address(new_instance.id, opts[:public_ip])
+            end
+            new_instance
+          end
         end
-      else
-        ec2_snapshots = ec2.describe_snapshots(:owner => 'self').snapshotSet
-        return [] unless ec2_snapshots
-        ec2_snapshots.item.each do |ec2_snapshot|
-          snapshots << convert_snapshot( ec2_snapshot )
+    
+        def reboot_instance(credentials, instance_id)
+          ec2 = new_client(credentials)
+          if ec2.reboot_instances([instance_id])
+            instance(credentials, instance_id)
+          else
+            raise Deltacloud::BackendError.new(500, "Instance", "Instance reboot failed", "")
+          end
         end
-      end
-    end
-    snapshots
-  end
 
-  def keys(credentials, opts=nil)
-    ec2 = new_client( credentials )
-    opts[:key_name] = opts[:id] if opts and opts[:id]
-    keypairs = ec2.describe_keypairs(opts || {})
-    result = []
-    safely do
-      keypairs.keySet.item.each do |keypair|
-        result << convert_key(keypair)
-      end if keypairs.keySet
-    end
-    result
-  end
+        def destroy_instance(credentials, instance_id)
+          ec2 = new_client(credentials)
+          if ec2.terminate_instances([instance_id])
+            instance(credentials, instance_id)
+          else
+            raise Deltacloud::BackendError.new(500, "Instance", "Instance cannot be terminated", "")
+          end
+        end
 
-  def create_key(credentials, opts={})
-    key = Key.new
-    ec2 = new_client( credentials )
-    safely do
-      key = convert_key(ec2.create_keypair(opts))
-    end
-    return key
-  end
+        alias :stop_instance :destroy_instance
 
-  def destroy_key(credentials, opts={})
-    safely do
-      ec2 = new_client( credentials )
-      ec2.delete_keypair(opts)
-    end
-  end
+        def keys(credentials, opts={})
+          ec2 = new_client(credentials)
+          opts ||= {}
+          safely do
+            ec2.describe_key_pairs(opts[:id] || nil).collect do |key|
+              convert_key(key)
+            end
+          end
+        end
 
-  def valid_credentials?(credentials)
-    client = new_client(credentials)
-    # FIXME: We need to do this call to determine if
-    #        EC2 is working with given credentials. There is no
-    #        other way to check, if given credentials are valid or not.
-    realms = client.describe_availability_zones rescue false
-    return realms ? true : false
-  end
+        def key(credentials, opts={})
+          keys(credentials, :id => opts[:id]).first
+        end
 
-#--
-# Buckets
-#-- get a list of your buckets from the s3 service
-  def buckets(credentials, opts)
-    buckets = []
-    safely do
-      s3_client = s3_client(credentials)
-      bucket_list = s3_client.buckets
-      bucket_list.each do |current|
-        buckets << convert_bucket(current)
-      end
-    end
-    buckets = filter_on(buckets, :id, opts)
-    buckets
-  end
+        def create_key(credentials, opts={})
+          ec2 = new_client(credentials)
+          safely do
+            convert_key(ec2.create_key_pair(opts[:key_name]))
+          end
+        end
 
-#--
-# Create bucket
-#--
-#valid values for bucket location: 'EU'|'us-west1'|'ap-southeast-1' - if you
-#don't specify a location then by default buckets are created in 'us-east'
-#[but if you *do* specify 'us-east' things blow up]
-  def create_bucket(credentials, name, opts={})
-    bucket = nil
-    safely do
-      begin
-        s3_client = s3_client(credentials)
-        bucket_location = opts['location']
-        if bucket_location
-          bucket = RightAws::S3::Bucket.create(s3_client, name, true, nil, :location => bucket_location)
-        else
-          bucket = RightAws::S3::Bucket.create(s3_client, name, true)
-        end #if
-        rescue RightAws::AwsError => e
-          raise e unless e.message =~ /BucketAlreadyExists/
-          raise Deltacloud::BackendError.new(409, e.class.to_s, e.message, e.backtrace)
-      end #begin
-    end #do
-    convert_bucket(bucket)
-  end
+        def destroy_key(credentials, opts={})
+          ec2 = new_client(credentials)
+          original_key = key(credentials, opts)
+          safely do
+            ec2.delete_key_pair(original_key.id)
+            original_key= original_key.state = "DELETED"
+          end
+          original_key
+        end
 
-#--
-# Delete_bucket
-#--
-  def delete_bucket(credentials, name, opts={})
-    s3_client = s3_client(credentials)
-    safely do
-      s3_client.interface.delete_bucket(name)
-    end
-  end
+        def buckets(credentials, opts)
+          buckets = []
+          safely do
+            s3_client = new_client(credentials, :s3)
+            bucket_list = s3_client.buckets
+            bucket_list.each do |current|
+              buckets << convert_bucket(current)
+            end
+          end
+          filter_on(buckets, :id, opts)
+        end
 
-#--
-# Blobs
-#--
-  def blobs(credentials, opts = nil)
-    s3_client = s3_client(credentials)
-    blobs = []
-    safely do
-      s3_bucket = s3_client.bucket(opts['bucket'])
-      s3_bucket.keys({}, true).each do |s3_object|
-        blobs << convert_object(s3_object)
-      end
-    end
-    blobs = filter_on(blobs, :id, opts)
-    blobs
-  end
+        def create_bucket(credentials, name, opts={})
+          bucket = nil
+          safely do
+            s3_client = new_client(credentials, :s3)
+            bucket_location = opts['location']
+            if bucket_location
+              bucket = Aws::S3::Bucket.create(s3_client, name, true, nil, :location => bucket_location)
+            else
+              bucket = Aws::S3::Bucket.create(s3_client, name, true)
+            end
+          end
+          convert_bucket(bucket)
+        end
 
-#--
-# Blob data
-#--
-  def blob_data(credentials, bucket_id, blob_id, opts)
-    s3_client = s3_client(credentials)
-    safely do
-      s3_client.interface.get(bucket_id, blob_id) do |chunk|
-        yield chunk
-      end
-    end
-  end
+        def delete_bucket(credentials, name, opts={})
+          s3_client = new_client(credentials, :s3)
+          safely do
+            s3_client.interface.delete_bucket(name)
+          end
+        end
 
-#--
-# Create Blob
-#--
-  def create_blob(credentials, bucket_id, blob_id, data = nil, opts = nil)
-    s3_client = s3_client(credentials)
-    #data is a construct with the temporary file created by server @.tempfile
-    #also file[:type] will give us the content-type
-    safely do
-      res = s3_client.interface.put(bucket_id, blob_id, data[:tempfile], {"Content-Type" => data[:type]})
-      #create a new Blob object and return that
-      Blob.new( { :id => blob_id,
-                :bucket => bucket_id,
-                :content_length => data[:tempfile].length,
-                :content_type => data[:type],
-                :last_modified => ''
-              }
-            )
-    end
-  end
+        def blobs(credentials, opts = nil)
+          s3_client = new_client(credentials, :s3)
+          blobs = []
+          safely do
+            s3_bucket = s3_client.bucket(opts['bucket'])
+            s3_bucket.keys({}, true).each do |s3_object|
+              blobs << convert_object(s3_object)
+            end
+          end
+          blobs = filter_on(blobs, :id, opts)
+          blobs
+        end
 
-#--
-# Delete Blob
-#--  
-  def delete_blob(credentials, bucket_id, blob_id, opts=nil)
-    s3_client = s3_client(credentials)
-    safely do
-      s3_client.interface.delete(bucket_id, blob_id)
-    end
-  end
+        #--
+        # Create Blob
+        #--
+        def create_blob(credentials, bucket_id, blob_id, data = nil, opts = nil)
+          s3_client = new_client(credentials, :s3)
+          #data is a construct with the temporary file created by server @.tempfile
+          #also file[:type] will give us the content-type
+          res = nil
+          # File stream needs to be reopened in binary mode for whatever reason
+          file = File::open(data[:tempfile].path, 'rb')
+          safely do
+            res = s3_client.interface.put(bucket_id, 
+                                        blob_id, 
+                                        file, 
+                                        {"Content-Type" => data[:type]})
+          end
+          #create a new Blob object and return that
+          Blob.new( { :id => blob_id,
+                      :bucket => bucket_id,
+                      :content_length => data[:tempfile].length,
+                      :content_type => data[:type],
+                      :last_modified => ''
+                    }
+                  )
+        end
 
-  def load_balancer(credentials, opts={})
-    load_balancers(credentials, {
-      :load_balancer_names => [opts[:id]]
-    }).first
-  end
+        #--
+        # Delete Blob
+        #--  
+        def delete_blob(credentials, bucket_id, blob_id, opts=nil)
+          s3_client = new_client(credentials, :s3)
+          s3_client.interface.delete(bucket_id, blob_id)
+        end
 
-  def load_balancers(credentials, opts=nil)
-    ec2 = new_client( credentials, :elb )
-    result = []
-    safely do
-      loadbalancers = ec2.describe_load_balancers(opts || {})
-      return [] unless loadbalancers.DescribeLoadBalancersResult.LoadBalancerDescriptions
-      loadbalancers.DescribeLoadBalancersResult.LoadBalancerDescriptions.member.each do |loadbalancer|
-        result << convert_load_balancer(credentials, loadbalancer)
-      end
-    end
-    return result
-  end
 
-  def create_load_balancer(credentials, opts={})
-    ec2 = new_client( credentials, :elb )
-    safely do
-      ec2.create_load_balancer({
-        :load_balancer_name => opts['name'],
-        # TODO: Add possibility to push more listeners/realms in one request
-        # Something like 'Hash' in 'Array' parameter
-        :availability_zones => [opts['realm_id']],
-        :listeners => [{
-          :protocol => opts['listener_protocol'],
-          :load_balancer_port => opts['listener_balancer_port'],
-          :instance_port => opts['listener_instance_port']
-         }]
-      })
-      return load_balancer(credentials, opts['name'])
-    end
-  end
+        def blob_data(credentials, bucket_id, blob_id, opts)
+          s3_client = new_client(credentials, :s3)
+          s3_client.interface.get(bucket_id, blob_id) do |chunk|
+            yield chunk
+          end
+        end
 
-  def destroy_load_balancer(credentials, id)
-    ec2 = new_client( credentials, :elb )
-    safely do
-      ec2.delete_load_balancer({
-        :load_balancer_name => id
-      })
-    end
-  end
+        def storage_volumes(credentials, opts={})
+          ec2 = new_client( credentials )
+          volume_list = (opts and opts[:id]) ? opts[:id] : nil
+          safely do
+            ec2.describe_volumes(volume_list).collect do |volume|
+              convert_volume(volume)
+            end
+          end
+        end
 
-  def lb_register_instance(credentials, opts={})
-    ec2 = new_client( credentials, :elb)
-    safely do
-      ec2.register_instances_with_load_balancer(:instances => [opts[:instance_id]],
-        :load_balancer_name => opts[:id])
-      load_balancer(credentials, :id => opts[:id])
-    end
-  end
+        def create_storage_volume(credentials, opts=nil)
+          ec2 = new_client(credentials)
+          opts ||= {}
+          opts[:snapshot_id] ||= ""
+          opts[:capacity] ||= "1"
+          opts[:realm_id] ||= realms(credentials).first.id
+          safely do
+            convert_volume(ec2.create_volume(opts[:snapshot_id], opts[:capacity], opts[:realm_id]))
+          end
+        end
 
-  def lb_unregister_instance(credentials, opts={})
-    ec2 = new_client( credentials, :elb)
-    safely do
-      ec2.deregister_instances_from_load_balancer(:instances => [opts[:instance_id]],
-        :load_balancer_name => opts[:id])
-      load_balancer(credentials, :id => opts[:id])
-    end
-  end
+        def destroy_storage_volume(credentials, opts={})
+          ec2 = new_client(credentials)
+          safely do
+            unless ec2.delete_volume(opts[:id]) 
+              raise Deltacloud::BackendError.new(500, "StorageVolume", "Cannot delete storage volume")
+            end
+            storage_volume(credentials, opts[:id])
+          end
+        end
 
-  private
-
-  def new_client(credentials, provider_type = :ec2)
-    opts = {
-      :access_key_id => credentials.user,
-      :secret_access_key => credentials.password,
-      :server => endpoint_for_service(provider_type)
-    }
-    safely do
-      case provider_type
-        when :ec2
-          AWS::EC2::Base.new(opts)
-        when :elb
-          AWS::ELB::Base.new(opts)
-      end
-    end
-  end
+        def attach_storage_volume(credentials, opts={})
+          ec2 = new_client(credentials)
+          safely do
+            convert_volume(ec2.attach_volume(opts[:id], opts[:instance_id], opts[:device]))
+          end
+        end
 
-  def endpoint_for_service(service)
-    url = ""
-    url << case service
-           when :ec2
-             'ec2.'
-           when :elb
-             'elasticloadbalancing.'
-           end
-    url << (Thread.current[:provider] || ENV['API_PROVIDER'] || DEFAULT_REGION)
-    url << '.amazonaws.com'
-    url
-  end
-  
-  def convert_load_balancer(credentials, loadbalancer)
-    balancer_realms = loadbalancer.AvailabilityZones.member.collect do |m|
-      realm(credentials, m)
-    end
-    balancer = LoadBalancer.new({
-      :id => loadbalancer['LoadBalancerName'],
-      :created_at => loadbalancer['CreatedTime'],
-      :public_addresses => [loadbalancer['DNSName']],
-      :realms =>  balancer_realms
-    })
-    balancer.listeners = []
-    balancer.instances = []
-    loadbalancer.Listeners.member.each do |listener|
-      balancer.add_listener({
-        :protocol => listener['Protocol'],
-        :load_balancer_port => listener['LoadBalancerPort'],
-        :instance_port => listener['InstancePort']
-      })
-    end
-    loadbalancer.Instances.member.each do |instance|
-      balancer.instances << instances(credentials, :id => instance['InstanceId']).first
-    end if loadbalancer.Instances
-    balancer
-  end
+        def detach_storage_volume(credentials, opts={})
+          ec2 = new_client(credentials)
+          safely do
+            convert_volume(ec2.detach_volume(opts[:id], opts[:instance_id], opts[:device], true))
+          end
+        end
 
+        def storage_snapshots(credentials, opts={})
+          ec2 = new_client(credentials)
+          snapshot_list = (opts and opts[:id]) ? opts[:id] : []
+          safely do
+            ec2.describe_snapshots(snapshot_list).collect do |snapshot|
+              convert_snapshot(snapshot)
+            end
+          end
+        end
 
-  def convert_key(key)
-    Key.new({
-      :id => key['keyName'],
-      :fingerprint => key['keyFingerprint'],
-      :credential_type => :key,
-      :pem_rsa_key => key['keyMaterial']
-    })
-  end
+        def create_storage_snapshot(credentials, opts={})
+          ec2 = new_client(credentials)
+          safely do
+            convert_snapshot(ec2.try_create_snapshot(opts[:volume_id]))
+          end
+        end
 
-  def convert_image(ec2_image)
-    Image.new( {
-      :id=>ec2_image['imageId'],
-      :name=>ec2_image['name'] || ec2_image['imageId'],
-      :description=>ec2_image['description'] || ec2_image['imageLocation'] || '',
-      :owner_id=>ec2_image['imageOwnerId'],
-      :architecture=>ec2_image['architecture'],
-    } )
-  end
+        def destroy_storage_snapshot(credentials, opts={})
+          ec2 = new_client(credentials)
+          safely do
+            unless convert_snapshot(opts[:id])
+              raise Deltacloud::BackendError.new(500, "StorageSnapshot", "Cannot destroy this snapshot")
+            end
+          end
+        end
 
-  def convert_realm(ec2_realm)
-    Realm.new( {
-      :id=>ec2_realm['zoneName'],
-      :name=>ec2_realm['zoneName'],
-      :limit=>ec2_realm['zoneState'].eql?('available') ? :unlimited : 0,
-      :state=>ec2_realm['zoneState'].upcase,
-    } )
-  end
+        private
 
-  def convert_state(ec2_state)
-    case ec2_state
-    when "terminated"
-      "STOPPED"
-    when "stopped"
-      "STOPPED"
-    when "running"
-      "RUNNING"
-    when "pending"
-      "PENDING"
-    when "shutting-down"
-      "STOPPED"
-    end
-  end
+        def new_client(credentials, type = :ec2)
+          case type
+            when :ec2 then Aws::Ec2.new(credentials.user, credentials.password)
+            when :s3 then Aws::S3.new(credentials.user, credentials.password)
+          end
+        end
+        
+        def convert_bucket(s3_bucket)
+          #get blob list:
+          blob_list = []
+          s3_bucket.keys.each do |s3_object|
+            blob_list << s3_object.name
+          end
+          #can use AWS::S3::Owner.current.display_name or current.id
+          Bucket.new(
+            :id => s3_bucket.name,
+            :name => s3_bucket.name,
+            :size => s3_bucket.keys.length,
+            :blob_list => blob_list
+          )
+        end
 
-  def convert_instance(ec2_instance, owner_id)
-    state = convert_state(ec2_instance['instanceState']['name'])
-    realm_id = ec2_instance['placement']['availabilityZone']
-    (realm_id = nil ) if ( realm_id == '' )
-    hwp_name = ec2_instance['instanceType']
-    instance = Instance.new( {
-      :id=>ec2_instance['instanceId'],
-      :name => ec2_instance['imageId'],
-      :state=>state,
-      :image_id=>ec2_instance['imageId'],
-      :owner_id=>owner_id,
-      :realm_id=>realm_id,
-      :public_addresses=>( ec2_instance['dnsName'] == '' ? [] : [ec2_instance['dnsName']] ),
-      :private_addresses=>( ec2_instance['privateDnsName'] == '' ? [] : [ec2_instance['privateDnsName']] ),
-      :instance_profile =>InstanceProfile.new(hwp_name),
-      :actions=>instance_actions_for( state ),
-      :keyname => ec2_instance['keyName'],
-      :launch_time => ec2_instance['launchTime']
-    } )
-    instance.authn_error = "Key not set for instance" unless ec2_instance['keyName']
-    return instance
-  end
+        def convert_realm(realm)
+          Realm.new(
+            :id => realm[:zone_name],
+            :name => realm[:zone_name],
+            :state => realm[:zone_state],
+            :limit => realm[:zone_state].eql?('available') ? :unlimited : 0
+          )
+        end
 
-  def convert_volume(ec2_volume)
-    StorageVolume.new( {
-      :id=>ec2_volume['volumeId'],
-      :created=>ec2_volume['createTime'],
-      :state=>ec2_volume['status'].upcase,
-      :capacity=>ec2_volume['size'],
-      :instance_id=>ec2_volume['snapshotId'],
-      :device=>ec2_volume['attachmentSet'],
-    } )
-  end
+        def convert_image(image)
+          # There is not support for 'name' for now
+          Image.new(
+            :id => image[:aws_id],
+            :name => image[:aws_name] || image[:aws_id],
+            :description => image[:aws_description] || image[:aws_location],
+            :owner_id => image[:aws_owner],
+            :architecture => image[:aws_architecture],
+            :state => image[:state]
+          )
+        end
 
-  def convert_snapshot(ec2_snapshot)
-    StorageSnapshot.new( {
-      :id=>ec2_snapshot['snapshotId'],
-      :state=>ec2_snapshot['status'].upcase,
-      :storage_volume_id=>ec2_snapshot['volumeId'],
-      :created=>ec2_snapshot['startTime'],
-    } )
-  end
+        def convert_instance(instance)
+          Instance.new(
+            :id => instance[:aws_instance_id],
+            :name => instance[:aws_image_id],
+            :state => convert_state(instance[:aws_state]),
+            :image_id => instance[:aws_image_id],
+            :owner_id => instance[:aws_owner],
+            :actions => instance_actions_for(convert_state(instance[:aws_state])),
+            :keyname => instance[:ssh_key_name],
+            :launch_time => instance[:aws_launch_time],
+            :instance_profile => InstanceProfile.new(instance[:aws_instance_type]),
+            :realm_id => instance[:aws_availability_zone],
+            :private_addresses => instance[:private_dns_name],
+            :public_addresses => instance[:public_addresses]
+          )
+        end
 
-  def s3_client(credentials)
-    safely do
-      s3_client = RightAws::S3.new(credentials.user, credentials.password)
-    end
-  end
+        def convert_key(key)
+          Key.new(
+            :id => key[:aws_key_name],
+            :fingerprint => key[:aws_fingerprint],
+            :credential_type => :key,
+            :pem_rsa_key => key[:aws_material],
+            :state => "AVAILABLE"
+          )
+        end
 
-  def convert_bucket(s3_bucket)
-    #get blob list:
-    blob_list = []
-    s3_bucket.keys.each do |s3_object|
-      blob_list << s3_object.name
-    end
-    #can use AWS::S3::Owner.current.display_name or current.id
-    Bucket.new(  { :id => s3_bucket.name,
-                      :name => s3_bucket.name,
-                      :size => s3_bucket.keys.length,
-                      :blob_list => blob_list
-                    }
-                 )
-  end
+        def convert_volume(volume)
+          StorageVolume.new(
+            :id => volume[:aws_id],
+            :created => volume[:aws_created_at],
+            :state => volume[:aws_status] ? volume[:aws_status].upcase : 'unknown',
+            :capacity => volume[:aws_size],
+            :instance_id => volume[:aws_instance_id],
+            :realm_id => volume[:zone],
+            :device => volume[:aws_device],
+            # TODO: the available actions should be tied to the current
+            # volume state                
+            :actions => [:attach, :detach, :destroy] 
+          )
+        end
 
-  def convert_object(s3_object)
-    Blob.new({   :id => s3_object.name,
-                 :bucket => s3_object.bucket.name.to_s,
-                 :content_length => s3_object.size,
-                 :content_type => s3_object.content_type,
-                 :last_modified => s3_object.last_modified
-              })
-  end
+        def convert_snapshot(snapshot)
+          StorageSnapshot.new(
+            :id => snapshot[:aws_id],
+            :state => snapshot[:aws_status],
+            :storage_volume_id => snapshot[:aws_volume_id],
+            :created => snapshot[:aws_started_at]
+          )
+        end
 
-  def catched_exceptions_list
-    {
-      :auth => [ AWS::AuthFailure ],
-      :error => [ RightAws::AwsError ],
-      :glob => [ /.*AWS::(\w+)/ ]
-    }
-  end
+        def convert_state(ec2_state)
+          case ec2_state
+            when "terminated"
+              "STOPPED"
+            when "stopped"
+              "STOPPED"
+            when "running"
+              "RUNNING"
+            when "pending"
+              "PENDING"
+            when "shutting-down"
+              "STOPPED"
+          end
+        end
 
-end
+        def catched_exceptions_list
+          {
+            :auth => [], # [ ::Aws::AuthFailure ],
+            :error => [ ::Aws::AwsError ],
+            :glob => [ /AWS::(\w+)/ ]
+          }
+        end
 
+      end
     end
   end
 end
diff --git a/server/lib/deltacloud/models/storage_volume.rb b/server/lib/deltacloud/models/storage_volume.rb
index 0673f85..faf4ca4 100644
--- a/server/lib/deltacloud/models/storage_volume.rb
+++ b/server/lib/deltacloud/models/storage_volume.rb
@@ -24,5 +24,7 @@ class StorageVolume < BaseModel
   attr_accessor :capacity
   attr_accessor :instance_id
   attr_accessor :device
+  attr_accessor :realm_id
+  attr_accessor :actions
 
 end
diff --git a/server/server.rb b/server/server.rb
index 6b446ba..8c3b72c 100644
--- a/server/server.rb
+++ b/server/server.rb
@@ -401,6 +401,12 @@ END
 
 end
 
+get '/api/storage_snapshots/new' do
+  respond_to do |format|
+    format.html { haml :"storage_snapshots/new" }
+  end
+end
+
 collection :storage_snapshots do
   description "Storage snapshots description here"
 
@@ -417,6 +423,40 @@ collection :storage_snapshots do
     param :id,          :string,    :required
     control { show(:storage_snapshot) }
   end
+
+  operation :create do
+    description "Create a new snapshot from volume"
+    with_capability :create_storage_snapshot
+    param :volume_id, :string,  :required
+    control do
+      @storage_snapshot = driver.create_storage_snapshot(credentials, params)
+      show(:storage_snapshot)
+    end
+  end
+
+  operation :destroy do
+    description "Delete storage snapshot"
+    with_capability :destroy_storage_snapshot
+    param :id,  :string,  :required
+    control do
+      driver.destroy_storage_snapshot(credentials, params)
+      redirect(storage_snapshot_url(params[:id]))
+    end
+  end
+
+end
+
+get '/api/storage_volumes/new' do
+  respond_to do |format|
+    format.html { haml :"storage_volumes/new" }
+  end
+end
+
+get '/api/storage_volumes/attach' do
+  respond_to do |format|
+    @instances = driver.instances(credentials)
+    format.html { haml :"storage_volumes/attach" }
+  end
 end
 
 collection :storage_volumes do
@@ -435,6 +475,56 @@ collection :storage_volumes do
     param :id,          :string,    :required
     control { show(:storage_volume) }
   end
+
+  operation :create do
+    description "Create a new storage volume"
+    with_capability :create_storage_volume
+    param :snapshot_id, :string,  :optional
+    param :capacity,    :string,  :optional
+    param :realm_id,    :string,  :optional
+    control do
+      @storage_volume = driver.create_storage_volume(credentials, params)
+      respond_to do |format|
+        format.html { haml :"storage_volumes/show" }
+        format.xml { haml :"storage_volumes/show" }
+      end
+    end
+  end
+
+  operation :attach, :method => :post, :member => true do
+    description "Attach storage volume to instance"
+    with_capability :attach_storage_volume
+    param :id,         :string,  :required
+    param :instance_id,:string,  :required
+    param :device,     :string,  :required
+    control do
+      driver.attach_storage_volume(credentials, params)
+      redirect(storage_volume_url(params[:id]))
+    end
+  end
+
+  operation :detach, :method => :post, :member => true do
+    description "Detach storage volume from instance"
+    with_capability :detach_storage_volume
+    param :id,         :string,  :required
+    control do
+      volume = driver.storage_volume(credentials, :id => params[:id])
+      driver.detach_storage_volume(credentials, :id => volume.id, :instance_id => volume.instance_id,
+                                   :device => volume.device)
+      redirect(storage_volume_url(params[:id]))
+    end
+  end
+
+  operation :destroy do
+    description "Destroy storage volume"
+    with_capability :destroy_storage_volume
+    param :id,          :string,  :required
+    control do
+      driver.destroy_storage_volume(credentials, params)
+      redirect(storage_volumes_url)
+    end
+  end
+
 end
 
 get '/api/keys/new' do
diff --git a/server/views/keys/index.html.haml b/server/views/keys/index.html.haml
index 6f246a4..02f6b0c 100644
--- a/server/views/keys/index.html.haml
+++ b/server/views/keys/index.html.haml
@@ -18,7 +18,7 @@
             = "#{key.username} - #{key.password}"
         %td
           - if driver.respond_to?(:destroy_key)
-            =link_to_action 'Destroy', destroy_key_url(key.id), :delete
+            =link_to 'Destroy', destroy_key_url(key.id), :class => 'delete'
   %tfoot
     - if driver.respond_to?(:create_key)
       %tr
-- 
1.7.3.2


Mime
View raw message