diff --git libcloud/common/base.py libcloud/common/base.py
index 91ca544..1653cd8 100644
--- libcloud/common/base.py
+++ libcloud/common/base.py
@@ -574,7 +574,10 @@ class Connection(object):
         params, headers = self.pre_connect_hook(params, headers)
 
         if params:
-            url = '?'.join((action, urlencode(params)))
+            if '?' in action:
+                url = '&'.join((action, urlencode(params)))
+            else:
+                url = '?'.join((action, urlencode(params)))
         else:
             url = action
 
diff --git libcloud/storage/drivers/google_storage.py libcloud/storage/drivers/google_storage.py
index a7a12fa..4043790 100644
--- libcloud/storage/drivers/google_storage.py
+++ libcloud/storage/drivers/google_storage.py
@@ -133,3 +133,4 @@ class GoogleStorageDriver(S3StorageDriver):
     hash_type = 'md5'
     namespace = NAMESPACE
     supports_chunked_encoding = False
+    supports_s3_multipart_upload = False
diff --git libcloud/storage/drivers/s3.py libcloud/storage/drivers/s3.py
index 834f3c6..1c7acd5 100644
--- libcloud/storage/drivers/s3.py
+++ libcloud/storage/drivers/s3.py
@@ -24,6 +24,7 @@ from xml.etree.ElementTree import Element, SubElement, tostring
 from libcloud.utils.py3 import PY3
 from libcloud.utils.py3 import httplib
 from libcloud.utils.py3 import urlquote
+from libcloud.utils.py3 import urlencode
 from libcloud.utils.py3 import b
 
 from libcloud.utils.xml import fixxpath, findtext
@@ -39,6 +40,7 @@ from libcloud.storage.types import ContainerDoesNotExistError
 from libcloud.storage.types import ObjectDoesNotExistError
 from libcloud.storage.types import ObjectHashMismatchError
 
+
 # How long before the token expires
 EXPIRATION_SECONDS = 15 * 60
 
@@ -52,6 +54,9 @@ S3_AP_NORTHEAST_HOST = 's3-ap-northeast-1.amazonaws.com'
 API_VERSION = '2006-03-01'
 NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION)
 
+# AWS multipart upload parts must be at least 5 MB each
+CHUNK_SIZE = 5 * 1024 * 1024
+
 
 class S3Response(AWSBaseResponse):
 
@@ -168,6 +173,7 @@ class S3StorageDriver(StorageDriver):
     connectionCls = S3Connection
     hash_type = 'md5'
     supports_chunked_encoding = False
+    supports_s3_multipart_upload = True
     ex_location_name = ''
     namespace = NAMESPACE
 
@@ -185,12 +191,13 @@ class S3StorageDriver(StorageDriver):
         params = {}
         last_key = None
         exhausted = False
+        container_path = self.get_container_cdn_url(container)
 
         while not exhausted:
             if last_key:
                 params['marker'] = last_key
 
-            response = self.connection.request('/%s' % (container.name),
+            response = self.connection.request(container_path,
                                                params=params)
 
             if response.status != httplib.OK:
@@ -222,9 +229,9 @@ class S3StorageDriver(StorageDriver):
 
     def get_object(self, container_name, object_name):
         container = self.get_container(container_name=container_name)
-        response = self.connection.request('/%s/%s' % (container_name,
-                                                       object_name),
-                                           method='HEAD')
+        object_path = self._get_object_cdn_url(container, object_name)
+        response = self.connection.request(object_path, method='HEAD')
+
         if response.status == httplib.OK:
             obj = self._headers_to_object(object_name=object_name,
                                           container=container,
@@ -234,6 +241,42 @@ class S3StorageDriver(StorageDriver):
         raise ObjectDoesNotExistError(value=None, driver=self,
                                       object_name=object_name)
 
+    def get_container_cdn_url(self, container, check=False):
+        """
+        Return a container CDN URL.
+
+        @param container: Container instance
+        @type  container: L{Container}
+
+        @param check: Indicates if the path's existence must be checked
+        @type check: C{bool}
+
+        @return: A CDN URL for this container.
+        @rtype: C{str}
+        """
+        if check:
+            self.get_container(container.name)
+
+        return '/%s' % (container.name)
+
+    def _get_object_cdn_url(self, container, object_name):
+        container_url = self.get_container_cdn_url(container)
+        object_name_cleaned = self._clean_object_name(object_name)
+        object_path = '%s/%s' % (container_url, object_name_cleaned)
+        return object_path
+
+    def get_object_cdn_url(self, obj):
+        """
+        Return an object CDN URL.
+
+        @param obj: Object instance
+        @type  obj: L{Object}
+
+        @return: A CDN URL for this object.
+        @rtype: C{str}
+        """
+        return self._get_object_cdn_url(obj.container, obj.name)
+
     def create_container(self, container_name):
         if self.ex_location_name:
             root = Element('CreateBucketConfiguration')
@@ -288,13 +331,9 @@ class S3StorageDriver(StorageDriver):
 
     def download_object(self, obj, destination_path, overwrite_existing=False,
                         delete_on_failure=True):
-        container_name = self._clean_object_name(obj.container.name)
-        object_name = self._clean_object_name(obj.name)
+        obj_path = self.get_object_cdn_url(obj)
 
-        response = self.connection.request('/%s/%s' % (container_name,
-                                                       object_name),
-                                           method='GET',
-                                           raw=True)
+        response = self.connection.request(obj_path, method='GET', raw=True)
 
         return self._get_object(obj=obj, callback=self._save_object,
                                 response=response,
@@ -307,11 +346,8 @@ class S3StorageDriver(StorageDriver):
                                 success_status_code=httplib.OK)
 
     def download_object_as_stream(self, obj, chunk_size=None):
-        container_name = self._clean_object_name(obj.container.name)
-        object_name = self._clean_object_name(obj.name)
-        response = self.connection.request('/%s/%s' % (container_name,
-                                                       object_name),
-                                           method='GET', raw=True)
+        obj_path = self.get_object_cdn_url(obj)
+        response = self.connection.request(obj_path, method='GET', raw=True)
 
         return self._get_object(obj=obj, callback=read_in_chunks,
                                 response=response,
@@ -337,6 +373,133 @@ class S3StorageDriver(StorageDriver):
                                 verify_hash=verify_hash,
                                 storage_class=ex_storage_class)
 
+    def _upload_multipart(self, response, data, iterator, container,
+                          object_name, calculate_hash=True):
+        """Callback invoked for uploading data to S3 using Amazon's
+        multipart upload mechanism"""
+
+        object_path = self._get_object_cdn_url(container, object_name)
+
+        # Get the upload id from the response xml
+        response.body = response.response.read()
+        body = response.parse_body()
+        upload_id = body.find(fixxpath(xpath='UploadId',
+                                       namespace=self.namespace)).text
+
+        try:
+            # Upload the data through the iterator
+            result = self._upload_from_iterator(iterator, object_path,
+                                                upload_id, calculate_hash)
+            (chunks, data_hash, bytes_transferred) = result
+
+            # Commit the chunk info and complete the upload
+            etag = self._commit_multipart(object_path, upload_id, chunks)
+        except Exception:
+            # Amazon provides a mechanism for aborting an upload
+            self._abort_multipart(object_path, upload_id)
+
+            raise LibcloudError('Upload error. Operation aborted',
+                                driver=self)
+
+        # Modify the response header of the first request. This is used
+        # by other functions once the callback is done
+        response.headers['etag'] = etag
+
+        return (True, data_hash, bytes_transferred)
+
+    def _upload_from_iterator(self, iterator, object_path, upload_id,
+                              calculate_hash=True):
+        """Uploads data from an iterator in fixed-size chunks to S3"""
+
+        data_hash = None
+        if calculate_hash:
+            data_hash = self._get_hash_function()
+
+        bytes_transferred = 0
+        count = 1
+        chunks = []
+        params = {'uploadId': upload_id}
+
+        # Read the input data in chunk sizes suitable for AWS
+        for data in read_in_chunks(iterator, chunk_size=CHUNK_SIZE):
+            bytes_transferred += len(data)
+
+            if calculate_hash:
+                data_hash.update(data)
+
+            chunk_hash = self._get_hash_function()
+            chunk_hash.update(data)
+            chunk_hash = base64.b64encode(chunk_hash.digest())
+
+            # This provides an extra level of data integrity checking
+            # and is recommended by Amazon
+            headers = {'Content-MD5': chunk_hash}
+            params['partNumber'] = count
+
+            request_path = '?'.join((object_path, urlencode(params)))
+
+            resp = self.connection.request(request_path, method='PUT',
+                                           data=data, headers=headers)
+
+            if resp.status != httplib.OK:
+                raise LibcloudError('Error uploading chunk', driver=self)
+
+            server_hash = resp.headers['etag']
+
+            # Keep this data for a later commit
+            chunks.append((count, server_hash))
+            count += 1
+
+        if calculate_hash:
+            data_hash = data_hash.hexdigest()
+
+        return (chunks, data_hash, bytes_transferred)
+
+    def _commit_multipart(self, object_path, upload_id, chunks):
+        """Makes a final commit of the data"""
+
+        root = Element('CompleteMultipartUpload')
+
+        for (count, etag) in chunks:
+            part = SubElement(root, 'Part')
+            part_no = SubElement(part, 'PartNumber')
+            part_no.text = str(count)
+
+            etag_id = SubElement(part, 'ETag')
+            etag_id.text = str(etag)
+
+        if PY3:
+            encoding = 'unicode'
+        else:
+            encoding = None
+
+        data = tostring(root, encoding=encoding)
+
+        params = {'uploadId': upload_id}
+        request_path = '?'.join((object_path, urlencode(params)))
+        response = self.connection.request(request_path, data=data,
+                                           method='POST')
+
+        if response.status != httplib.OK:
+            raise LibcloudError('Error in multipart commit', driver=self)
+
+        # Get the server's etag to be passed back to the caller
+        body = response.parse_body()
+        server_hash = body.find(fixxpath(xpath='ETag',
+                                         namespace=self.namespace)).text
+        return server_hash
+
+    def _abort_multipart(self, object_path, upload_id):
+        """Aborts an already initiated multipart upload"""
+
+        params = {'uploadId': upload_id}
+        request_path = '?'.join((object_path, urlencode(params)))
+        resp = self.connection.request(request_path, method='DELETE')
+
+        if resp.status != httplib.NO_CONTENT:
+            raise LibcloudError('Error in multipart abort. status_code=%d' %
+                                (resp.status), driver=self)
+
     def upload_object_via_stream(self, iterator, container, object_name,
                                  extra=None, ex_storage_class=None):
         """
@@ -345,23 +508,42 @@ class S3StorageDriver(StorageDriver):
         @param ex_storage_class: Storage class
         @type ex_storage_class: C{str}
         """
-        #Amazon S3 does not support chunked transfer encoding so the whole data
-        #is read into memory before uploading the object.
-        upload_func = self._upload_data
-        upload_func_kwargs = {}
+
+        method = 'PUT'
+        params = None
+
+        # This driver is used by other S3 API compatible drivers also.
+        # Amazon provides a different (complex?) mechanism to do multipart
+        # uploads
+        if self.supports_s3_multipart_upload:
+            # Initiate the multipart request and get an upload id
+            upload_func = self._upload_multipart
+            upload_func_kwargs = {'iterator': iterator,
+                                  'container': container,
+                                  'object_name': object_name}
+            method = 'POST'
+            iterator = iter('')
+            params = 'uploads'
+
+        elif self.supports_chunked_encoding:
+            upload_func = self._stream_data
+            upload_func_kwargs = {'iterator': iterator}
+        else:
+            # In this case, we have to read the entire object into
+            # memory and send it as normal data
+            upload_func = self._upload_data
+            upload_func_kwargs = {}
 
         return self._put_object(container=container, object_name=object_name,
                                 upload_func=upload_func,
                                 upload_func_kwargs=upload_func_kwargs,
-                                extra=extra, iterator=iterator,
-                                verify_hash=False,
+                                extra=extra, method=method, query_args=params,
+                                iterator=iterator, verify_hash=False,
                                 storage_class=ex_storage_class)
 
     def delete_object(self, obj):
-        object_name = self._clean_object_name(name=obj.name)
-        response = self.connection.request('/%s/%s' % (obj.container.name,
-                                                       object_name),
-                                           method='DELETE')
+        object_path = self.get_object_cdn_url(obj)
+        response = self.connection.request(object_path, method='DELETE')
         if response.status == httplib.NO_CONTENT:
             return True
         elif response.status == httplib.NOT_FOUND:
@@ -375,8 +557,9 @@ class S3StorageDriver(StorageDriver):
         return name
 
     def _put_object(self, container, object_name, upload_func,
-                    upload_func_kwargs, extra=None, file_path=None,
-                    iterator=None, verify_hash=True, storage_class=None):
+                    upload_func_kwargs, method='PUT', query_args=None,
+                    extra=None, file_path=None, iterator=None,
+                    verify_hash=True, storage_class=None):
         headers = {}
         extra = extra or {}
         storage_class = storage_class or 'standard'
@@ -386,8 +569,6 @@ class S3StorageDriver(StorageDriver):
 
         headers['x-amz-storage-class'] = storage_class.upper()
 
-        container_name_cleaned = container.name
-        object_name_cleaned = self._clean_object_name(object_name)
         content_type = extra.get('content_type', None)
         meta_data = extra.get('meta_data', None)
 
@@ -396,7 +577,11 @@ class S3StorageDriver(StorageDriver):
                 key = 'x-amz-meta-%s' % (key)
                 headers[key] = value
 
-        request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned)
+        request_path = self._get_object_cdn_url(container, object_name)
+
+        if query_args:
+            request_path = '?'.join((request_path, query_args))
+
         # TODO: Let the underlying exceptions bubble up and capture the SIGPIPE
         # here.
         #SIGPIPE is thrown if the provided container does not exist or the user
@@ -404,7 +589,7 @@ class S3StorageDriver(StorageDriver):
         result_dict = self._upload_object(
             object_name=object_name, content_type=content_type,
             upload_func=upload_func, upload_func_kwargs=upload_func_kwargs,
-            request_path=request_path, request_method='PUT',
+            request_path=request_path, request_method=method,
             headers=headers, file_path=file_path, iterator=iterator)
 
         response = result_dict['response']
@@ -454,7 +639,8 @@ class S3StorageDriver(StorageDriver):
 
     def _headers_to_object(self, object_name, container, headers):
         hash = headers['etag'].replace('"', '')
-        extra = {'content_type': headers['content-type'], 'etag': headers['etag']}
+        extra = {'content_type': headers['content-type'],
+                 'etag': headers['etag']}
         meta_data = {}
 
         if 'last-modified' in headers:
diff --git libcloud/test/__init__.py libcloud/test/__init__.py
index d63a0cf..3655fd8 100644
--- libcloud/test/__init__.py
+++ libcloud/test/__init__.py
@@ -85,6 +85,12 @@ class MockResponse(object):
     def read(self, *args, **kwargs):
         return self.body.read(*args, **kwargs)
 
+    def next(self):
+        return next(self.body)
+
+    def __next__(self):
+        return self.next()
+
     def getheader(self, name, *args, **kwargs):
         return self.headers.get(name, *args, **kwargs)
 
@@ -96,6 +102,7 @@ class MockResponse(object):
 
 class BaseMockHttpObject(object):
     def _get_method_name(self, type, use_param, qs, path):
+        path = path.split('?')[0]
         meth_name = path.replace('/', '_').replace('.', '_').replace('-', '_')
         if type:
             meth_name = '%s_%s' % (meth_name, self.type)
@@ -247,12 +254,12 @@ class MockRawResponse(BaseMockHttpObject):
         return self.next()
 
     def _generate_random_data(self, size):
-        data = []
+        data = ''
         current_size = 0
         while current_size < size:
             value = str(random.randint(0, 9))
             value_size = len(value)
-            data.append(value)
+            data += value
             current_size += value_size
 
         return data
@@ -286,7 +293,6 @@ class MockRawResponse(BaseMockHttpObject):
             self._status, self._body, self._headers, self._reason = result
             self._response = self.responseCls(self._status, self._body,
                                               self._headers, self._reason)
-            return self
         return self._response
 
 if __name__ == "__main__":
diff --git libcloud/test/storage/fixtures/s3/complete_multipart.xml libcloud/test/storage/fixtures/s3/complete_multipart.xml
new file mode 100644
index 0000000..f5aa94f
--- /dev/null
+++ libcloud/test/storage/fixtures/s3/complete_multipart.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>
+  <Bucket>Example-Bucket</Bucket>
+  <Key>Example-Object</Key>
+  <ETag>"3858f62230ac3c915f300c664312c11f-9"</ETag>
+</CompleteMultipartUploadResult>
diff --git libcloud/test/storage/fixtures/s3/initiate_multipart.xml libcloud/test/storage/fixtures/s3/initiate_multipart.xml
new file mode 100644
index 0000000..4c0b8bf
--- /dev/null
+++ libcloud/test/storage/fixtures/s3/initiate_multipart.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Bucket>example-bucket</Bucket>
+  <Key>example-object</Key>
+  <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>
+</InitiateMultipartUploadResult>
diff --git libcloud/test/storage/test_atmos.py libcloud/test/storage/test_atmos.py
index 671ce80..4da0092 100644
--- libcloud/test/storage/test_atmos.py
+++ libcloud/test/storage/test_atmos.py
@@ -739,14 +739,12 @@ class AtmosMockRawResponse(MockRawResponse):
 
     def _rest_namespace_foo_bar_container_foo_bar_object(self, method, url,
                                                          body, headers):
-        body = 'test'
-        self._data = self._generate_random_data(1000)
+        body = self._generate_random_data(1000)
         return (httplib.OK, body, {}, httplib.responses[httplib.OK])
 
     def _rest_namespace_foo_20_26_20bar_container_foo_20_26_20bar_object(self, method, url,
                                                                          body, headers):
-        body = 'test'
-        self._data = self._generate_random_data(1000)
+        body = self._generate_random_data(1000)
         return (httplib.OK, body, {}, httplib.responses[httplib.OK])
 
     def _rest_namespace_foo_bar_container_foo_bar_object_NOT_FOUND(self, method,
diff --git libcloud/test/storage/test_cloudfiles.py libcloud/test/storage/test_cloudfiles.py
index 5654828..17d20c8 100644
--- libcloud/test/storage/test_cloudfiles.py
+++ libcloud/test/storage/test_cloudfiles.py
@@ -992,8 +992,7 @@ class CloudFilesMockRawResponse(MockRawResponse):
         self, method, url, body, headers):
 
         # test_download_object_success
-        body = 'test'
-        self._data = self._generate_random_data(1000)
+        body = self._generate_random_data(1000)
         return (httplib.OK,
                 body,
                 self.base_headers,
@@ -1002,8 +1001,7 @@ class CloudFilesMockRawResponse(MockRawResponse):
     def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_INVALID_SIZE(
         self, method, url, body, headers):
         # test_download_object_invalid_file_size
-        body = 'test'
-        self._data = self._generate_random_data(100)
+        body = self._generate_random_data(100)
         return (httplib.OK, body,
                 self.base_headers,
                 httplib.responses[httplib.OK])
diff --git libcloud/test/storage/test_s3.py libcloud/test/storage/test_s3.py
index 3d9f930..ad6fa30 100644
--- libcloud/test/storage/test_s3.py
+++ libcloud/test/storage/test_s3.py
@@ -17,7 +17,9 @@ import os
 import sys
 import unittest
 
+from xml.etree import ElementTree as ET
 from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlparse
 
 from libcloud.common.types import InvalidCredsError
 from libcloud.common.types import LibcloudError
@@ -181,15 +183,98 @@ class S3MockHttp(StorageMockHttp):
                 headers,
                 httplib.responses[httplib.OK])
 
+    def _foo_bar_container_foo_test_stream_data(self, method, url, body,
+                                                headers):
+        # test_upload_object_via_stream
+        body = ''
+        headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'}
+        return (httplib.OK,
+                body,
+                headers,
+                httplib.responses[httplib.OK])
+
+    def _foo_bar_container_foo_test_stream_data_MULTIPART(self, method, url,
+                                                          body, headers):
+        headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'}
+        TEST_ID = 'VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA'
+
+        bad_request = (httplib.BAD_REQUEST,
+                       '',
+                       self.base_headers,
+                       httplib.responses[httplib.BAD_REQUEST])
+
+        query_string = urlparse.urlsplit(url).query
+        query = urlparse.parse_qs(query_string)
+
+        if not query.get('uploadId', False):
+            return bad_request
+
+        upload_id = query['uploadId'][0]
+        if upload_id != TEST_ID:
+            return bad_request
+
+        if method == 'PUT':
+            # PUT is used for uploading the part. part number is mandatory
+            if not query.get('partNumber', False):
+                return bad_request
+
+            body = ''
+            return (httplib.OK,
+                    body,
+                    headers,
+                    httplib.responses[httplib.OK])
+
+        elif method == 'DELETE':
+            # DELETE is done for aborting the upload
+            body = ''
+            return (httplib.NO_CONTENT,
+                    body,
+                    headers,
+                    httplib.responses[httplib.NO_CONTENT])
+
+        else:
+            # POST is done for committing the upload. Parse the XML and
+            # check if the commit is proper (TODO: XML Schema based check?)
+            commit = ET.fromstring(body)
+            count = 0
+
+            for part in commit.findall('Part'):
+                count += 1
+                part_no = part.find('PartNumber').text
+                etag = part.find('ETag').text
+
+                if part_no != str(count) or etag != headers['etag']:
+                    return bad_request
+
+            if count != 3:
+                return bad_request
+
+            body = self.fixtures.load('complete_multipart.xml')
+            return (httplib.OK,
+                    body,
+                    headers,
+                    httplib.responses[httplib.OK])
+
 
 class S3MockRawResponse(MockRawResponse):
 
     fixtures = StorageFileFixtures('s3')
 
+    def parse_body(self):
+        if len(self.body) == 0 and not self.parse_zero_length_body:
+            return self.body
+
+        try:
+            body = ET.XML(self.body)
+        except:
+            raise MalformedResponseError("Failed to parse XML",
+                                         body=self.body,
+                                         driver=self.connection.driver)
+        return body
+
     def _foo_bar_container_foo_bar_object(self, method, url, body, headers):
         # test_download_object_success
-        body = ''
-        self._data = self._generate_random_data(1000)
+        body = self._generate_random_data(1000)
         return (httplib.OK,
                 body,
                 headers,
@@ -225,6 +310,14 @@ class S3MockRawResponse(MockRawResponse):
                 headers,
                 httplib.responses[httplib.OK])
 
+    def _foo_bar_container_foo_bar_object(self, method, url, body, headers):
+        # test_upload_object_invalid_file_size
+        body = self._generate_random_data(1000)
+        return (httplib.OK,
+                body,
+                headers,
+                httplib.responses[httplib.OK])
+
     def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url,
                                                        body, headers):
         # test_upload_object_invalid_file_size
@@ -244,6 +337,23 @@ class S3MockRawResponse(MockRawResponse):
                 headers,
                 httplib.responses[httplib.OK])
 
+    def _foo_bar_container_foo_test_stream_data_MULTIPART(self, method, url,
+                                                          body, headers):
+        headers = {}
+        # POST is done for initiating multipart upload
+        if method == 'POST':
+            body = self.fixtures.load('initiate_multipart.xml')
+            return (httplib.OK,
+                    body,
+                    headers,
+                    httplib.responses[httplib.OK])
+        else:
+            body = ''
+            return (httplib.BAD_REQUEST,
+                    body,
+                    headers,
+                    httplib.responses[httplib.BAD_REQUEST])
+
 
 class S3Tests(unittest.TestCase):
     driver_type = S3StorageDriver
@@ -377,7 +487,8 @@ class S3Tests(unittest.TestCase):
         self.assertEqual(obj.container.name, 'test2')
         self.assertEqual(obj.size, 12345)
         self.assertEqual(obj.hash, 'e31208wqsdoj329jd')
-        self.assertEqual(obj.extra['last_modified'], 'Thu, 13 Sep 2012 07:13:22 GMT')
+        self.assertEqual(obj.extra['last_modified'],
+                         'Thu, 13 Sep 2012 07:13:22 GMT')
         self.assertEqual(obj.extra['content_type'], 'application/zip')
         self.assertEqual(obj.meta_data['rabbits'], 'monkeys')
 
@@ -457,7 +568,7 @@ class S3Tests(unittest.TestCase):
                               driver=self.driver)
         obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                      container=container, meta_data=None,
-                     driver=S3StorageDriver)
+                     driver=self.driver_type)
         destination_path = os.path.abspath(__file__) + '.temp'
         result = self.driver.download_object(obj=obj,
                                              destination_path=destination_path,
@@ -471,7 +582,7 @@ class S3Tests(unittest.TestCase):
                               driver=self.driver)
         obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                      container=container, meta_data=None,
-                     driver=S3StorageDriver)
+                     driver=self.driver_type)
         destination_path = os.path.abspath(__file__) + '.temp'
         result = self.driver.download_object(obj=obj,
                                              destination_path=destination_path,
@@ -485,7 +596,7 @@ class S3Tests(unittest.TestCase):
                               driver=self.driver)
         obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                      container=container, meta_data=None,
-                     driver=S3StorageDriver)
+                     driver=self.driver_type)
         destination_path = os.path.abspath(__file__)
         try:
             self.driver.download_object(obj=obj,
@@ -503,7 +614,7 @@ class S3Tests(unittest.TestCase):
 
         obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                      container=container, meta_data=None,
-                     driver=S3StorageDriver)
+                     driver=self.driver_type)
 
         stream = self.driver.download_object_as_stream(obj=obj,
                                                        chunk_size=None)
@@ -536,8 +647,8 @@ class S3Tests(unittest.TestCase):
 
         self.mock_raw_response_klass.type = 'INVALID_HASH1'
 
-        old_func = S3StorageDriver._upload_file
-        S3StorageDriver._upload_file = upload_file
+        old_func = self.driver_type._upload_file
+        self.driver_type._upload_file = upload_file
         file_path = os.path.abspath(__file__)
         container = Container(name='foo_bar_container', extra={},
                               driver=self.driver)
@@ -552,7 +663,7 @@ class S3Tests(unittest.TestCase):
             self.fail(
                 'Invalid hash was returned but an exception was not thrown')
         finally:
-            S3StorageDriver._upload_file = old_func
+            self.driver_type._upload_file = old_func
 
     def test_upload_object_invalid_hash2(self):
         # Invalid hash is detected when comparing hash provided in the response
@@ -563,8 +674,8 @@ class S3Tests(unittest.TestCase):
 
         self.mock_raw_response_klass.type = 'INVALID_HASH2'
 
-        old_func = S3StorageDriver._upload_file
-        S3StorageDriver._upload_file = upload_file
+        old_func = self.driver_type._upload_file
+        self.driver_type._upload_file = upload_file
         file_path = os.path.abspath(__file__)
         container = Container(name='foo_bar_container', extra={},
                               driver=self.driver)
@@ -579,15 +690,15 @@ class S3Tests(unittest.TestCase):
             self.fail(
                 'Invalid hash was returned but an exception was not thrown')
         finally:
-            S3StorageDriver._upload_file = old_func
+            self.driver_type._upload_file = old_func
 
     def test_upload_object_success(self):
         def upload_file(self, response, file_path, chunked=False,
                         calculate_hash=True):
             return True, '0cc175b9c0f1b6a831c399e269772661', 1000
 
-        old_func = S3StorageDriver._upload_file
-        S3StorageDriver._upload_file = upload_file
+        old_func = self.driver_type._upload_file
+        self.driver_type._upload_file = upload_file
         file_path = os.path.abspath(__file__)
         container = Container(name='foo_bar_container', extra={},
                               driver=self.driver)
@@ -601,9 +712,17 @@ class S3Tests(unittest.TestCase):
         self.assertEqual(obj.name, 'foo_test_upload')
         self.assertEqual(obj.size, 1000)
         self.assertTrue('some-value' in obj.meta_data)
-        S3StorageDriver._upload_file = old_func
+        self.driver_type._upload_file = old_func
 
     def test_upload_object_via_stream(self):
+
+        if self.driver.supports_s3_multipart_upload:
+            self.mock_raw_response_klass.type = 'MULTIPART'
+            self.mock_response_klass.type = 'MULTIPART'
+        else:
+            self.mock_raw_response_klass.type = None
+            self.mock_response_klass.type = None
+
         container = Container(name='foo_bar_container', extra={},
                               driver=self.driver)
         object_name = 'foo_test_stream_data'
@@ -617,6 +736,34 @@ class S3Tests(unittest.TestCase):
         self.assertEqual(obj.name, object_name)
         self.assertEqual(obj.size, 3)
 
+    def test_upload_object_via_stream_abort(self):
+        if not self.driver.supports_s3_multipart_upload:
+            return
+
+        self.mock_raw_response_klass.type = 'MULTIPART'
+        self.mock_response_klass.type = 'MULTIPART'
+
+        def _faulty_iterator():
+            for i in range(0, 5):
+                yield str(i)
+            raise RuntimeError('Error in fetching data')
+
+        container = Container(name='foo_bar_container', extra={},
+                              driver=self.driver)
+        object_name = 'foo_test_stream_data'
+        iterator = _faulty_iterator()
+        extra = {'content_type': 'text/plain'}
+
+        try:
+            obj = self.driver.upload_object_via_stream(container=container,
+                                                       object_name=object_name,
+                                                       iterator=iterator,
+                                                       extra=extra)
+        except LibcloudError:
+            pass
+
+        return
+
     def test_delete_object_not_found(self):
         self.mock_response_klass.type = 'NOT_FOUND'
         container = Container(name='foo_bar_container', extra={},
