ariatosca-dev mailing list archives

From mxm...@apache.org
Subject [2/2] incubator-ariatosca git commit: Moved to SQLAlchemy based models and changed architecture
Date Sun, 27 Nov 2016 16:21:32 GMT
Moved to SQLAlchemy based models and changed architecture


Project: http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/commit/1bb1f6ba
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/tree/1bb1f6ba
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/diff/1bb1f6ba

Branch: refs/heads/SQLAlchemy-based-models
Commit: 1bb1f6bad90e5fcf8699faa672d2bc53f16998e1
Parents: b33c70e
Author: mxmrlv <mxmrlv@gmail.com>
Authored: Sun Nov 27 13:20:46 2016 +0200
Committer: mxmrlv <mxmrlv@gmail.com>
Committed: Sun Nov 27 18:20:54 2016 +0200

----------------------------------------------------------------------
 aria/__init__.py                                |  39 +-
 aria/orchestrator/context/workflow.py           |   2 +-
 aria/storage/__init__.py                        | 372 +-------
 aria/storage/api_driver/__init__.py             |  18 +
 aria/storage/api_driver/file_system.py          | 229 +++++
 aria/storage/api_driver/in_memory.py            | 146 ++++
 aria/storage/api_driver/sql.py                  |  44 +
 aria/storage/drivers.py                         | 416 ---------
 aria/storage/models.py                          | 853 +++++++++++--------
 aria/storage/states.py                          |  68 ++
 aria/storage/storage_api.py                     |  94 ++
 aria/storage/structures.py                      | 410 ++++-----
 tests/mock/context.py                           |   4 +-
 tests/mock/models.py                            |  18 +-
 tests/orchestrator/context/test_workflow.py     |   4 +-
 .../orchestrator/workflows/core/test_engine.py  |   3 +-
 tests/storage/__init__.py                       |  27 -
 tests/storage/test_drivers.py                   | 135 ---
 tests/storage/test_field.py                     | 124 ---
 tests/storage/test_model_storage.py             | 167 ++--
 tests/storage/test_models.py                    | 364 --------
 tests/storage/test_models_api.py                |  70 --
 tests/storage/test_resource_storage.py          |  55 +-
 23 files changed, 1456 insertions(+), 2206 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1bb1f6ba/aria/__init__.py
----------------------------------------------------------------------
diff --git a/aria/__init__.py b/aria/__init__.py
index 3f81f98..4dd23c9 100644
--- a/aria/__init__.py
+++ b/aria/__init__.py
@@ -23,7 +23,7 @@ import pkgutil
 from .VERSION import version as __version__
 
 from .orchestrator.decorators import workflow, operation
-from .storage import ModelStorage, ResourceStorage, models, ModelDriver, ResourceDriver
+from . import storage
 from . import (
     utils,
     parser,
@@ -58,37 +58,36 @@ def install_aria_extensions():
             del sys.modules[module_name]
 
 
-def application_model_storage(driver):
+def application_model_storage(api):
     """
     Initiate model storage for the supplied storage driver
     """
 
-    assert isinstance(driver, ModelDriver)
-    if driver not in _model_storage:
-        _model_storage[driver] = ModelStorage(
-            driver, model_classes=[
-                models.Node,
-                models.NodeInstance,
-                models.Plugin,
-                models.Blueprint,
-                models.Snapshot,
-                models.Deployment,
-                models.DeploymentUpdate,
-                models.DeploymentModification,
-                models.Execution,
-                models.ProviderContext,
-                models.Task,
+    if api not in _model_storage:
+        _model_storage[api] = storage.ModelStorage(
+            api, items=[
+                storage.models.Node,
+                storage.models.NodeInstance,
+                storage.models.Plugin,
+                storage.models.Blueprint,
+                storage.models.Snapshot,
+                storage.models.Deployment,
+                storage.models.DeploymentUpdate,
+                storage.models.DeploymentUpdateStep,
+                storage.models.DeploymentModification,
+                storage.models.Execution,
+                storage.models.ProviderContext,
+                storage.models.Task,
             ])
-    return _model_storage[driver]
+    return _model_storage[api]
 
 
 def application_resource_storage(driver):
     """
     Initiate resource storage for the supplied storage driver
     """
-    assert isinstance(driver, ResourceDriver)
     if driver not in _resource_storage:
-        _resource_storage[driver] = ResourceStorage(
+        _resource_storage[driver] = storage.ResourceStorage(
             driver,
             resources=[
                 'blueprint',

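[Editor's note] For orientation, a minimal usage sketch of the reworked storage wiring. It constructs a ModelStorage directly rather than through application_model_storage, and it assumes (based on ModelStorage.register in this commit) that storage_api.ModelAPI subclasses accept name and model_cls keyword arguments; FileSystemModelAPI and its directory parameter are introduced later in this message.

    from aria.storage import ModelStorage, models
    from aria.storage.api_driver import FileSystemModelAPI

    # The storage now receives an API *class*; one instance is created
    # per registered model via ModelStorage.register().
    model_storage = ModelStorage(
        FileSystemModelAPI,
        items=[models.Blueprint, models.Node],
        api_params={'directory': '/tmp/aria-models'})

    # __getattr__ resolves registered model names to their API instances:
    blueprint_api = model_storage.blueprint
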
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1bb1f6ba/aria/orchestrator/context/workflow.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/context/workflow.py b/aria/orchestrator/context/workflow.py
index 3dc222b..280390c 100644
--- a/aria/orchestrator/context/workflow.py
+++ b/aria/orchestrator/context/workflow.py
@@ -76,7 +76,7 @@ class WorkflowContext(BaseContext):
 
 class _CurrentContext(threading.local):
     """
-    Provides thread-level context, which sugarcoats the task api.
+    Provides thread-level context, which sugarcoats the task API.
     """
 
     def __init__(self):

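[Editor's note] For readers unfamiliar with the pattern _CurrentContext builds on: subclassing threading.local gives each thread its own copy of the instance attributes. A generic, self-contained sketch (not this class's actual body):

    import threading

    class _PerThread(threading.local):
        def __init__(self):
            super(_PerThread, self).__init__()
            self.value = None  # initialized independently in every thread

    current = _PerThread()

    def worker():
        current.value = 'worker'   # does not affect the main thread's copy

    thread = threading.Thread(target=worker)
    thread.start()
    thread.join()
    assert current.value is None   # main thread still sees its own value
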
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1bb1f6ba/aria/storage/__init__.py
----------------------------------------------------------------------
diff --git a/aria/storage/__init__.py b/aria/storage/__init__.py
index 2d142a5..9cb6c77 100644
--- a/aria/storage/__init__.py
+++ b/aria/storage/__init__.py
@@ -20,14 +20,14 @@ Path: aria.storage
 Storage package is a generic abstraction over different storage types.
 We define this abstraction with the following components:
 
-1. storage: simple api to use
-2. driver: implementation of the database client api.
+1. storage: a simple API to use.
+2. api: an implementation of the storage access layer (see the api_driver package).
 3. model: defines the structure of the table/document.
 4. field: defines a field/item in the model.
 
 API:
     * application_storage_factory - function, default Aria storage factory.
-    * Storage - class, simple storage api.
+    * Storage - class, simple storage API.
     * models - module, default Aria standard models.
     * structures - module, default Aria structures - holds the base model,
                    and different fields types.
@@ -37,354 +37,60 @@ API:
     * drivers - module, a pool of Aria standard drivers.
     * StorageDriver - class, abstract model implementation.
 """
-# todo: rewrite the above package documentation
-# (something like explaning the two types of storage - models and resources)
 
-from collections import namedtuple
+from aria.logger import LoggerMixin
+from aria.storage.exceptions import StorageError
+from . import models, exceptions, storage_api, structures
 
-from .structures import Storage, Field, Model, IterField, PointerField
-from .drivers import (
-    ModelDriver,
-    ResourceDriver,
-    FileSystemResourceDriver,
-    FileSystemModelDriver,
-)
-from . import models, exceptions
 
 __all__ = (
     'ModelStorage',
-    'ResourceStorage',
-    'FileSystemModelDriver',
     'models',
     'structures',
-    'Field',
-    'IterField',
-    'PointerField',
-    'Model',
-    'drivers',
-    'ModelDriver',
-    'ResourceDriver',
-    'FileSystemResourceDriver',
 )
-# todo: think about package output api's...
-# todo: in all drivers name => entry_type
-# todo: change in documentation str => basestring
-
-
-class ModelStorage(Storage):
-    """
-    Managing the models storage.
-    """
-    def __init__(self, driver, model_classes=(), **kwargs):
-        """
-        Simple storage client api for Aria applications.
-        The storage instance defines the tables/documents/code api.
-
-        :param ModelDriver driver: model storage driver.
-        :param model_classes: the models to register.
-        """
-        assert isinstance(driver, ModelDriver)
-        super(ModelStorage, self).__init__(driver, model_classes, **kwargs)
-
-    def __getattr__(self, table):
-        """
-        getattr is a shortcut to simple api
-
-        for Example:
-        >> storage = ModelStorage(driver=FileSystemModelDriver('/tmp'))
-        >> node_table = storage.node
-        >> for node in node_table:
-        >>     print node
-
-        :param str table: table name to get
-        :return: a storage object that mapped to the table name
-        """
-        return super(ModelStorage, self).__getattr__(table)
-
-    def register(self, model_cls):
-        """
-        Registers the model type in the resource storage manager.
-        :param model_cls: the model to register.
-        """
-        model_name = generate_lower_name(model_cls)
-        model_api = _ModelApi(model_name, self.driver, model_cls)
-        self.registered[model_name] = model_api
-
-        for pointer_schema_register in model_api.pointer_mapping.values():
-            model_cls = pointer_schema_register.model_cls
-            self.register(model_cls)
-
-_Pointer = namedtuple('_Pointer', 'name, is_iter')
-
-
-class _ModelApi(object):
-    def __init__(self, name, driver, model_cls):
-        """
-        Managing the model in the storage, using the driver.
-
-        :param basestring name: the name of the model.
-        :param ModelDriver driver: the driver which supports this model in the storage.
-        :param Model model_cls: table/document class model.
-        """
-        assert isinstance(driver, ModelDriver)
-        assert issubclass(model_cls, Model)
-        self.name = name
-        self.driver = driver
-        self.model_cls = model_cls
-        self.pointer_mapping = {}
-        self._setup_pointers_mapping()
-
-    def _setup_pointers_mapping(self):
-        for field_name, field_cls in vars(self.model_cls).items():
-            if not(isinstance(field_cls, PointerField) and field_cls.type):
-                continue
-            pointer_key = _Pointer(field_name, is_iter=isinstance(field_cls, IterField))
-            self.pointer_mapping[pointer_key] = self.__class__(
-                name=generate_lower_name(field_cls.type),
-                driver=self.driver,
-                model_cls=field_cls.type)
-
-    def __iter__(self):
-        return self.iter()
-
-    def __repr__(self):
-        return '{self.name}(driver={self.driver}, model={self.model_cls})'.format(self=self)
-
-    def create(self):
-        """
-        Creates the model in the storage.
-        """
-        with self.driver as connection:
-            connection.create(self.name)
-
-    def get(self, entry_id, **kwargs):
-        """
-        Getter for the model from the storage.
-
-        :param basestring entry_id: the id of the table/document.
-        :return: model instance
-        :rtype: Model
-        """
-        with self.driver as connection:
-            data = connection.get(
-                name=self.name,
-                entry_id=entry_id,
-                **kwargs)
-            data.update(self._get_pointers(data, **kwargs))
-        return self.model_cls(**data)
-
-    def store(self, entry, **kwargs):
-        """
-        Setter for the model in the storage.
-
-        :param Model entry: the table/document to store.
-        """
-        assert isinstance(entry, self.model_cls)
-        with self.driver as connection:
-            data = entry.fields_dict
-            data.update(self._store_pointers(data, **kwargs))
-            connection.store(
-                name=self.name,
-                entry_id=entry.id,
-                entry=data,
-                **kwargs)
 
-    def delete(self, entry_id, **kwargs):
-        """
-        Delete the model from storage.
 
-        :param basestring entry_id: id of the entity to delete from storage.
-        """
-        entry = self.get(entry_id)
-        with self.driver as connection:
-            self._delete_pointers(entry, **kwargs)
-            connection.delete(
-                name=self.name,
-                entry_id=entry_id,
-                **kwargs)
-
-    def iter(self, **kwargs):
-        """
-        Generator over the entries of model in storage.
-        """
-        with self.driver as connection:
-            for data in connection.iter(name=self.name, **kwargs):
-                data.update(self._get_pointers(data, **kwargs))
-                yield self.model_cls(**data)
-
-    def update(self, entry_id, **kwargs):
-        """
-        Updates and entry in storage.
-
-        :param str entry_id: the id of the table/document.
-        :param kwargs: the fields to update.
-        :return:
-        """
-        with self.driver as connection:
-            connection.update(
-                name=self.name,
-                entry_id=entry_id,
-                **kwargs
-            )
-
-    def _get_pointers(self, data, **kwargs):
-        pointers = {}
-        for field, schema in self.pointer_mapping.items():
-            if field.is_iter:
-                pointers[field.name] = [
-                    schema.get(entry_id=pointer_id, **kwargs)
-                    for pointer_id in data[field.name]
-                    if pointer_id]
-            elif data[field.name]:
-                pointers[field.name] = schema.get(entry_id=data[field.name], **kwargs)
-        return pointers
-
-    def _store_pointers(self, data, **kwargs):
-        pointers = {}
-        for field, model_api in self.pointer_mapping.items():
-            if field.is_iter:
-                pointers[field.name] = []
-                for iter_entity in data[field.name]:
-                    pointers[field.name].append(iter_entity.id)
-                    model_api.store(iter_entity, **kwargs)
-            else:
-                pointers[field.name] = data[field.name].id
-                model_api.store(data[field.name], **kwargs)
-        return pointers
-
-    def _delete_pointers(self, entry, **kwargs):
-        for field, schema in self.pointer_mapping.items():
-            if field.is_iter:
-                for iter_entry in getattr(entry, field.name):
-                    schema.delete(iter_entry.id, **kwargs)
-            else:
-                schema.delete(getattr(entry, field.name).id, **kwargs)
-
-
-class ResourceApi(object):
+class Storage(LoggerMixin):
     """
-    Managing the resource in the storage, using the driver.
-
-    :param basestring name: the name of the resource.
-    :param ResourceDriver driver: the driver which supports this resource in the storage.
+    Represents the storage; manages API instances for registered items.
     """
-    def __init__(self, driver, resource_name):
-        """
-        Managing the resources in the storage, using the driver.
-
-        :param ResourceDriver driver: the driver which supports this model in the storage.
-        :param basestring resource_name: the type of the entry this resourceAPI manages.
-        """
-        assert isinstance(driver, ResourceDriver)
-        self.driver = driver
-        self.resource_name = resource_name
+    def __init__(self, api, items=(), api_params=None, **kwargs):
+        self._api_params = api_params or {}
+        super(Storage, self).__init__(**kwargs)
+        self.api = api
+        self.registered = {}
+        for item in items:
+            self.register(item)
+        self.logger.debug('{name} object is ready: {0!r}'.format(
+            self, name=self.__class__.__name__))
 
     def __repr__(self):
-        return '{name}(driver={self.driver}, resource={self.resource_name})'.format(
-            name=self.__class__.__name__, self=self)
-
-    def create(self):
-        """
-        Create the resource dir in the storage.
-        """
-        with self.driver as connection:
-            connection.create(self.resource_name)
-
-    def data(self, entry_id, path=None, **kwargs):
-        """
-        Retrieve the content of a storage resource.
-
-        :param basestring entry_id: the id of the entry.
-        :param basestring path: path of the resource on the storage.
-        :param kwargs: resources to be passed to the driver..
-        :return the content of a single file:
-        """
-        with self.driver as connection:
-            return connection.data(
-                entry_type=self.resource_name,
-                entry_id=entry_id,
-                path=path,
-                **kwargs)
+        return '{name}(api={self.api})'.format(name=self.__class__.__name__, self=self)
 
-    def download(self, entry_id, destination, path=None, **kwargs):
-        """
-        Download a file/dir from the resource storage.
+    def __getattr__(self, item):
+        try:
+            return self.registered[item]
+        except KeyError:
+            return super(Storage, self).__getattribute__(item)
 
-        :param basestring entry_id: the id of the entry.
-        :param basestring destination: the destination of the file/dir.
-        :param basestring path: path of the resource on the storage.
-        """
-        with self.driver as connection:
-            connection.download(
-                entry_type=self.resource_name,
-                entry_id=entry_id,
-                destination=destination,
-                path=path,
-                **kwargs)
-
-    def upload(self, entry_id, source, path=None, **kwargs):
-        """
-        Upload a file/dir from the resource storage.
-
-        :param basestring entry_id: the id of the entry.
-        :param basestring source: the source path of the file to upload.
-        :param basestring path: the destination of the file, relative to the root dir
-                                of the resource
-        """
-        with self.driver as connection:
-            connection.upload(
-                entry_type=self.resource_name,
-                entry_id=entry_id,
-                source=source,
-                path=path,
-                **kwargs)
-
-
-def generate_lower_name(model_cls):
-    """
-    Generates the name of the class from the class object. e.g. SomeClass -> some_class
-    :param model_cls: the class to evaluate.
-    :return: lower name
-    :rtype: basestring
-    """
-    return ''.join(
-        character if character.islower() else '_{0}'.format(character.lower())
-        for character in model_cls.__name__)[1:]
+    def register(self, name):
+        raise NotImplementedError('Subclass must implement abstract register method')
 
 
 class ResourceStorage(Storage):
-    """
-    Managing the resource storage.
-    """
-    def __init__(self, driver, resources=(), **kwargs):
-        """
-        Simple storage client api for Aria applications.
-        The storage instance defines the tables/documents/code api.
-
-        :param ResourceDriver driver: resource storage driver
-        :param resources: the resources to register.
-        """
-        assert isinstance(driver, ResourceDriver)
-        super(ResourceStorage, self).__init__(driver, resources, **kwargs)
+    def register(self, name):
+        self.registered[name] = self.api(name=name,
+                                         **self._api_params)
+        self.registered[name].create()
+        self.logger.debug('setup {name} in storage {self!r}'.format(name=name, self=self))
 
-    def register(self, resource):
-        """
-        Registers the resource type in the resource storage manager.
-        :param resource: the resource to register.
-        """
-        self.registered[resource] = ResourceApi(self.driver, resource_name=resource)
 
-    def __getattr__(self, resource):
-        """
-        getattr is a shortcut to simple api
-
-        for Example:
-        >> storage = ResourceStorage(driver=FileSystemResourceDriver('/tmp'))
-        >> blueprint_resources = storage.blueprint
-        >> blueprint_resources.download(blueprint_id, destination='~/blueprint/')
-
-        :param str resource: resource name to download
-        :return: a storage object that mapped to the resource name
-        :rtype: ResourceApi
-        """
-        return super(ResourceStorage, self).__getattr__(resource)
+class ModelStorage(Storage):
+    def register(self, model):
+        model_name = storage_api.generate_lower_name(model)
+        self.registered[model_name] = self.api(name=model_name,
+                                               model_cls=model,
+                                               **self._api_params)
+        self.registered[model_name].create()
+        self.logger.debug('setup {name} in storage {self!r}'.format(name=model_name, self=self))

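[Editor's note] A hypothetical end-to-end sketch of the new registration flow for resources, assuming (as with the model API) that storage_api.ResourceAPI accepts a name keyword argument and that FileSystemResourceAPI takes the directory parameter shown later in this commit:

    from aria.storage import ResourceStorage
    from aria.storage.api_driver import FileSystemResourceAPI

    resource_storage = ResourceStorage(
        FileSystemResourceAPI,
        items=['blueprint'],
        api_params={'directory': '/tmp/aria-resources'})

    # register() instantiated the API and called create(), so the
    # directory tree already exists; upload/download work per entry id:
    resource_storage.blueprint.upload(entry_id='bp-1', source='/path/to/blueprint.yaml')
    resource_storage.blueprint.download(entry_id='bp-1', destination='/tmp/copy')
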
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1bb1f6ba/aria/storage/api_driver/__init__.py
----------------------------------------------------------------------
diff --git a/aria/storage/api_driver/__init__.py b/aria/storage/api_driver/__init__.py
new file mode 100644
index 0000000..9d0095b
--- /dev/null
+++ b/aria/storage/api_driver/__init__.py
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .file_system import FileSystemModelAPI, FileSystemResourceAPI
+from .in_memory import InMemoryModelAPI
+from .sql import SQLAlchemyModelAPI

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1bb1f6ba/aria/storage/api_driver/file_system.py
----------------------------------------------------------------------
diff --git a/aria/storage/api_driver/file_system.py b/aria/storage/api_driver/file_system.py
new file mode 100644
index 0000000..45ecbe8
--- /dev/null
+++ b/aria/storage/api_driver/file_system.py
@@ -0,0 +1,229 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import shutil
+from functools import partial
+from multiprocessing import RLock
+from distutils import dir_util
+
+import jsonpickle
+
+from .. import (
+    storage_api,
+    StorageError
+)
+
+
+# TODO: fix docs
+
+
+class FileSystemAPI(storage_api.StorageAPI):
+    """
+    Base class which handles storage on the file system.
+    """
+    def __init__(self, *args, **kwargs):
+        super(FileSystemAPI, self).__init__(*args, **kwargs)
+        self._lock = RLock()
+
+    def _establish_connection(self):
+        self._lock.acquire()
+
+    def _destroy_connection(self):
+        self._lock.release()
+
+    def __getstate__(self):
+        obj_dict = super(FileSystemAPI, self).__getstate__()
+        del obj_dict['_lock']
+        return obj_dict
+
+    def __setstate__(self, obj_dict):
+        super(FileSystemAPI, self).__setstate__(obj_dict)
+        vars(self).update(_lock=RLock(), **obj_dict)
+
+
+class FileSystemModelAPI(storage_api.ModelAPI, FileSystemAPI):
+    """
+    File system model storage.
+    """
+
+    def __init__(self, directory, **kwargs):
+        """
+        File system implementation for storage api.
+        :param str directory: root dir for storage.
+        """
+        super(FileSystemModelAPI, self).__init__(**kwargs)
+        self.directory = directory
+
+        self._join_path = partial(os.path.join, self.directory)
+
+    def __repr__(self):
+        return '{cls.__name__}(directory={self.directory})'.format(
+            cls=self.__class__, self=self)
+
+    def create(self, **kwargs):
+        """
+        Create the directory for this model in storage.
+        Tries to create the root directory as well.
+        """
+        try:
+            os.makedirs(self.directory)
+        except (OSError, IOError):
+            pass
+        os.makedirs(self._join_path(self.name))
+
+    def get(self, entry_id, **kwargs):
+        """
+        Get an entry from storage.
+        :param str entry_id: id of the file to get from storage.
+        :return: value of file from storage.
+        :rtype: dict
+        """
+        with open(self._join_path(self.name, entry_id)) as file_obj:
+            return jsonpickle.loads(file_obj.read())
+
+    def store(self, entry, **kwargs):
+        """
+        Store an entry in storage.
+        :param Model entry: the model entry to store.
+        """
+        with open(self._join_path(self.name, entry.id), 'w') as file_obj:
+            file_obj.write(jsonpickle.dumps(entry))
+
+    def delete(self, entry_id, **kwargs):
+        """
+        Delete an entry from storage.
+        :param str entry_id: id of the file to delete from storage.
+        """
+        os.remove(self._join_path(self.name, entry_id))
+
+    def iter(self, filters=None, **kwargs):
+        """
+        Generator over the entries of directory in storage.
+        :param dict filters: filters for query
+        """
+        filters = filters or {}
+
+        for entry_id in os.listdir(self._join_path(self.name)):
+            value = self.get(entry_id=entry_id)
+            for filter_name, filter_value in filters.items():
+                if value.get(filter_name) != filter_value:
+                    break
+            else:
+                yield value
+
+    def update(self, entry_id, **kwargs):
+        """
+        Update an entry in storage.
+
+        :param str entry_id: id of the document to update.
+        :param kwargs: the fields to update.
+        """
+        entry_dict = self.get(entry_id)
+        entry_dict.update(**kwargs)
+        self.store(entry_dict)
+
+
+class FileSystemResourceAPI(storage_api.ResourceAPI, FileSystemAPI):
+    """
+    File system resource storage.
+    """
+
+    def __init__(self, directory, **kwargs):
+        """
+        File system implementation for storage api.
+        :param str directory: root dir for storage.
+        """
+        super(FileSystemResourceAPI, self).__init__(**kwargs)
+        self.directory = directory
+        self._join_path = partial(os.path.join, self.directory)
+
+    def __repr__(self):
+        return '{cls.__name__}(directory={self.directory})'.format(
+            cls=self.__class__, self=self)
+
+    def create(self, **kwargs):
+        """
+        Create the directory for this resource in storage.
+        Tries to create the root directory as well.
+        """
+        try:
+            os.makedirs(self.directory)
+        except (OSError, IOError):
+            pass
+        os.makedirs(self._join_path(self.name))
+
+    def data(self, entry_id, path=None, **_):
+        """
+        Retrieve the content of a file system storage resource.
+
+        :param str entry_id: the id of the entry.
+        :param str path: a path to a specific resource.
+        :return: the content of the file
+        :rtype: bytes
+        """
+        resource_relative_path = os.path.join(self.name, entry_id, path or '')
+        resource = os.path.join(self.directory, resource_relative_path)
+        if not os.path.exists(resource):
+            raise StorageError("Resource {0} does not exist".format(resource_relative_path))
+        if not os.path.isfile(resource):
+            resources = os.listdir(resource)
+            if len(resources) != 1:
+                raise StorageError('Expected exactly one resource in path: {0}'.format(resource))
+            resource = os.path.join(resource, resources[0])
+        with open(resource, 'rb') as resource_file:
+            return resource_file.read()
+
+    def download(self, entry_id, destination, path=None, **_):
+        """
+        Download a specific file or dir from the file system resource storage.
+
+        :param str entry_id: the id of the entry.
+        :param str destination: the destination of the files.
+        :param str path: a path on the remote machine relative to the root of the entry.
+        """
+        resource_relative_path = os.path.join(self.name, entry_id, path or '')
+        resource = os.path.join(self.directory, resource_relative_path)
+        if not os.path.exists(resource):
+            raise StorageError("Resource {0} does not exist".format(resource_relative_path))
+        if os.path.isfile(resource):
+            shutil.copy2(resource, destination)
+        else:
+            dir_util.copy_tree(resource, destination)                                     # pylint: disable=no-member
+
+    def upload(self, entry_id, source, path=None, **_):
+        """
+        Uploads a specific file or dir to the file system resource storage.
+
+        :param str entry_id: the id of the entry.
+        :param source: the source of the files to upload.
+        :param path: the destination of the file/s relative to the entry root dir.
+        """
+        resource_directory = os.path.join(self.directory, self.name, entry_id)
+        if not os.path.exists(resource_directory):
+            os.makedirs(resource_directory)
+        destination = os.path.join(resource_directory, path or '')
+        if os.path.isfile(source):
+            shutil.copy2(source, destination)
+        else:
+            dir_util.copy_tree(source, destination)                                       # pylint: disable=no-member

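[Editor's note] The __getstate__/__setstate__ pair in FileSystemAPI exists because multiprocessing locks cannot be pickled; the lock is dropped on serialization and recreated on restore. A standalone demonstration of the same idiom:

    import pickle
    from multiprocessing import RLock

    class Lockable(object):
        def __init__(self):
            self._lock = RLock()

        def __getstate__(self):
            state = self.__dict__.copy()
            del state['_lock']          # RLock instances are not picklable
            return state

        def __setstate__(self, state):
            self.__dict__.update(state)
            self._lock = RLock()        # recreate the lock after unpickling

    restored = pickle.loads(pickle.dumps(Lockable()))
    assert isinstance(restored._lock, type(RLock()))
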
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1bb1f6ba/aria/storage/api_driver/in_memory.py
----------------------------------------------------------------------
diff --git a/aria/storage/api_driver/in_memory.py b/aria/storage/api_driver/in_memory.py
new file mode 100644
index 0000000..b2d76f1
--- /dev/null
+++ b/aria/storage/api_driver/in_memory.py
@@ -0,0 +1,146 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import namedtuple
+
+from aria.storage.structures import db
+
+from .. import storage_api
+
+
+_Pointer = namedtuple('_Pointer', 'name, is_iter')
+
+storage = {}
+
+
+class InMemoryModelAPI(storage_api.ModelAPI):
+    def __init__(self, *args, **kwargs):
+        """
+        Managing the model in the storage, using the driver.
+
+        :param basestring name: the name of the model.
+        :param ModelDriver driver: the driver which supports this model in the storage.
+        :param Model model_cls: table/document class model.
+        """
+        super(InMemoryModelAPI, self).__init__(*args, **kwargs)
+        self.pointer_mapping = {}
+
+    def __iter__(self):
+        return self.iter()
+
+    def create(self):
+        """
+        Creates the model in the storage.
+        """
+        with self.connect():
+            storage[self.name] = {}
+            self._setup_pointers_mapping()
+
+    def _setup_pointers_mapping(self,):
+        for field_name, field_cls in vars(self.model_cls).items():
+            if not(isinstance(field_cls, db.RelationshipProperty) and field_cls.type):
+                continue
+            pointer_key = _Pointer(field_name, is_iter=False)
+            self.pointer_mapping[pointer_key] = self.__class__(
+                name=storage_api.generate_lower_name(field_cls.type),
+                model_cls=field_cls.type)
+
+    def get(self, entry_id, **kwargs):
+        """
+        Getter for the model from the storage.
+
+        :param basestring entry_id: the id of the table/document.
+        :return: model instance
+        :rtype: Model
+        """
+        with self.connect():
+            data = storage[self.name][entry_id]
+            data.update(self._get_pointers(data, **kwargs))
+        return self.model_cls(**data)
+
+    def store(self, entry, **kwargs):
+        """
+        Setter for the model in the storage.
+
+        :param Model entry: the table/document to store.
+        """
+        with self.connect():
+            data = entry.fields_dict
+            data.update(self._store_pointers(data, **kwargs))
+            storage[self.name][entry.id] = data
+
+    def delete(self, entry_id, **kwargs):
+        """
+        Delete the model from storage.
+
+        :param basestring entry_id: id of the entity to delete from storage.
+        """
+        entry = self.get(entry_id)
+        with self.connect():
+            self._delete_pointers(entry, **kwargs)
+            storage[self.name].pop(entry_id)
+
+    def iter(self, **kwargs):
+        """
+        Generator over the entries of model in storage.
+        """
+        with self.connect():
+            for data in storage[self.name].values():
+                data.update(self._get_pointers(data, **kwargs))
+                yield self.model_cls(**data)
+
+    def update(self, entry_id, **kwargs):
+        """
+        Update an entry in storage.
+
+        :param str entry_id: the id of the table/document.
+        :param kwargs: the fields to update.
+        """
+        with self.connect():
+            storage[self.name][entry_id].update(**kwargs)
+
+    def _get_pointers(self, data, **kwargs):
+        pointers = {}
+        for field, schema in self.pointer_mapping.items():
+            if field.is_iter:
+                pointers[field.name] = [
+                    schema.get(entry_id=pointer_id, **kwargs)
+                    for pointer_id in data[field.name]
+                    if pointer_id]
+            elif data[field.name]:
+                pointers[field.name] = schema.get(entry_id=data[field.name], **kwargs)
+        return pointers
+
+    def _store_pointers(self, data, **kwargs):
+        pointers = {}
+        for field, model_api in self.pointer_mapping.items():
+            if field.is_iter:
+                pointers[field.name] = []
+                for iter_entity in data[field.name]:
+                    pointers[field.name].append(iter_entity.id)
+                    model_api.store(iter_entity, **kwargs)
+            else:
+                pointers[field.name] = data[field.name].id
+                model_api.store(data[field.name], **kwargs)
+        return pointers
+
+    def _delete_pointers(self, entry, **kwargs):
+        for field, schema in self.pointer_mapping.items():
+            if field.is_iter:
+                for iter_entry in getattr(entry, field.name):
+                    schema.delete(iter_entry.id, **kwargs)
+            else:
+                schema.delete(getattr(entry, field.name).id, **kwargs)

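[Editor's note] The _store_pointers/_get_pointers pair flattens linked models into ids on write and re-inflates them on read. A self-contained sketch of the idea, using plain dicts in place of the module-level storage:

    nodes = {}        # stands in for storage['node']
    deployments = {}  # stands in for storage['deployment']

    def store_deployment(deployment):
        node = deployment['node']
        nodes[node['id']] = node                  # store the pointee on its own
        flat = dict(deployment, node=node['id'])  # keep only its id inline
        deployments[flat['id']] = flat

    def get_deployment(deployment_id):
        data = dict(deployments[deployment_id])
        data['node'] = nodes[data['node']]        # re-inflate the pointer
        return data

    store_deployment({'id': 'd1', 'node': {'id': 'n1'}})
    assert get_deployment('d1')['node'] == {'id': 'n1'}
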
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1bb1f6ba/aria/storage/api_driver/sql.py
----------------------------------------------------------------------
diff --git a/aria/storage/api_driver/sql.py b/aria/storage/api_driver/sql.py
new file mode 100644
index 0000000..f9bc4fe
--- /dev/null
+++ b/aria/storage/api_driver/sql.py
@@ -0,0 +1,44 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .. import storage_api
+
+
+class SQLAlchemyModelAPI(storage_api.ModelAPI):
+    def _destroy_connection(self):
+        pass
+
+    def _establish_connection(self):
+        pass
+
+    def store(self, entry, **kwargs):
+        pass
+
+    def create(self):
+        pass
+
+    def update(self, entry_id, **kwargs):
+        pass
+
+    def delete(self, entry_id, **kwargs):
+        pass
+
+    def iter(self, **kwargs):
+        pass
+
+    def get(self, entry_id, **kwargs):
+        pass
+
+
+

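[Editor's note] SQLAlchemyModelAPI is still an empty skeleton at this point in the branch. For a sense of where it is headed, here is one plausible shape for get/store against a plain SQLAlchemy session; this is an assumption sketched from standard SQLAlchemy usage, not this commit's eventual implementation:

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite:///:memory:')
    Session = sessionmaker(bind=engine)
    session = Session()

    def sql_get(model_cls, entry_id):
        # query by the external text id rather than the autoincrement PK
        return session.query(model_cls).filter_by(id=entry_id).one()

    def sql_store(entry):
        session.add(entry)
        session.commit()
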
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1bb1f6ba/aria/storage/drivers.py
----------------------------------------------------------------------
diff --git a/aria/storage/drivers.py b/aria/storage/drivers.py
deleted file mode 100644
index 1f96956..0000000
--- a/aria/storage/drivers.py
+++ /dev/null
@@ -1,416 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Aria's storage.drivers module
-Path: aria.storage.driver
-
-drivers module holds a generic abstract implementation of drivers.
-
-classes:
-    * Driver - abstract storage driver implementation.
-    * ModelDriver - abstract model base storage driver.
-    * ResourceDriver - abstract resource base storage driver.
-    * FileSystemModelDriver - file system implementation for model storage driver.
-    * FileSystemResourceDriver - file system implementation for resource storage driver.
-"""
-
-import distutils.dir_util                                                                           # pylint: disable=no-name-in-module, import-error
-import os
-import shutil
-from functools import partial
-from multiprocessing import RLock
-
-import jsonpickle
-
-from ..logger import LoggerMixin
-from .exceptions import StorageError
-
-__all__ = (
-    'ModelDriver',
-    'FileSystemModelDriver',
-    'ResourceDriver',
-    'FileSystemResourceDriver',
-)
-
-
-class Driver(LoggerMixin):
-    """
-    Driver: storage driver context manager - abstract driver implementation.
-    In the implementation level, It is a good practice to raise StorageError on Errors.
-    """
-
-    def __enter__(self):
-        """
-        Context manager entry method, executes connect.
-        :return: context manager instance
-        :rtype: Driver
-        """
-        self.connect()
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        """
-        Context manager exit method, executes disconnect.
-        """
-        self.disconnect()
-        if not exc_type:
-            return
-        # self.logger.debug(
-        #     '{name} had an error'.format(name=self.__class__.__name__),
-        #     exc_info=(exc_type, exc_val, exc_tb))
-        if StorageError in exc_type.mro():
-            return
-        raise StorageError('Exception had occurred, {type}: {message}'.format(
-            type=exc_type, message=str(exc_val)))
-
-    def connect(self):
-        """
-        Open storage connection.
-        In some cases, This method can get the connection from a connection pool.
-        """
-        pass
-
-    def disconnect(self):
-        """
-        Close storage connection.
-        In some cases, This method can release the connection to the connection pool.
-        """
-        pass
-
-    def create(self, name, *args, **kwargs):
-        """
-        Create table/document in storage by name.
-        :param str name: name of table/document in storage.
-        """
-        pass
-
-
-class ModelDriver(Driver):
-    """
-    ModelDriver context manager.
-    Base Driver for Model based storage.
-    """
-
-    def get(self, name, entry_id, **kwargs):
-        """
-        Getter from storage.
-        :param str name: name of table/document in storage.
-        :param str entry_id: id of the document to get from storage.
-        :return: value of entity from the storage.
-        """
-        raise NotImplementedError('Subclass must implement abstract get method')
-
-    def delete(self, name, entry_id, **kwargs):
-        """
-        Delete from storage.
-        :param str name: name of table/document in storage.
-        :param str entry_id: id of the entity to delete from storage.
-        :param dict kwargs: extra kwargs if needed.
-        """
-        raise NotImplementedError('Subclass must implement abstract delete method')
-
-    def store(self, name, entry_id, entry, **kwargs):
-        """
-        Setter to storage.
-        :param str name: name of table/document in storage.
-        :param str entry_id: id of the entity to store in the storage.
-        :param dict entry: content to store.
-        """
-        raise NotImplementedError('Subclass must implement abstract store method')
-
-    def iter(self, name, **kwargs):
-        """
-        Generator over the entries of table/document in storage.
-        :param str name: name of table/document/file in storage to iter over.
-        """
-        raise NotImplementedError('Subclass must implement abstract iter method')
-
-    def update(self, name, entry_id, **kwargs):
-        """
-        Updates and entry in storage.
-
-        :param str name: name of table/document in storage.
-        :param str entry_id: id of the document to get from storage.
-        :param kwargs: the fields to update.
-        :return:
-        """
-        raise NotImplementedError('Subclass must implement abstract store method')
-
-
-class ResourceDriver(Driver):
-    """
-    ResourceDriver context manager.
-    Base Driver for Resource based storage.
-
-    Resource storage structure is a file system base.
-    <resource root directory>/<resource_name>/<entry_id>/<entry>
-    entry: can be one single file or multiple files and directories.
-    """
-
-    def data(self, entry_type, entry_id, path=None, **kwargs):
-        """
-        Get the binary data from a file in a resource entry.
-        If the entry is a single file no path needed,
-        If the entry contain number of files the path will gide to the relevant file.
-
-        resource path:
-            <resource root directory>/<name>/<entry_id>/<path>
-
-        :param basestring entry_type: resource name.
-        :param basestring entry_id: id of the entity to resource in the storage.
-        :param basestring path: path to resource relative to entry_id folder in the storage.
-        :return: entry file object.
-        :rtype: bytes
-        """
-        raise NotImplementedError('Subclass must implement abstract get method')
-
-    def download(self, entry_type, entry_id, destination, path=None, **kwargs):
-        """
-        Download the resource to a destination.
-        Like data method bat this method isn't returning data,
-        Instead it create a new file in local file system.
-
-        resource path:
-            <resource root directory>/<name>/<entry_id>/<path>
-        copy to:
-            /<destination>
-        destination can be file or directory
-
-        :param basestring entry_type: resource name.
-        :param basestring entry_id: id of the entity to resource in the storage.
-        :param basestring destination: path in local file system to download to.
-        :param basestring path: path to resource relative to entry_id folder in the storage.
-        """
-        raise NotImplementedError('Subclass must implement abstract get method')
-
-    def upload(self, entry_type, entry_id, source, path=None, **kwargs):
-        """
-        Upload the resource from source.
-        source can be file or directory with files.
-
-        copy from:
-            /<source>
-        to resource path:
-            <resource root directory>/<name>/<entry_id>/<path>
-
-        :param basestring entry_type: resource name.
-        :param basestring entry_id: id of the entity to resource in the storage.
-        :param basestring source: source can be file or directory with files.
-        :param basestring path: path to resource relative to entry_id folder in the storage.
-        """
-        raise NotImplementedError('Subclass must implement abstract get method')
-
-
-class BaseFileSystemDriver(Driver):
-    """
-    Base class which handles storage on the file system.
-    """
-    def __init__(self, *args, **kwargs):
-        super(BaseFileSystemDriver, self).__init__(*args, **kwargs)
-        self._lock = RLock()
-
-    def connect(self):
-        self._lock.acquire()
-
-    def disconnect(self):
-        self._lock.release()
-
-    def __getstate__(self):
-        obj_dict = super(BaseFileSystemDriver, self).__getstate__()
-        del obj_dict['_lock']
-        return obj_dict
-
-    def __setstate__(self, obj_dict):
-        super(BaseFileSystemDriver, self).__setstate__(obj_dict)
-        vars(self).update(_lock=RLock(), **obj_dict)
-
-
-class FileSystemModelDriver(ModelDriver, BaseFileSystemDriver):
-    """
-    FileSystemModelDriver context manager.
-    """
-
-    def __init__(self, directory, **kwargs):
-        """
-        File system implementation for storage driver.
-        :param str directory: root dir for storage.
-        """
-        super(FileSystemModelDriver, self).__init__(**kwargs)
-        self.directory = directory
-
-        self._join_path = partial(os.path.join, self.directory)
-
-    def __repr__(self):
-        return '{cls.__name__}(directory={self.directory})'.format(
-            cls=self.__class__, self=self)
-
-    def create(self, name):
-        """
-        Create directory in storage by path.
-        tries to create the root directory as well.
-        :param str name: path of file in storage.
-        """
-        try:
-            os.makedirs(self.directory)
-        except (OSError, IOError):
-            pass
-        os.makedirs(self._join_path(name))
-
-    def get(self, name, entry_id, **kwargs):
-        """
-        Getter from storage.
-        :param str name: name of directory in storage.
-        :param str entry_id: id of the file to get from storage.
-        :return: value of file from storage.
-        :rtype: dict
-        """
-        with open(self._join_path(name, entry_id)) as file_obj:
-            return jsonpickle.loads(file_obj.read())
-
-    def store(self, name, entry_id, entry, **kwargs):
-        """
-        Delete from storage.
-        :param str name: name of directory in storage.
-        :param str entry_id: id of the file to delete from storage.
-        """
-        with open(self._join_path(name, entry_id), 'w') as file_obj:
-            file_obj.write(jsonpickle.dumps(entry))
-
-    def delete(self, name, entry_id, **kwargs):
-        """
-        Delete from storage.
-        :param str name: name of directory in storage.
-        :param str entry_id: id of the file to delete from storage.
-        """
-        os.remove(self._join_path(name, entry_id))
-
-    def iter(self, name, filters=None, **kwargs):
-        """
-        Generator over the entries of directory in storage.
-        :param str name: name of directory in storage to iter over.
-        :param dict filters: filters for query
-        """
-        filters = filters or {}
-
-        for entry_id in os.listdir(self._join_path(name)):
-            value = self.get(name, entry_id=entry_id)
-            for filter_name, filter_value in filters.items():
-                if value.get(filter_name) != filter_value:
-                    break
-            else:
-                yield value
-
-    def update(self, name, entry_id, **kwargs):
-        """
-        Updates and entry in storage.
-
-        :param str name: name of table/document in storage.
-        :param str entry_id: id of the document to get from storage.
-        :param kwargs: the fields to update.
-        :return:
-        """
-        entry_dict = self.get(name, entry_id)
-        entry_dict.update(**kwargs)
-        self.store(name, entry_id, entry_dict)
-
-
-class FileSystemResourceDriver(ResourceDriver, BaseFileSystemDriver):
-    """
-    FileSystemResourceDriver context manager.
-    """
-
-    def __init__(self, directory, **kwargs):
-        """
-        File system implementation for storage driver.
-        :param str directory: root dir for storage.
-        """
-        super(FileSystemResourceDriver, self).__init__(**kwargs)
-        self.directory = directory
-        self._join_path = partial(os.path.join, self.directory)
-
-    def __repr__(self):
-        return '{cls.__name__}(directory={self.directory})'.format(
-            cls=self.__class__, self=self)
-
-    def create(self, name):
-        """
-        Create directory in storage by path.
-        tries to create the root directory as well.
-        :param basestring name: path of file in storage.
-        """
-        try:
-            os.makedirs(self.directory)
-        except (OSError, IOError):
-            pass
-        os.makedirs(self._join_path(name))
-
-    def data(self, entry_type, entry_id, path=None):
-        """
-        Retrieve the content of a file system storage resource.
-
-        :param basestring entry_type: the type of the entry.
-        :param basestring entry_id: the id of the entry.
-        :param basestring path: a path to a specific resource.
-        :return: the content of the file
-        :rtype: bytes
-        """
-        resource_relative_path = os.path.join(entry_type, entry_id, path or '')
-        resource = os.path.join(self.directory, resource_relative_path)
-        if not os.path.exists(resource):
-            raise StorageError("Resource {0} does not exist".format(resource_relative_path))
-        if not os.path.isfile(resource):
-            resources = os.listdir(resource)
-            if len(resources) != 1:
-                raise StorageError('No resource in path: {0}'.format(resource))
-            resource = os.path.join(resource, resources[0])
-        with open(resource, 'rb') as resource_file:
-            return resource_file.read()
-
-    def download(self, entry_type, entry_id, destination, path=None):
-        """
-        Download a specific file or dir from the file system resource storage.
-
-        :param basestring entry_type: the name of the entry.
-        :param basestring entry_id: the id of the entry
-        :param basestring destination: the destination of the files.
-        :param basestring path: a path on the remote machine relative to the root of the entry.
-        """
-        resource_relative_path = os.path.join(entry_type, entry_id, path or '')
-        resource = os.path.join(self.directory, resource_relative_path)
-        if not os.path.exists(resource):
-            raise StorageError("Resource {0} does not exist".format(resource_relative_path))
-        if os.path.isfile(resource):
-            shutil.copy2(resource, destination)
-        else:
-            distutils.dir_util.copy_tree(resource, destination)                                     # pylint: disable=no-member
-
-    def upload(self, entry_type, entry_id, source, path=None):
-        """
-        Uploads a specific file or dir to the file system resource storage.
-
-        :param basestring entry_type: the name of the entry.
-        :param basestring entry_id: the id of the entry
-        :param source: the source of  the files to upload.
-        :param path: the destination of the file/s relative to the entry root dir.
-        """
-        resource_directory = os.path.join(self.directory, entry_type, entry_id)
-        if not os.path.exists(resource_directory):
-            os.makedirs(resource_directory)
-        destination = os.path.join(resource_directory, path or '')
-        if os.path.isfile(source):
-            shutil.copy2(source, destination)
-        else:
-            distutils.dir_util.copy_tree(source, destination)                                       # pylint: disable=no-member

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1bb1f6ba/aria/storage/models.py
----------------------------------------------------------------------
diff --git a/aria/storage/models.py b/aria/storage/models.py
index d24ad75..17b0608 100644
--- a/aria/storage/models.py
+++ b/aria/storage/models.py
@@ -36,11 +36,13 @@ classes:
     * ProviderContext - provider context implementation model.
     * Plugin - plugin implementation model.
 """
-
 from datetime import datetime
-from types import NoneType
+from uuid import uuid4
 
-from .structures import Field, IterPointerField, Model, uuid_generator, PointerField
+from aria.storage import (
+    structures,
+    states,
+)
 
 __all__ = (
     'Model',
@@ -60,344 +62,516 @@ __all__ = (
 )
 
 # todo: sort this, maybe move from mgr or move from aria???
-ACTION_TYPES = ()
-ENTITY_TYPES = ()
+# TODO: this must change
+ACTION_TYPES = ('a',)  # note the trailing comma: ('a') is just the string 'a'
+ENTITY_TYPES = ('b',)
 
 
-class Blueprint(Model):
+def uuid_generator():
     """
-    A Model which represents a blueprint
+    Wrapper function that generates UUID-based ids.
     """
-    plan = Field(type=dict)
-    id = Field(type=basestring, default=uuid_generator)
-    description = Field(type=(basestring, NoneType))
-    created_at = Field(type=datetime)
-    updated_at = Field(type=datetime)
-    main_file_name = Field(type=basestring)
+    return str(uuid4())
 
 
-class Snapshot(Model):
-    """
-    A Model which represents a snapshot
-    """
-    CREATED = 'created'
-    FAILED = 'failed'
-    CREATING = 'creating'
-    UPLOADED = 'uploaded'
-    END_STATES = [CREATED, FAILED, UPLOADED]
+class Blueprint(structures.SQLModelBase):
+    __tablename__ = 'blueprints'
 
-    id = Field(type=basestring, default=uuid_generator)
-    created_at = Field(type=datetime)
-    status = Field(type=basestring)
-    error = Field(type=basestring, default=None)
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
 
+    created_at = structures.db.Column(structures.UTCDateTime, nullable=False, index=True)
+    main_file_name = structures.db.Column(structures.db.Text, nullable=False)
+    plan = structures.db.Column(structures.db.PickleType, nullable=False)
+    updated_at = structures.db.Column(structures.UTCDateTime)
+    description = structures.db.Column(structures.db.Text)
 
-class Deployment(Model):
-    """
-    A Model which represents a deployment
-    """
-    id = Field(type=basestring, default=uuid_generator)
-    description = Field(type=(basestring, NoneType))
-    created_at = Field(type=datetime)
-    updated_at = Field(type=datetime)
-    blueprint_id = Field(type=basestring)
-    workflows = Field(type=dict)
-    inputs = Field(type=dict, default=lambda: {})
-    policy_types = Field(type=dict, default=lambda: {})
-    policy_triggers = Field(type=dict, default=lambda: {})
-    groups = Field(type=dict, default=lambda: {})
-    outputs = Field(type=dict, default=lambda: {})
-    scaling_groups = Field(type=dict, default=lambda: {})
-
-
-class DeploymentUpdateStep(Model):
-    """
-    A Model which represents a deployment update step
-    """
-    id = Field(type=basestring, default=uuid_generator)
-    action = Field(type=basestring, choices=ACTION_TYPES)
-    entity_type = Field(type=basestring, choices=ENTITY_TYPES)
-    entity_id = Field(type=basestring)
-    supported = Field(type=bool, default=True)
-
-    def __hash__(self):
-        return hash((self.id, self.entity_id))
-
-    def __lt__(self, other):
-        """
-        the order is 'remove' < 'modify' < 'add'
-        :param other:
-        :return:
-        """
-        if not isinstance(other, self.__class__):
-            return not self >= other
-
-        if self.action != other.action:
-            if self.action == 'remove':
-                return_value = True
-            elif self.action == 'add':
-                return_value = False
-            else:
-                return_value = other.action == 'add'
-            return return_value
-
-        if self.action == 'add':
-            return self.entity_type == 'node' and other.entity_type == 'relationship'
-        if self.action == 'remove':
-            return self.entity_type == 'relationship' and other.entity_type == 'node'
-        return False
-
-
-class DeploymentUpdate(Model):
-    """
-    A Model which represents a deployment update
-    """
-    INITIALIZING = 'initializing'
-    SUCCESSFUL = 'successful'
-    UPDATING = 'updating'
-    FINALIZING = 'finalizing'
-    EXECUTING_WORKFLOW = 'executing_workflow'
-    FAILED = 'failed'
-
-    STATES = [
-        INITIALIZING,
-        SUCCESSFUL,
-        UPDATING,
-        FINALIZING,
-        EXECUTING_WORKFLOW,
-        FAILED,
-    ]
-
-    # '{0}-{1}'.format(kwargs['deployment_id'], uuid4())
-    id = Field(type=basestring, default=uuid_generator)
-    deployment_id = Field(type=basestring)
-    state = Field(type=basestring, choices=STATES, default=INITIALIZING)
-    deployment_plan = Field()
-    deployment_update_nodes = Field(default=None)
-    deployment_update_node_instances = Field(default=None)
-    deployment_update_deployment = Field(default=None)
-    modified_entity_ids = Field(default=None)
-    execution_id = Field(type=basestring)
-    steps = IterPointerField(type=DeploymentUpdateStep, default=())
-
-
-class Execution(Model):
-    """
-    A Model which represents an execution
-    """
 
-    class _Validation(object):
+class Snapshot(structures.SQLModelBase):
+    __tablename__ = 'snapshots'
 
-        @staticmethod
-        def execution_status_transition_validation(_, value, instance):
-            """Validation function that verifies execution status transitions are OK"""
-            try:
-                current_status = instance.status
-            except AttributeError:
-                return
-            valid_transitions = Execution.VALID_TRANSITIONS.get(current_status, [])
-            if current_status != value and value not in valid_transitions:
-                raise ValueError('Cannot change execution status from {current} to {new}'.format(
-                    current=current_status,
-                    new=value))
-
-    TERMINATED = 'terminated'
-    FAILED = 'failed'
-    CANCELLED = 'cancelled'
-    PENDING = 'pending'
-    STARTED = 'started'
-    CANCELLING = 'cancelling'
-    STATES = (
-        TERMINATED,
-        FAILED,
-        CANCELLED,
-        PENDING,
-        STARTED,
-        CANCELLING,
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
+
+    created_at = structures.db.Column(structures.UTCDateTime, nullable=False, index=True)
+    status = structures.db.Column(
+        structures.db.Enum(*states.SnapshotState.STATES, name='snapshot_status')
+    )
+    error = structures.db.Column(structures.db.Text)
+
+
+class Deployment(structures.SQLModelBase):
+    __tablename__ = 'deployments'
+
+    # See base class for an explanation on these properties
+    join_properties = {
+        'blueprint_id': {
+            # No need to provide the Blueprint table, as it's already joined
+            'models': [Blueprint],
+            'column': Blueprint.id.label('blueprint_id')
+        },
+    }
+    join_order = 2
+
+    _private_fields = ['blueprint_storage_id']
+
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
+
+    created_at = structures.db.Column(structures.UTCDateTime, nullable=False, index=True)
+    description = structures.db.Column(structures.db.Text)
+    inputs = structures.db.Column(structures.db.PickleType)
+    groups = structures.db.Column(structures.db.PickleType)
+    permalink = structures.db.Column(structures.db.Text)
+    policy_triggers = structures.db.Column(structures.db.PickleType)
+    policy_types = structures.db.Column(structures.db.PickleType)
+    outputs = structures.db.Column(structures.db.PickleType(comparator=lambda *a: False))
+    scaling_groups = structures.db.Column(structures.db.PickleType)
+    updated_at = structures.db.Column(structures.UTCDateTime)
+    workflows = structures.db.Column(structures.db.PickleType(comparator=lambda *a: False))
+
+    blueprint_storage_id = structures.foreign_key(Blueprint)
+    blueprint = structures.one_to_many_relationship(
+        child_class_name='Deployment',
+        column_name='blueprint_storage_id',
+        parent_class_name='Blueprint',
+        back_reference_name='deployments'
     )
-    END_STATES = [TERMINATED, FAILED, CANCELLED]
-    ACTIVE_STATES = [state for state in STATES if state not in END_STATES]
-    VALID_TRANSITIONS = {
-        PENDING: [STARTED, CANCELLED],
-        STARTED: END_STATES + [CANCELLING],
-        CANCELLING: END_STATES
+
+    @property
+    def blueprint_id(self):
+        return self.blueprint.id
+
+
+class Execution(structures.SQLModelBase):
+    __tablename__ = 'executions'
+
+    # See base class for an explanation on these properties
+    join_properties = {
+        'blueprint_id': {
+            'models': [Deployment, Blueprint],
+            'column': Blueprint.id.label('blueprint_id')
+        },
+        'deployment_id': {
+            'models': [Deployment],
+            'column': Deployment.id.label('deployment_id')
+        }
     }
+    join_order = 3
 
-    id = Field(type=basestring, default=uuid_generator)
-    status = Field(type=basestring, choices=STATES,
-                   validation_func=_Validation.execution_status_transition_validation)
-    deployment_id = Field(type=basestring)
-    workflow_id = Field(type=basestring)
-    blueprint_id = Field(type=basestring)
-    created_at = Field(type=datetime, default=datetime.utcnow)
-    started_at = Field(type=datetime, default=None)
-    ended_at = Field(type=datetime, default=None)
-    error = Field(type=basestring, default=None)
-    parameters = Field()
+    _private_fields = ['deployment_storage_id']
 
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
 
-class Relationship(Model):
-    """
-    A Model which represents a relationship
-    """
-    id = Field(type=basestring, default=uuid_generator)
-    source_id = Field(type=basestring)
-    target_id = Field(type=basestring)
-    source_interfaces = Field(type=dict)
-    source_operations = Field(type=dict)
-    target_interfaces = Field(type=dict)
-    target_operations = Field(type=dict)
-    type = Field(type=basestring)
-    type_hierarchy = Field(type=list)
-    properties = Field(type=dict)
-
-
-class Node(Model):
-    """
-    A Model which represents a node
-    """
-    id = Field(type=basestring, default=uuid_generator)
-    blueprint_id = Field(type=basestring)
-    type = Field(type=basestring)
-    type_hierarchy = Field()
-    number_of_instances = Field(type=int)
-    planned_number_of_instances = Field(type=int)
-    deploy_number_of_instances = Field(type=int)
-    host_id = Field(type=basestring, default=None)
-    properties = Field(type=dict)
-    operations = Field(type=dict)
-    plugins = Field(type=list, default=())
-    relationships = IterPointerField(type=Relationship)
-    plugins_to_install = Field(type=list, default=())
-    min_number_of_instances = Field(type=int)
-    max_number_of_instances = Field(type=int)
-
-    def relationships_by_target(self, target_id):
-        """
-        Retreives all of the relationship by target.
-        :param target_id: the node id of the target  of the relationship
-        :yields: a relationship which target and node with the specified target_id
-        """
-        for relationship in self.relationships:
-            if relationship.target_id == target_id:
-                yield relationship
-        # todo: maybe add here Exception if isn't exists (didn't yield one's)
-
-
-class RelationshipInstance(Model):
-    """
-    A Model which represents a relationship instance
-    """
-    id = Field(type=basestring, default=uuid_generator)
-    target_id = Field(type=basestring)
-    target_name = Field(type=basestring)
-    source_id = Field(type=basestring)
-    source_name = Field(type=basestring)
-    type = Field(type=basestring)
-    relationship = PointerField(type=Relationship)
+    created_at = structures.db.Column(structures.UTCDateTime, nullable=False, index=True)
+    error = structures.db.Column(structures.db.Text)
+    is_system_workflow = structures.db.Column(structures.db.Boolean, nullable=False)
+    parameters = structures.db.Column(structures.db.PickleType)
+    status = structures.db.Column(
+        structures.db.Enum(*states.ExecutionState.STATES, name='execution_status')
+    )
+    workflow_id = structures.db.Column(structures.db.Text, nullable=False)
+
+    deployment_storage_id = structures.foreign_key(Deployment, nullable=True)
+    deployment = structures.one_to_many_relationship(
+        child_class_name='Execution',
+        column_name='deployment_storage_id',
+        parent_class_name='Deployment',
+        back_reference_name='executions'
+    )
 
+    @property
+    def deployment_id(self):
+        return self.deployment.id if self.deployment else None
+
+    @property
+    def blueprint_id(self):
+        return self.deployment.blueprint_id if self.deployment else None
+
+    def __str__(self):
+        id_name, id_value = self._get_unique_id()
+        return '<{0} {1}=`{2}` (status={3})>'.format(
+            self.__class__.__name__,
+            id_name,
+            id_value,
+            self.status
+        )
+
+
+class DeploymentUpdate(structures.SQLModelBase):
+    __tablename__ = 'deployment_updates'
+
+    # See base class for an explanation on these properties
+    join_properties = {
+        'execution_id': {
+            'models': [Execution],
+            'column': Execution.id.label('execution_id')
+        },
+        'deployment_id': {
+            'models': [Deployment],
+            'column': Deployment.id.label('deployment_id')
+        },
+    }
+    join_order = 4
+
+    _private_fields = ['execution_storage_id']
+
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
+
+    created_at = structures.db.Column(structures.UTCDateTime, nullable=False, index=True)
+    deployment_plan = structures.db.Column(structures.db.PickleType)
+    deployment_update_node_instances = structures.db.Column(structures.db.PickleType)
+    deployment_update_deployment = structures.db.Column(structures.db.PickleType)
+    deployment_update_nodes = structures.db.Column(structures.db.PickleType)
+    modified_entity_ids = structures.db.Column(structures.db.PickleType)
+    state = structures.db.Column(structures.db.Text)
+
+    execution_storage_id = structures.foreign_key(Execution, nullable=True)
+    execution = structures.one_to_many_relationship(
+        child_class_name='DeploymentUpdate',
+        column_name='execution_storage_id',
+        parent_class_name='Execution',
+        back_reference_name='deployment_updates'
+    )
 
-class NodeInstance(Model):
-    """
-    A Model which represents a node instance
-    """
-    # todo: add statuses
-    UNINITIALIZED = 'uninitialized'
-    INITIALIZING = 'initializing'
-    CREATING = 'creating'
-    CONFIGURING = 'configuring'
-    STARTING = 'starting'
-    DELETED = 'deleted'
-    STOPPING = 'stopping'
-    DELETING = 'deleting'
-    STATES = (
-        UNINITIALIZED,
-        INITIALIZING,
-        CREATING,
-        CONFIGURING,
-        STARTING,
-        DELETED,
-        STOPPING,
-        DELETING
+    deployment_storage_id = structures.foreign_key(Deployment)
+    deployment = structures.one_to_many_relationship(
+        child_class_name='DeploymentUpdate',
+        column_name='deployment_storage_id',
+        parent_class_name='Deployment',
+        back_reference_name='deployment_updates'
     )
 
-    id = Field(type=basestring, default=uuid_generator)
-    deployment_id = Field(type=basestring)
-    runtime_properties = Field(type=dict)
-    state = Field(type=basestring, choices=STATES, default=UNINITIALIZED)
-    version = Field(type=(basestring, NoneType))
-    relationship_instances = IterPointerField(type=RelationshipInstance)
-    node = PointerField(type=Node)
-    host_id = Field(type=basestring, default=None)
-    scaling_groups = Field(default=())
-
-    def relationships_by_target(self, target_id):
-        """
-        Retreives all of the relationship by target.
-        :param target_id: the instance id of the target of the relationship
-        :yields: a relationship instance which target and node with the specified target_id
-        """
-        for relationship_instance in self.relationship_instances:
-            if relationship_instance.target_id == target_id:
-                yield relationship_instance
-        # todo: maybe add here Exception if isn't exists (didn't yield one's)
-
-
-class DeploymentModification(Model):
-    """
-    A Model which represents a deployment modification
-    """
-    STARTED = 'started'
-    FINISHED = 'finished'
-    ROLLEDBACK = 'rolledback'
-    END_STATES = [FINISHED, ROLLEDBACK]
-
-    id = Field(type=basestring, default=uuid_generator)
-    deployment_id = Field(type=basestring)
-    modified_nodes = Field(type=(dict, NoneType))
-    added_and_related = IterPointerField(type=NodeInstance)
-    removed_and_related = IterPointerField(type=NodeInstance)
-    extended_and_related = IterPointerField(type=NodeInstance)
-    reduced_and_related = IterPointerField(type=NodeInstance)
-    # before_modification = IterPointerField(type=NodeInstance)
-    status = Field(type=basestring, choices=(STARTED, FINISHED, ROLLEDBACK))
-    created_at = Field(type=datetime)
-    ended_at = Field(type=(datetime, NoneType))
-    context = Field()
-
-
-class ProviderContext(Model):
-    """
-    A Model which represents a provider context
-    """
-    id = Field(type=basestring, default=uuid_generator)
-    context = Field(type=dict)
-    name = Field(type=basestring)
+    @property
+    def execution_id(self):
+        return self.execution.id if self.execution else None
 
+    @property
+    def deployment_id(self):
+        return self.deployment.id
 
-class Plugin(Model):
-    """
-    A Model which represents a plugin
-    """
-    id = Field(type=basestring, default=uuid_generator)
-    package_name = Field(type=basestring)
-    archive_name = Field(type=basestring)
-    package_source = Field(type=dict)
-    package_version = Field(type=basestring)
-    supported_platform = Field(type=basestring)
-    distribution = Field(type=basestring)
-    distribution_version = Field(type=basestring)
-    distribution_release = Field(type=basestring)
-    wheels = Field()
-    excluded_wheels = Field()
-    supported_py_versions = Field(type=list)
-    uploaded_at = Field(type=datetime)
-
-
-class Task(Model):
+    def to_dict(self, suppress_error=False):
+        dep_update_dict = super(DeploymentUpdate, self).to_dict(suppress_error)
+        # The steps are DeploymentUpdateStep objects, so serialize them explicitly
+        dep_update_dict['steps'] = [step.to_dict() for step in self.steps]
+        return dep_update_dict
+
+
+class DeploymentUpdateStep(structures.SQLModelBase):
+    __tablename__ = 'deployment_update_steps'
+
+    # See base class for an explanation on these properties
+    join_properties = {
+        'deployment_update_id': {
+            'models': [DeploymentUpdate],
+            'column': DeploymentUpdate.id.label('deployment_update_id')
+        },
+    }
+    join_order = 5
+
+    _private_fields = ['deployment_update_storage_id']
+
+    id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+
+    action = structures.db.Column(structures.db.Enum(*ACTION_TYPES, name='action_type'))
+    entity_id = structures.db.Column(structures.db.Text, nullable=False)
+    entity_type = structures.db.Column(structures.db.Enum(*ENTITY_TYPES, name='entity_type'))
+
+    deployment_update_storage_id = structures.foreign_key(DeploymentUpdate)
+    deployment_update = structures.one_to_many_relationship(
+        child_class_name='DeploymentUpdateStep',
+        column_name='deployment_update_storage_id',
+        parent_class_name='DeploymentUpdate',
+        back_reference_name='steps'
+    )
+
+    @property
+    def tenant(self):
+        return self.deployment_update.tenant
+
+    @property
+    def deployment_update_id(self):
+        return self.deployment_update.id
+
+
+class DeploymentModification(structures.SQLModelBase):
+    __tablename__ = 'deployment_modifications'
+
+    # See base class for an explanation on these properties
+    join_properties = {
+        'deployment_id': {
+            'models': [Deployment],
+            'column': Deployment.id.label('deployment_id')
+        },
+    }
+    join_order = 3
+
+    _private_fields = ['deployment_storage_id']
+
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
+
+    context = structures.db.Column(structures.db.PickleType)
+    created_at = structures.db.Column(structures.UTCDateTime, nullable=False, index=True)
+    ended_at = structures.db.Column(structures.UTCDateTime, index=True)
+    modified_nodes = structures.db.Column(structures.db.PickleType)
+    node_instances = structures.db.Column(structures.db.PickleType)
+    status = structures.db.Column(structures.db.Enum(
+        *states.DeploymentModificationState.STATES,
+        name='deployment_modification_status'
+    ))
+
+    deployment_storage_id = structures.foreign_key(Deployment)
+    deployment = structures.one_to_many_relationship(
+        child_class_name='DeploymentModification',
+        column_name='deployment_storage_id',
+        parent_class_name='Deployment',
+        back_reference_name='modifications'
+    )
+
+    @property
+    def tenant(self):
+        return self.deployment.tenant
+
+    @property
+    def deployment_id(self):
+        return self.deployment.id
+
+
+class Node(structures.SQLModelBase):
+    __tablename__ = 'nodes'
+
+    # See base class for an explanation on these properties
+    is_id_unique = False
+    join_properties = {
+        'blueprint_id': {
+            'models': [Deployment, Blueprint],
+            'column': Blueprint.id.label('blueprint_id')
+        },
+        'deployment_id': {
+            'models': [Deployment],
+            'column': Deployment.id.label('deployment_id')
+        },
+    }
+    join_order = 3
+
+    _private_fields = ['deployment_storage_id']
+
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
+
+    deploy_number_of_instances = structures.db.Column(structures.db.Integer, nullable=False)
+    # TODO: This probably should be a foreign key, but there's no guarantee
+    # in the code, currently, that the host will be created beforehand
+    host_id = structures.db.Column(structures.db.Text)
+    max_number_of_instances = structures.db.Column(structures.db.Integer, nullable=False)
+    min_number_of_instances = structures.db.Column(structures.db.Integer, nullable=False)
+    number_of_instances = structures.db.Column(structures.db.Integer, nullable=False)
+    planned_number_of_instances = structures.db.Column(structures.db.Integer, nullable=False)
+    plugins = structures.db.Column(structures.db.PickleType)
+    plugins_to_install = structures.db.Column(structures.db.PickleType)
+    properties = structures.db.Column(structures.db.PickleType)
+    # relationships = structures.db.Column(structures.db.PickleType)
+    operations = structures.db.Column(structures.db.PickleType)
+    type = structures.db.Column(structures.db.Text, nullable=False, index=True)
+    type_hierarchy = structures.db.Column(structures.db.PickleType)
+
+    deployment_storage_id = structures.foreign_key(Deployment)
+    deployment = structures.one_to_many_relationship(
+        child_class_name='Node',
+        column_name='deployment_storage_id',
+        parent_class_name='Deployment',
+        back_reference_name='nodes'
+    )
+
+    @property
+    def tenant(self):
+        return self.deployment.tenant
+
+    @property
+    def deployment_id(self):
+        return self.deployment.id
+
+    @property
+    def blueprint_id(self):
+        return self.deployment.blueprint_id
+
+
+class Relationship(structures.SQLModelBase):
+    __tablename__ = 'relationships'
+
+    join_properties = {
+        'blueprint_id': {
+            'models': [Node, Deployment, Blueprint],
+            'column': Blueprint.id.label('blueprint_id')
+        },
+        'deployment_id': {
+            'models': [Node, Deployment],
+            'column': Deployment.id.label('deployment_id')
+        }
+    }
+    join_order = 4
+    _private_fields = ['source_node_storage_id',
+                       'target_node_storage_id']
+
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
+
+    source_interfaces = structures.db.Column(structures.db.PickleType)
+    source_operations = structures.db.Column(structures.db.PickleType)
+    target_interfaces = structures.db.Column(structures.db.PickleType)
+    target_operations = structures.db.Column(structures.db.PickleType)
+    type = structures.db.Column(structures.db.String)
+    type_hierarchy = structures.db.Column(structures.db.PickleType)     # TODO: this should be list
+    properties = structures.db.Column(structures.db.PickleType)
+
+    source_node_storage_id = structures.foreign_key(Node)
+    target_node_storage_id = structures.foreign_key(Node)
+
+    source_node = structures.one_to_many_relationship(
+        child_class_name='Relationship',
+        column_name='source_node_storage_id',
+        parent_class_name='Node',
+        back_reference_name='relationship_source'
+    )
+    target_node = structures.one_to_many_relationship(
+        child_class_name='Relationship',
+        column_name='target_node_storage_id',
+        parent_class_name='Node',
+        back_reference_name='relationship_target'
+    )
+
+
+class NodeInstance(structures.SQLModelBase):
+    __tablename__ = 'node_instances'
+
+    # See base class for an explanation on these properties
+    join_properties = {
+        'node_id': {
+            'models': [Node],
+            'column': Node.id.label('node_id')
+        },
+        'deployment_id': {
+            'models': [Node, Deployment],
+            'column': Deployment.id.label('deployment_id')
+        },
+    }
+    join_order = 4
+
+    _private_fields = ['node_storage_id']
+
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
+
+    # TODO: This probably should be a foreign key, but there's no guarantee
+    # in the code, currently, that the host will be created beforehand
+    host_id = structures.db.Column(structures.db.Text)
+    runtime_properties = structures.db.Column(structures.db.PickleType)
+    scaling_groups = structures.db.Column(structures.db.PickleType)
+    state = structures.db.Column(structures.db.Text, nullable=False)
+    version = structures.db.Column(structures.db.Integer, default=1)
+
+    node_storage_id = structures.foreign_key(Node)
+    node = structures.one_to_many_relationship(
+        child_class_name='NodeInstance',
+        column_name='node_storage_id',
+        parent_class_name='Node',
+        back_reference_name='node_instances'
+    )
+
+    @property
+    def tenant(self):
+        return self.node.deployment.tenant
+
+    @property
+    def node_id(self):
+        return self.node.id
+
+    @property
+    def deployment_id(self):
+        return self.node.deployment_id
+
+
+class RelationshipInstance(structures.SQLModelBase):
+    __tablename__ = 'relationship_instances'
+
+    join_properties = {
+        'blueprint_id': {
+            'models': [Relationship, Node, Deployment, Blueprint],
+            'column': Blueprint.id.label('blueprint_id')
+        },
+        'deployment_id': {
+            'models': [Relationship, Node, Deployment],
+            'column': Deployment.id.label('deployment_id')
+        }
+    }
+    join_order = 5
+
+    _private_fields = ['relationship_storage_id',
+                       'source_node_instance_id',
+                       'target_node_instance_id']
+
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
+
+    type = structures.db.Column(structures.db.String)
+
+    source_node_instance_id = structures.foreign_key(NodeInstance)
+    source_node_instance = structures.one_to_many_relationship(
+        child_class_name='RelationshipInstance',
+        column_name='source_node_instance_id',
+        parent_class_name='NodeInstance',
+        back_reference_name='relationship_instance_source'
+    )
+    target_node_instance_id = structures.foreign_key(NodeInstance)
+    target_node_instance = structures.one_to_many_relationship(
+        child_class_name='RelationshipInstance',
+        column_name='target_node_instance_id',
+        parent_class_name='NodeInstance',
+        back_reference_name='relationship_instance_target'
+    )
+    relationship_storage_id = structures.foreign_key(Relationship)
+    relationship = structures.one_to_many_relationship(
+        child_class_name='RelationshipInstance',
+        column_name='relationship_storage_id',
+        parent_class_name='Relationship',
+        back_reference_name='relationship_instances'
+    )
+
+
+class ProviderContext(structures.SQLModelBase):
+    __tablename__ = 'provider_context'
+
+    id = structures.db.Column(structures.db.Text, primary_key=True)
+    name = structures.db.Column(structures.db.Text, nullable=False)
+    context = structures.db.Column(structures.db.PickleType, nullable=False)
+
+
+class Plugin(structures.SQLModelBase):
+    __tablename__ = 'plugins'
+
+    storage_id = structures.db.Column(structures.db.Integer, primary_key=True, autoincrement=True)
+    id = structures.db.Column(structures.db.Text, index=True)
+
+    archive_name = structures.db.Column(structures.db.Text, nullable=False, index=True)
+    distribution = structures.db.Column(structures.db.Text)
+    distribution_release = structures.db.Column(structures.db.Text)
+    distribution_version = structures.db.Column(structures.db.Text)
+    excluded_wheels = structures.db.Column(structures.db.PickleType)
+    package_name = structures.db.Column(structures.db.Text, nullable=False, index=True)
+    package_source = structures.db.Column(structures.db.Text)
+    package_version = structures.db.Column(structures.db.Text)
+    supported_platform = structures.db.Column(structures.db.PickleType)
+    supported_py_versions = structures.db.Column(structures.db.PickleType)
+    uploaded_at = structures.db.Column(structures.UTCDateTime, nullable=False, index=True)
+    wheels = structures.db.Column(structures.db.PickleType, nullable=False)
+
+
+class Task(structures.SQLModelBase):
     """
     A Model which represents a task
     """
+    __tablename__ = 'task'
 
     class _Validation(object):
 
@@ -408,37 +582,24 @@ class Task(Model):
                 raise ValueError('Max attempts can be either -1 (infinite) or any positive number. '
                                  'Got {value}'.format(value=value))
 
-    PENDING = 'pending'
-    RETRYING = 'retrying'
-    SENT = 'sent'
-    STARTED = 'started'
-    SUCCESS = 'success'
-    FAILED = 'failed'
-    STATES = (
-        PENDING,
-        RETRYING,
-        SENT,
-        STARTED,
-        SUCCESS,
-        FAILED,
-    )
-    WAIT_STATES = [PENDING, RETRYING]
-    END_STATES = [SUCCESS, FAILED]
     INFINITE_RETRIES = -1
 
-    id = Field(type=basestring, default=uuid_generator)
-    status = Field(type=basestring, choices=STATES, default=PENDING)
-    execution_id = Field(type=basestring)
-    due_at = Field(type=datetime, default=datetime.utcnow)
-    started_at = Field(type=datetime, default=None)
-    ended_at = Field(type=datetime, default=None)
-    max_attempts = Field(type=int, default=1, validation_func=_Validation.validate_max_attempts)
-    retry_count = Field(type=int, default=0)
-    retry_interval = Field(type=(int, float), default=0)
-    ignore_failure = Field(type=bool, default=False)
+    id = structures.db.Column(structures.db.String, primary_key=True, default=uuid_generator)
+    status = structures.db.Column(
+        structures.db.Enum(*states.TaskState.STATES, name='task_status'),
+        default=states.TaskState.PENDING
+    )
+
+    execution_id = structures.db.Column(structures.db.String)
+    due_at = structures.db.Column(structures.UTCDateTime, default=datetime.utcnow)
+    started_at = structures.db.Column(structures.UTCDateTime, default=None)
+    ended_at = structures.db.Column(structures.UTCDateTime, default=None)
+    # TODO: restore the _Validation.validate_max_attempts check
+    max_attempts = structures.db.Column(structures.db.Integer, default=1)
+    retry_count = structures.db.Column(structures.db.Integer, default=0)
+    retry_interval = structures.db.Column(structures.db.Float, default=0)
+    ignore_failure = structures.db.Column(structures.db.Boolean, default=False)
 
     # Operation specific fields
-    name = Field(type=basestring)
-    operation_mapping = Field(type=basestring)
-    actor = Field()
-    inputs = Field(type=dict, default=lambda: {})
+    name = structures.db.Column(structures.db.String)
+    operation_mapping = structures.db.Column(structures.db.String)
+    actor = structures.db.Column(structures.db.PickleType)  # arbitrary actor object, pickled
+    inputs = structures.db.Column(structures.db.PickleType(comparator=lambda *a: False))

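The pattern shared by the models above: a storage_id integer surrogate primary key, a user-facing id text column, foreign keys that point at the parent's storage_id, and joined ids (blueprint_id, deployment_id) exposed as read-only properties; join_properties/join_order drive the query-time joins in the storage API (not shown here). A minimal self-contained sketch of that pattern in plain SQLAlchemy — assuming stock declarative_base rather than the actual SQLModelBase, foreign_key and one_to_many_relationship helpers in aria/storage/structures.py, whose signatures may differ:

from sqlalchemy import Column, ForeignKey, Integer, Text, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, relationship, sessionmaker

Base = declarative_base()


def foreign_key(parent_table, nullable=False):
    # Foreign keys reference the surrogate storage_id, never the text id.
    return Column(Integer,
                  ForeignKey('{0}.storage_id'.format(parent_table)),
                  nullable=nullable)


class Blueprint(Base):
    __tablename__ = 'blueprints'
    storage_id = Column(Integer, primary_key=True, autoincrement=True)
    id = Column(Text, index=True)


class Deployment(Base):
    __tablename__ = 'deployments'
    storage_id = Column(Integer, primary_key=True, autoincrement=True)
    id = Column(Text, index=True)
    blueprint_storage_id = foreign_key('blueprints')
    blueprint = relationship('Blueprint', backref=backref('deployments'))

    @property
    def blueprint_id(self):
        # Joined ids are properties resolved through the relationship,
        # not duplicated columns.
        return self.blueprint.id


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Deployment(id='dep-1', blueprint=Blueprint(id='bp-1')))
session.commit()
assert session.query(Deployment).one().blueprint_id == 'bp-1'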

