From 6028b8797a341e3c6e2a967e2f648b21d9c6e9ea Mon Sep 17 00:00:00 2001
From: Roberto Borghes <roberto.borghes@gaia.elettra.trieste.it>
Date: Mon, 25 Oct 2021 15:41:52 +0200
Subject: [PATCH] DonkiOrchestra: the director Tango device, alpha release

---
 Makefile                     |  19 +
 src/DataBuffer.py            | 178 ++++++++
 src/DataStorage.py           | 467 +++++++++++++++++++
 src/DirectorBgnThread.py     | 862 +++++++++++++++++++++++++++++++++++
 src/DonkiDirectorServer.py   | 751 ++++++++++++++++++++++++++++++
 src/DonkiDirectorServer.xmi  | 261 +++++++++++
 src/DonkiDirector_cmdline.py | 137 ++++++
 src/DonkiOrchestraLib.py     | 251 ++++++++++
 src/InfoServer.py            | 136 ++++++
 src/hdfwriter.py             | 673 +++++++++++++++++++++++++++
 src/tinydb/__init__.py       |  30 ++
 src/tinydb/database.py       | 470 +++++++++++++++++++
 src/tinydb/middlewares.py    | 116 +++++
 src/tinydb/operations.py     |  28 ++
 src/tinydb/queries.py        | 342 ++++++++++++++
 src/tinydb/storages.py       | 132 ++++++
 src/tinydb/utils.py          | 140 ++++++
 17 files changed, 4993 insertions(+)
 create mode 100644 Makefile
 create mode 100644 src/DataBuffer.py
 create mode 100644 src/DataStorage.py
 create mode 100644 src/DirectorBgnThread.py
 create mode 100755 src/DonkiDirectorServer.py
 create mode 100644 src/DonkiDirectorServer.xmi
 create mode 100755 src/DonkiDirector_cmdline.py
 create mode 100644 src/DonkiOrchestraLib.py
 create mode 100644 src/InfoServer.py
 create mode 100644 src/hdfwriter.py
 create mode 100644 src/tinydb/__init__.py
 create mode 100644 src/tinydb/database.py
 create mode 100644 src/tinydb/middlewares.py
 create mode 100644 src/tinydb/operations.py
 create mode 100644 src/tinydb/queries.py
 create mode 100644 src/tinydb/storages.py
 create mode 100644 src/tinydb/utils.py

diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..4ddf51e
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,19 @@
+NAME = donkidirector-srv
+MAIN = DonkiDirectorServer.py
+
+DIRNAME = $(NAME:-srv=)
+MODNAME = $(MAIN:.py=)
+
+default: bin
+	@cp src/*.py bin/${DIRNAME}
+	@cp src/tinydb/*.py bin/${DIRNAME}/tinydb
+	@printf "#!/usr/bin/env python\nimport sys\nsys.path.append(sys.path[0]+'/${DIRNAME}')\nfrom ${MODNAME} import main\nif __name__ == '__main__':\n    main()\n" > bin/${NAME}
+	@chmod +x bin/${NAME} bin/${DIRNAME}/${MAIN}
+
+bin:
+	@test -d $@/${DIRNAME}/tinydb || mkdir -p $@/${DIRNAME}/tinydb
+
+clean:
+	@rm -fr bin/ src/*~ src/*.pyc src/tinydb/*.pyc
+
+.PHONY: default clean
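+
+# For reference, the launcher generated above into bin/donkidirector-srv is
+# expected to look like this with the default NAME/MAIN (a sketch, not
+# captured output):
+#   #!/usr/bin/env python
+#   import sys
+#   sys.path.append(sys.path[0]+'/donkidirector')
+#   from DonkiDirectorServer import main
+#   if __name__ == '__main__':
+#       main()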
diff --git a/src/DataBuffer.py b/src/DataBuffer.py
new file mode 100644
index 0000000..21b5331
--- /dev/null
+++ b/src/DataBuffer.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*- 
+
+
+##############################################################################
+## license :
+##============================================================================
+##
+## File :        DataBuffer.py
+## 
+## Project :     DonkiOrchestra
+##
+## This file is part of a Tango device class.
+## 
+## Tango is free software: you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation, either version 3 of the License, or
+## (at your option) any later version.
+## 
+## Tango is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+## GNU General Public License for more details.
+## 
+## You should have received a copy of the GNU General Public License
+## along with Tango.  If not, see <http://www.gnu.org/licenses/>.
+## 
+##
+## $Author :      sci.comp$
+##
+## $Revision :    $
+##
+## $Date :        $
+##
+## $HeadUrl :     $
+##============================================================================
+##            This file is generated by POGO
+##    (Program Obviously used to Generate tango Object)
+##
+##        (c) - Software Engineering Group - ESRF
+##############################################################################
+
+import sys
+import json
+import threading 
+import time
+import numpy
+import traceback
+
+DEBUG = False 
+THREAD_DELAY_SEC = 0.05
+
+
+
+class MyEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, numpy.integer):
+            return int(obj)
+        elif isinstance(obj, numpy.floating):
+            return float(obj)
+        elif isinstance(obj, numpy.ndarray):
+            return obj.tolist()
+        else:
+            return super(MyEncoder, self).default(obj)
+            
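+# Usage sketch (hypothetical values): MyEncoder lets json.dumps serialize
+# numpy scalars and arrays that the stock encoder rejects:
+#   json.dumps({'trg': numpy.int64(5), 'wave': numpy.arange(3)}, cls=MyEncoder)
+#   # -> '{"trg": 5, "wave": [0, 1, 2]}'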
+
+class donkiBuffer(threading.Thread):
+
+#-----------------------------------------------------------------------------------
+#    __init__
+#-----------------------------------------------------------------------------------
+    def __init__(self, data_queue, max_buffer_size=5000000):
+        threading.Thread.__init__(self)
+        self.dbuffers = {}
+        self.trg_range = {}
+        self._alive = True
+        self.max_size = max_buffer_size
+        self.lock = threading.Lock()
+        self._last_trigger = -1
+        self.data_queue = data_queue
+        
+    def check_trg_range(self, data_name, trg_in):
+        max_offset = 10000
+        if data_name not in self.trg_range:
+            if trg_in < 0:
+                # Fake trigger value for an unknown source
+                return False
+            self.trg_range[data_name] = [trg_in,trg_in]
+        elif trg_in < self.trg_range[data_name][0]:
+            if (trg_in != 1) and (self.trg_range[data_name][0]-trg_in > max_offset):
+                return False
+            self.trg_range[data_name][0] = trg_in
+        elif trg_in > self.trg_range[data_name][1]:
+            if trg_in - self.trg_range[data_name][1] > max_offset:
+                return False
+            self.trg_range[data_name][1] = trg_in
+        return True
+
+    def notify_new_data(self, new_data):
+        data_name, trg_in, trg_f, data_in = new_data
+        self.lock.acquire()
+        if not self.check_trg_range(data_name,trg_in):
+            # Fake trigger value
+            self.lock.release()
+            return
+        if data_name not in self.dbuffers:
+            self.dbuffers[data_name] = {}
+        elif trg_in == 1:
+            # First trigger for this data source!
+            if self._last_trigger > trg_in:
+                # First trigger ever! Clear all the buffers
+                self.dbuffers = {}
+                self.trg_range = {}
+            self.dbuffers[data_name] = {}
+            self.trg_range[data_name] = [1,1]
+        idx = 0
+        for dd in data_in:
+            if sys.getsizeof(self.dbuffers[data_name]) > self.max_size:
+                try:
+                    # Buffer full: drop the oldest trigger before storing a new one
+                    if self.trg_range[data_name][0] in self.dbuffers[data_name]:
+                        del self.dbuffers[data_name][self.trg_range[data_name][0]]
+                    self.trg_range[data_name][0] = self.trg_range[data_name][0] + 1
+                except:
+                    traceback.print_exc()
+            self.dbuffers[data_name][trg_in+idx] = dd
+            idx += 1
+        self._last_trigger = max(self._last_trigger,trg_f)
+        self.lock.release()
+
+    def retrieve_range(self, data_name, trg_in, trg_f):
+        self.lock.acquire()
+        try:
+            reply = [self.dbuffers[data_name].get(k, None) for k in range(trg_in,trg_f+1)]
+        except:
+            reply = []
+        self.lock.release()
+        return reply
+
+    def retrieve_last(self, data_name, triggers):
+        self.lock.acquire()
+        try:
+            # Use the last trigger recorded for this source as the range end
+            trg_f = self.trg_range[data_name][1]
+            reply = [self.dbuffers[data_name].get(k, None) for k in range(trg_f-(triggers-1),trg_f+1)]
+        except:
+            reply = []
+        self.lock.release()
+        return reply
+
+    def retrieve_all(self, data_name):
+        self.lock.acquire()
+        try:
+            min_tr = min(self.dbuffers[data_name].keys())
+            max_tr = max(self.dbuffers[data_name].keys())
+            reply = [self.dbuffers[data_name].get(k, None) for k in range(min_tr,max_tr+1)]
+        except:
+            reply = []
+        self.lock.release()
+        return reply
+
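+    # Retrieval sketch (hypothetical buffer contents): once triggers 1..10 of
+    # 'player1/wave' have been stored,
+    #   buf.retrieve_range('player1/wave', 3, 5)  # -> [d3, d4, d5]
+    #   buf.retrieve_last('player1/wave', 2)      # -> [d9, d10]
+    # missing triggers come back as None placeholders.
+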
+    def stop_and_exit(self):
+        self._alive = False
+        
+    def run(self):
+        while self._alive:
+            if self.data_queue.qsize() == 0:
+                time.sleep(THREAD_DELAY_SEC)
+                continue
+            #
+            read_loop = 0
+            while self.data_queue.qsize() > 0 and read_loop < 100:
+                data_in = self.data_queue.get()
+                self.notify_new_data(data_in)
+                read_loop += 1
+                self.data_queue.task_done()
+
+
+def main():
+    import multiprocessing
+    # donkiBuffer needs a queue filled with (data_name, trg_in, trg_f, data)
+    # tuples; an empty JoinableQueue lets the thread run standalone.
+    dbt = donkiBuffer(multiprocessing.JoinableQueue())
+    dbt.start()
+    
+if __name__ == '__main__':
+    main()
diff --git a/src/DataStorage.py b/src/DataStorage.py
new file mode 100644
index 0000000..6610911
--- /dev/null
+++ b/src/DataStorage.py
@@ -0,0 +1,467 @@
+# -*- coding: utf-8 -*-
+
+import sys
+import threading
+import multiprocessing
+import time
+import traceback
+import numpy
+from hdfwriter import HDFWriter
+
+
+HDFWTHREADS = 1
+MAX_HDFWTHREADS = 2
+
+HDF5FILESIZE = 15
+
+_dbg = False 
+
+class DataStorage(multiprocessing.Process):
+    """
+    Do management of all Data for the HDF5 files. 
+    
+    
+    """
+    def __init__(self, data_queue, task_queue, notif_queue):
+        multiprocessing.Process.__init__(self)
+        self.data_queue = data_queue
+        self.task_queue = task_queue
+        self.notify_queue = notif_queue
+        self.go_on = True
+        self.daq_running = False
+        self.data_archives = []
+        self.metadata_archives = {}
+
+        self._hdf_threads = HDFWTHREADS
+        #: The number of triggers to be saved for each file. This parameter
+        #: may be changed afterward and will affect the next HDF5 file to 
+        #: be created. The default value is 15.
+        self.file_size = HDF5FILESIZE
+        self.max_triggers = HDF5FILESIZE
+
+        #: List of :class:`~fermidaq.lib.bunchmanager.BunchManager.HDFs`
+        #: helpers, one per :class:`~fermidaq.lib.hdfwriter.HDFWriter`
+        #: thread used to write the HDF5 files.
+        #:
+        self._hdfwriters = self._init_hdfs('', '')
+        self.file_prefix = ''
+        self.Files_contiguous = True
+        self.files_opened = 0
+        self._stop_at_current_file = False
+        self.allocated_bunch_range = (0, 0)
+        self.files2save = -1
+        self.first_shot_saved = 0
+        self._shots_saved = 0
+        self._daq_list = dict()
+        self._hdf_file_timeout = None
+        self.mutex = threading.Lock()
+
+
+    class HDFs():
+        """
+        Auxiliary class that helps control the :class:`~fermidaq.lib.hdfwriter.HDFWriter`
+        threads. It has the following attributes:
+
+        * ptr2hdfwriter: instance of :class:`~fermidaq.lib.hdfwriter.HDFWriter`
+        * initial_bn   : the first bunch number to be saved in that instance's HDF5 file.
+        * last_bn      : the last bunch number to be saved in that instance's HDF5 file.
+        * working      : flag indicating whether the writer is busy or idle.
+        """
+        def __init__(self,pt):
+            self.ptr2hdfwriter = pt
+            self.hdf_key = pt.key
+            self.initial_bn = 0
+            self.last_bn = 0
+            self.working = False
+            
+    def run(self):
+        try:
+            [th.ptr2hdfwriter.start() for th in self._hdfwriters]
+            while self.go_on:
+                self._process_tasks()
+                if (not self.daq_running) or (self.data_queue.qsize() + len(self.data_archives)) == 0:
+                    time.sleep(0.01)
+                    continue
+                #
+                read_loop = 0
+                while self.data_queue.qsize() > 0 and read_loop < 100:
+                    data_in = self.data_queue.get()
+                    self.store_data(data_in)
+                    read_loop += 1
+                    self.data_queue.task_done()
+                # Sort archive on the base of bunchnumber
+                self.data_archives.sort(key=self.getKey)
+                #
+                store_loop = 0
+                while len(self.data_archives) > 0 and store_loop < 100:
+                    # Wait for slow data
+                    next_daq_key = (self.data_archives[0])[0]
+                    next_bn_in = (self.data_archives[0])[1]
+                    next_bn_fi = (self.data_archives[0])[2]
+                    self.mutex.acquire()
+                    self.data_ready(next_daq_key,next_bn_in,next_bn_fi)
+                    self.mutex.release()
+                    store_loop += 1
+        except:
+            print traceback.format_exc()
+        #
+        for _hdf in self._hdfwriters:
+            _hdf.ptr2hdfwriter.stop_thread()
+
+    
+    def _process_tasks(self):
+        while self.task_queue.qsize() > 0:
+            try:
+                next_task = self.task_queue.get()
+                if next_task[0] == 'stop':
+                    self.go_on = False
+                elif next_task[0] == 'stop_and_clear':
+                    for _hdf in self._hdfwriters:
+                        _hdf.ptr2hdfwriter._force_close_daq()
+                    while not self.data_queue.empty():
+                        self.data_queue.get()
+                    del self.data_archives[:]
+                    self.daq_running = False
+                elif next_task[0] == 'set_pause':
+                    if next_task[1]:
+                        self.notify_queue.put(['update_report','Sequence paused'])
+                    else:
+                        self.notify_queue.put(['update_report','Sequence restarted'])
+                    for _hdf in self._hdfwriters:
+                        _hdf.ptr2hdfwriter.set_paused(next_task[1])
+                elif next_task[0] == 'file_prefix':
+                    self.file_prefix = next_task[1]
+                    for _hdf in self._hdfwriters:
+                        _hdf.ptr2hdfwriter.file_prefix = next_task[1]
+                elif next_task[0] == 'file_path':
+                    self.file_path = next_task[1]
+                    for _hdf in self._hdfwriters:
+                        _hdf.ptr2hdfwriter.file_path = next_task[1]
+                elif next_task[0] == 'Files_contiguous':
+                    self.Files_contiguous = next_task[1]
+                elif next_task[0] == 'stop_at_this_file':
+                    self._stop_at_current_file = True
+                elif next_task[0] == 'File_size':
+                    self.file_size = next_task[1]
+                    if self.max_triggers > 0:
+                        self.files2save = self.max_triggers / self.file_size
+                        if self.max_triggers % self.file_size:
+                            self.files2save += 1
+                elif next_task[0] == 'max_triggers':
+                    self.max_triggers = next_task[1]
+                    if self.max_triggers > 0:
+                        if self.file_size > self.max_triggers:
+                            self.files2save = 1
+                        else:
+                            self.files2save = self.max_triggers / self.file_size
+                            if self.max_triggers % self.file_size:
+                                self.files2save += 1
+                    else:
+                        self.files2save = -1
+                elif next_task[0] == 'daq_switch_off':
+                    for _hdf in self._hdfwriters:
+                        _hdf.ptr2hdfwriter.daq_switch_off(next_task[1])
+                    for daq_key in next_task[1]:
+                        if daq_key in self._daq_list.keys():
+                            self._daq_list.pop(daq_key)
+                elif next_task[0] == 'daq_switch_on':
+                    for daq_key in next_task[1]:
+                        self._daq_list[daq_key] = 0
+                elif next_task[0] == 'start_daq':
+                    self.daq_running = True
+                    self._stop_at_current_file = False
+                    self.files_saved = 0
+                    self.files_opened = 0
+                    self.allocated_bunch_range = (0,0)
+                    self.first_shot_saved = 0
+                    self._shots_saved = 0
+                elif next_task[0] == 'hdf_finished':
+                    hdf_key = next_task[1]
+                    full_file_path = next_task[2]
+                    report = next_task[3]
+                    self.hdf_finished(hdf_key, full_file_path, report)
+                elif next_task[0] == 'set_file_timeout':
+                    self._hdf_file_timeout = next_task[1]
+                    for _hdf in self._hdfwriters:
+                        _hdf.ptr2hdfwriter.set_no_data_timeout_sec(next_task[1])
+                #
+                self.task_queue.task_done()
+            except:
+                print traceback.format_exc()
+
+        
+
+                
+    def getKey(self,dataitem):
+        # return bn_in for sorting the list
+        return dataitem[1]
+
+    def store_data(self,data_in):
+        try:
+            if len(data_in) == 4:
+                # Triggered Data source
+                daq_key = data_in[0]
+                bn_in = min(data_in[1],data_in[2])
+                bn_fi = max(data_in[1],data_in[2])
+                if daq_key not in self._daq_list.keys():
+                    self._daq_list[daq_key] = 0
+                if isinstance(data_in[3], list) or isinstance(data_in[3], numpy.ndarray):
+                    #self.data_ready(daq_key,bn_in,bn_fi,data_in[3])
+                    self.data_archives.append((daq_key,bn_in,bn_fi,data_in[3]))
+                    if len(data_in[3]) != (bn_fi - bn_in + 1):
+                        print "MMMMMMM.....",daq_key,bn_in,bn_fi,len(data_in[3])
+            elif len(data_in) == 3:
+                # Metadata
+                metadaq_key = data_in[0]
+                bn_in = data_in[1]
+                self.metadata_archives[metadaq_key] = [bn_in,data_in[2]]
+        except:
+            print traceback.format_exc()
+            
+
+    def get_metadata(self):
+        # Used by HDFwriters to store metadata
+        return self.metadata_archives.copy()
+        
+
+
+    def data_ready(self,daq_key,bn_in,bn_f):
+        """
+        Receive notifications from :class:`~fermidaq.lib.attribdaq.AttribDaq`
+        and pass them to the :class:`~fermidaq.lib.hdfwriter.HDFWriter`.
+
+        To do this, it first scans the list of busy
+        :class:`~fermidaq.lib.hdfwriter.HDFWriter` threads to see whether
+        one of them must be notified.
+
+        If none of them should be, it picks one of the idle threads and
+        passes the notification to it, just after configuring its
+        acquisition range (:meth:`~fermidaq.lib.hdfwriter.HDFWriter.save_conf`).
+        """
+        try:
+            if _dbg:
+                print "DataServer Data Ready",daq_key,bn_in,bn_f
+            # first of all: we must be sure that there are hdfs allocated
+            # for the given range:
+            # so, if the last allocated bunch number is lower
+            # than the current given final bunch number, we should allocate
+            # more hdfs to store this data.
+            while (self.Files_contiguous or self.files_opened < 1) and \
+                    not self._stop_at_current_file and \
+                    self.allocated_bunch_range[1] < bn_f  \
+                    and (self.files_opened < self.files2save 
+                    or self.files2save == -1): # it will allocate up to files2save 
+                    #at this moment we do not accept that both, bn_in and bn_f is
+                    #so great that we would have to allocate more than one hdfs
+                    #at this single time.
+                    #assert 
+                    #bn_f - self.allocated_bunch_range[1] < self.file_size
+                    #new range:
+                    if (not self.Files_contiguous) or (self.allocated_bunch_range[0] < 0):
+                        all_bn_in = bn_in
+                    else:
+                        all_bn_in = self.allocated_bunch_range[1] + 1
+                    
+                    all_bn_f = (all_bn_in + self.file_size - 1)
+                    if self.max_triggers > 0:
+                        all_bn_f = min(all_bn_f, self.max_triggers)
+
+                    idle_hdfwriter = [hdf for hdf in self._hdfwriters 
+                                        if hdf.working == False]
+
+                    #check if there is a free hdfwriter
+                    if len(idle_hdfwriter) == 0:
+                        if len(self._hdfwriters) < MAX_HDFWTHREADS:
+                            new_id = len(self._hdfwriters)
+                            fpath = self.file_path
+                            fpref = self.file_prefix
+                            #
+                            key='hdf%d'%(new_id)
+                            self._hdfwriters += [self.HDFs(HDFWriter(key, self,
+                                                                     file_path=fpath, file_prefix=fpref))]
+                            #
+                            self._hdfwriters[-1].ptr2hdfwriter.start()
+                            time.sleep(0.01)
+                            idle_hdfwriter = [self._hdfwriters[-1]]
+                        else:
+                            # NO MORE HDFs!
+                            break
+        
+                    if len(idle_hdfwriter) > 0:
+                        #get the pointer to the free hdfwriter.
+                        free_hdfwriter = idle_hdfwriter[0]
+        
+                        #allocate this hdfwriter for the new range
+                        if _dbg:
+                            print ("""DataStorage: Allocating hdfwriter %s 
+                                for range %d->%d"""% (free_hdfwriter.hdf_key, 
+                                                      all_bn_in, all_bn_f))
+                        free_hdfwriter.ptr2hdfwriter.file_path = self.file_path
+                        free_hdfwriter.ptr2hdfwriter.file_prefix = self.file_prefix
+                        name_with_trigger_info = (self.files2save != 1)
+                        free_hdfwriter.ptr2hdfwriter.save_conf(all_bn_in, all_bn_f,self.Files_contiguous,name_with_trigger_info)
+                        if self._hdf_file_timeout is not None:
+                            free_hdfwriter.ptr2hdfwriter.set_no_data_timeout_sec(self._hdf_file_timeout)
+                        free_hdfwriter.initial_bn = all_bn_in
+                        free_hdfwriter.last_bn = all_bn_f
+                        free_hdfwriter.ptr2hdfwriter.daq_switch_on(self._daq_list.keys())
+                        free_hdfwriter.working = True
+        
+                        if (self.allocated_bunch_range[0] <= 0):
+                            self.allocated_bunch_range = (all_bn_in, all_bn_f)
+                        else:
+                            self.allocated_bunch_range = (min(all_bn_in,self.allocated_bunch_range[0]),max(all_bn_f,self.allocated_bunch_range[1]))
+                        self.files_opened += 1
+            #
+            # Extract data from internal data archive
+            data_in = (self.data_archives.pop(0))[3]
+            #
+            if (bn_f > self.allocated_bunch_range[1]):
+                if (bn_in > self.allocated_bunch_range[1]):
+                    # chunk of data cannot be allocated at the moment, skip.
+                    self.data_archives.append((daq_key, bn_in, bn_f,data_in))
+                    return
+                # not all shots can be saved (no more HDF threads)
+                # postpone 'overflow' shots
+                last_avail_bn = self.allocated_bunch_range[1]
+                self.data_archives.append((daq_key, last_avail_bn+1, bn_f,data_in[-(bn_f-last_avail_bn):]))
+                if len(data_in[-(bn_f-last_avail_bn):]) != (bn_f- (last_avail_bn + 1) + 1):
+                    print "UUUUUUU.....",daq_key,last_avail_bn+1,bn_f,len(data_in[-(bn_f-last_avail_bn):])
+                #
+                data_in = data_in[:-(bn_f-last_avail_bn)]
+                bn_f = last_avail_bn
+                if len(data_in) != (bn_f-bn_in+1):
+                    print "********",daq_key,len(data_in),(bn_f-bn_in),bn_in,bn_f
+            #
+            if (bn_in < self.allocated_bunch_range[0]):
+                # purge too old data
+                if (bn_f < self.allocated_bunch_range[0]):
+                    # chunk of data too old: forget about it
+                    return
+                data_in = data_in[-(bn_f-self.allocated_bunch_range[0]+1):]
+                bn_in = self.allocated_bunch_range[0]
+                if len(data_in) != (bn_f-bn_in+1):
+                    print "#########",daq_key,len(data_in),(bn_f-bn_in),bn_in,bn_f
+            #
+            #
+            # Look for all working hdfs which of them has the initial
+            # bunch and the final bunch in the range of the received
+            # triggers.
+            # So, if the initial bunch of the hdf writer is greater of
+            # the last bunch received or if the last bunch of the hdf writer
+            # is lower than the first bunch received, this means that the
+            # range of this hdfwriter is outside the range of this
+            # input and must be rejected.
+            # NOTE: working_entry[1] initial_bunch
+            #       working_entry[2] final_bunch
+            #
+            #  The rule is reject the hdf if:
+            #     hdf.initial_bunch > bn_f or hdf.final_bunch < bn_in
+            #
+            pass2this_working = None
+            working_entry = self._hdfwriters[0]
+            pass2this_working = [working_entry for working_entry in \
+                         self._hdfwriters \
+                         if working_entry.working == True and \
+                         not (working_entry.initial_bn > bn_f \
+                         or working_entry.last_bn < bn_in)]
+
+            daq_bn_f = -1
+            daq_bn_in = -1
+            last_bn_saved = -1
+            for hdfs_entry in pass2this_working:
+                hdf_key = hdfs_entry.hdf_key
+                hdf_bn_in = hdfs_entry.initial_bn
+                hdf_bn_f = hdfs_entry.last_bn
+                daq_bn_in = max(hdf_bn_in, bn_in)
+                daq_bn_f = min(hdf_bn_f, bn_f)
+                idx_in = daq_bn_in - bn_in
+                idx_f = daq_bn_f - bn_in + 1
+                last_bn_saved = max(last_bn_saved,daq_bn_f)
+                hdfs_entry.ptr2hdfwriter.save_data_list(daq_key, daq_bn_in, daq_bn_f,data_in[idx_in:idx_f])
+            
+            if (daq_bn_f != -1) and (daq_bn_in != -1):
+                if (self.Files_contiguous):
+                    if (self.first_shot_saved == 0):
+                        self.first_shot_saved = daq_bn_in
+                    new_shots_saved = daq_bn_f - self.first_shot_saved + 1
+                elif len(pass2this_working):
+                    # only one file opened at time
+                    new_shots_saved = (self.files_saved * self.file_size) + daq_bn_f - pass2this_working[0].initial_bn + 1
+                # Check the number of shot saved 
+                if new_shots_saved > self._shots_saved:
+                    self.notify_queue.put(['stored_shot_counter',new_shots_saved])
+                    self._shots_saved = new_shots_saved
+            else:
+                # Nothing has been done: a real problem, so emit a debug
+                # message only from time to time
+                if int(time.time()) % 60 == 0:
+                    msg = "Nothing has been done, re-queued %s,%s,%s" % (str(daq_key),str(bn_in),str(bn_f))
+                    self.notify_queue.put(['update_report',msg])
+                self.data_archives.append((daq_key,bn_in,bn_f,data_in))
+                if len(data_in) != (bn_f - bn_in + 1):
+                    print "WARNING: re-queued data length mismatch",daq_key,bn_in,bn_f,len(data_in)
+        except:
+            print traceback.format_exc()
+
+    def put_msg_in_notify_queue(self, topic_in):
+        self.notify_queue.put(topic_in)
+
+    
+    def hdf_finished(self, hdf_key, full_file_path, report):
+        """
+        Receive the notification from :class:`~fermidaq.lib.hdfwriter.HDFWriter`
+        that it has concluded a file acquisition. 
+        
+        After receiving this notification, it marks that
+        :class:`~fermidaq.lib.hdfwriter.HDFWriter`
+        as no longer busy, making it available to receive a new request for
+        saving a new HDF5 file.
+
+        :param hdf_key: Key of the :class:`~fermidaq.lib.hdfwriter.HDFWriter` that has concluded an HDF5 file.
+        :param full_file_path: Path of the file just concluded.
+                
+        """
+        self.mutex.acquire()
+        try:
+            # Set hdfwriter as not working and update allocated bunch range info
+            
+            min_bn_in = 0
+            for hdf in self._hdfwriters:
+                if hdf.hdf_key == hdf_key:
+                    hdf.working = False
+                else:
+                    if (min_bn_in == 0) or (hdf.initial_bn < min_bn_in):
+                        min_bn_in = hdf.initial_bn
+                
+            self.allocated_bunch_range = (min_bn_in,self.allocated_bunch_range[1])
+            self.notify_queue.put(['hdf_finished',hdf_key,full_file_path,report])
+
+            self.files_saved += 1
+            if not self.Files_contiguous:
+                self.files_opened -= 1
+        except:
+            print traceback.format_exc()
+        self.mutex.release()
+
+
+    def _init_hdfs(self, file_path, file_prefix):
+        """  Create a dictionary for be managed by the idle_hdfwriter
+
+        :returns: A list of :class:`~fermidaq.lib.bunchmanager.BunchManager.HDFs` for management of :class:`~fermidaq.lib.hdfwriter.HDFWriter` threads.
+        """
+        self.file_path = file_path
+        self.file_prefix = file_prefix
+        self.files_saved = 0
+
+        d = []
+        for i in range(self._hdf_threads):
+            key ='hdf%d'%(i)
+            d += [self.HDFs(HDFWriter(key,self,file_path=file_path,file_prefix=file_prefix))]
+        return d
+
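+
+# Driving sketch (an assumption drawn from DirectorBgnThread below, not a
+# tested snippet): DataStorage runs as a separate process fed through three
+# multiprocessing.JoinableQueue objects.
+#   data_q, task_q, notif_q = [multiprocessing.JoinableQueue() for _ in range(3)]
+#   ds = DataStorage(data_q, task_q, notif_q)
+#   ds.start()
+#   task_q.put(['file_prefix', 'daqfile']); task_q.join()
+#   task_q.put(['start_daq']); task_q.join()
+#   data_q.put(('player1/wave', 1, 1, [0.0]))  # (daq_key, bn_in, bn_f, data list)
+#   task_q.put(['stop'])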
diff --git a/src/DirectorBgnThread.py b/src/DirectorBgnThread.py
new file mode 100644
index 0000000..a6c607b
--- /dev/null
+++ b/src/DirectorBgnThread.py
@@ -0,0 +1,862 @@
+import time
+import threading
+import os
+from DonkiOrchestraLib import CommunicationClass
+import traceback
+import socket
+import multiprocessing
+from InfoServer import infoServerThread
+from DataStorage import DataStorage
+
+
+THREAD_DELAY_SECS = 1
+BUSY_PLAYER_TIMEOUT_SECS = 30
+
+DEBUG = False
+
+
+class directorThread(threading.Thread):
+
+    #-----------------------------------------------------------------------------------
+    #    DataSource utility class
+    #-----------------------------------------------------------------------------------
+    class DataSource():
+        def __init__(self, data_name, alias_name, metadata = False):
+            self.enabled = True
+            self.data_name = data_name.lower()
+            self.data_alias = alias_name
+            self.metadata = metadata
+            self.myState = "STANDBY"
+            self.last_ack_time = 0
+            self.last_trigger = 0
+
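+    # Keying convention: data sources are registered as '<player>/<dataname>'
+    # (lower-cased), e.g. a hypothetical 'player1/waveform'; see
+    # _retrieve_players_info below.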
+
+#-----------------------------------------------------------------------------------
+#    __init__
+#-----------------------------------------------------------------------------------
+    def __init__(self, inforserver_port = 50010):
+        threading.Thread.__init__(self)
+        self._alive = True
+        self._started = False
+        self._paused = False
+        self._datastorage_under_pressure = False
+        self.actual_priority = 0
+        self.PlayersInfo = {}
+        self.DataAliases = {}
+        self.DaqSourcesEnabled = {}
+        self.Players_per_level = {}
+        self.MetaDataAliases = {}
+        self.MetaDaqsEnabled = {}
+        self.MetaDataPeriod = 10
+        self.busy_Players = []
+        self.trg = -1
+        self._state = 'OFF'
+        self.EnableDataSaving = False
+        self._Report = ''
+        self._last_player_table = None
+        self.last_files_saved = []
+        self.last_log_messages = []
+        self.daq_sources = {}
+        self._description_message = ''
+        self._players_timeout = BUSY_PLAYER_TIMEOUT_SECS
+        self._file_finished_callbacks = []
+        #
+        self.slowest_player_time = 1.0
+        #
+        self.zcc = CommunicationClass()
+        #
+        self.sub_socks = {}
+        self.sub_ports = {}
+        self.infoServer = infoServerThread(Port=inforserver_port, notif_function = self._info_db_changed)
+        #
+        director_tokens = ['donkidirector','director',self.zcc.my_pub_socket_info()]
+        self.infoServer.write_to_db(director_tokens)
+        #
+        for da in eval(self.infoServer.get_from_db(['dataaliases'])):
+            self.DataAliases[str(da['name'])] =  str(da['data'])
+        #
+        for da in eval(self.infoServer.get_from_db(['datasources_enabled'])):
+            self.DaqSourcesEnabled[str(da['name'])] =  eval(da['data'])
+        #
+        for da in eval(self.infoServer.get_from_db(['metadata_aliases'])):
+            self.MetaDataAliases[str(da['name'])] =  str(da['data'])
+            self.MetaDaqsEnabled[str(da['name'])] =  False
+        #
+        for da in eval(self.infoServer.get_from_db(['metadata_enabled'])):
+            self.MetaDaqsEnabled[str(da['name'])] =  eval(da['data'])
+
+        #
+        self.infoServer.start()
+        #
+        self.data_buffering_enabled = False
+        self.buffer_data_queue = multiprocessing.JoinableQueue()
+        #
+        self.datastorage_task_queue = multiprocessing.JoinableQueue()
+        self.datastorage_data_queue = multiprocessing.JoinableQueue() #(maxsize=50)
+        self.datastorage_notif_queue = multiprocessing.JoinableQueue()
+        self.datastorage = DataStorage(data_queue=self.datastorage_data_queue,
+                                     task_queue=self.datastorage_task_queue,
+                                     notif_queue=self.datastorage_notif_queue)
+        self.datastorage.start()
+        # set default properties about file saving
+        self.set_file_prefix('daqfile')
+        self.set_files_contiguous(True)
+        self.set_max_triggers(10)
+        self.set_file_size(100)
+        self.set_file_path(".")
+
+
+
+#-----------------------------------------------------------------------------------
+#    _check_players_changed
+#-----------------------------------------------------------------------------------
+    def _check_players_changed(self):
+        player_table = eval(self.infoServer.get_from_db(['donkiplayers']))
+        if self._last_player_table != player_table:
+            self._info_db_changed('donkiplayers', player_table)
+
+
+
+#-----------------------------------------------------------------------------------
+#    _info_db_changed
+#-----------------------------------------------------------------------------------
+    def _info_db_changed(self, tablename, new_info):
+        if str(tablename) == 'donkidirector':
+            return
+        elif str(tablename) == 'donkiplayers':
+            if self._last_player_table == new_info:
+                return
+            if DEBUG:
+                print "players changed",new_info
+            try:
+                self.PlayersInfo.clear()
+                for pl in new_info:
+                    self.PlayersInfo[str(pl['name'])] = {'url':str(pl['data'])}
+                self._last_player_table = new_info
+            except:
+                traceback.print_exc()
+        elif str(tablename) == 'dataaliases':
+            if DEBUG:
+                print "data aliases changed",new_info
+            try:
+                self.DataAliases.clear()
+                for da in new_info:
+                    self.DataAliases[da['name']] =  str(da['data'])
+            except:
+                traceback.print_exc()
+        elif str(tablename) == 'datasources_enabled':
+            if DEBUG:
+                print "data datasources_enabled changed",new_info
+            try:
+                self.DaqSourcesEnabled.clear()
+                for da in new_info:
+                    self.DaqSourcesEnabled[da['name']] =  eval(da['data'])
+            except:
+                traceback.print_exc()
+        elif str(tablename) == 'metadata_aliases':
+            if DEBUG:
+                print "metadata aliases changed",new_info
+            try:
+                self.MetaDataAliases.clear()
+                for da in new_info:
+                    self.MetaDataAliases[da['name']] =  str(da['data'])
+            except:
+                traceback.print_exc()
+        elif str(tablename) == 'metadata_enabled':
+            if DEBUG:
+                print "data metadata_enabled changed",new_info
+            try:
+                self.MetaDaqsEnabled.clear()
+                for da in new_info:
+                    self.MetaDaqsEnabled[da['name']] =  eval(da['data'])
+            except:
+                traceback.print_exc()
+
+
+
+#-----------------------------------------------------------------------------------
+#    _retrieve_players_info
+#-----------------------------------------------------------------------------------
+    def _retrieve_players_info(self, reconnect = False):
+        #
+        not_active_players = []
+        max_priority = -1
+        self.Players_per_level.clear()
+        self.Players_per_level[0] = []
+        try:
+            for pl_key in self.PlayersInfo.keys():
+                pl_name = str(pl_key)
+                if reconnect and (not self.zcc.create_sub_socket(pl_name,self.PlayersInfo[pl_name]['url'])):
+                    continue
+                if DEBUG:
+                    print "Asking info to", pl_name
+                info = self.zcc.ask_for_info(pl_name)
+                if DEBUG:
+                    print info
+                if len(info) == 0:
+                    not_active_players.append(pl_name)
+                    continue
+                if info['data'] == []:
+                    self.PlayersInfo[pl_name]['type'] ='ack'
+                else:
+                    self.PlayersInfo[pl_name]['type'] ='data'
+                    for dataname in info['data']:
+                        full_daq_name = pl_name+"/"+dataname.lower()
+                        if full_daq_name not in self.DataAliases:
+                            self.infoServer.write_to_db(['dataaliases',full_daq_name,full_daq_name])
+                        if full_daq_name not in self.DaqSourcesEnabled:
+                            self.infoServer.write_to_db(['datasources_enabled',full_daq_name,'True'])
+                        data_alias = self.DataAliases[full_daq_name]
+                        if full_daq_name not in self.daq_sources:
+                            self.daq_sources[full_daq_name] = self.DataSource(dataname, data_alias, metadata = False)
+                        if (self.daq_sources[full_daq_name].data_alias != data_alias):
+                            # Player Data alias has changed
+                            self.daq_sources[full_daq_name].data_alias = data_alias
+                if info['metadata'] != []:
+                    for dataname in info['metadata']:
+                        full_daq_name = pl_name+"/"+dataname.lower()
+                        if full_daq_name not in self.MetaDataAliases:
+                            self.infoServer.write_to_db(['metadata_aliases',full_daq_name,full_daq_name])
+                        if full_daq_name not in self.MetaDaqsEnabled:
+                            self.infoServer.write_to_db(['metadata_enabled',full_daq_name,'True'])
+                        data_alias = self.MetaDataAliases[full_daq_name]
+                        if full_daq_name not in self.daq_sources:
+                            self.daq_sources[full_daq_name] = self.DataSource(dataname, data_alias, metadata = True)
+                        if (self.daq_sources[full_daq_name].data_alias != data_alias):
+                            # Player Data alias has changed
+                            self.daq_sources[full_daq_name].data_alias = data_alias
+                #
+                self.PlayersInfo[pl_name]['status'] ='OFF'
+                #
+                dprio = info['prio']
+                if dprio < 0:
+                    self.PlayersInfo[pl_name]['prio'] = "Disabled"
+                else:
+                    self.PlayersInfo[pl_name]['prio'] = str(dprio)
+                if dprio > max_priority:
+                    max_priority = dprio
+                if dprio not in self.Players_per_level.keys():
+                    self.Players_per_level[dprio] = []
+                if pl_name not in self.Players_per_level[dprio]:
+                    self.Players_per_level[dprio].append(pl_name)
+            #
+            for pl_name in not_active_players:
+                #self.PlayersInfo.pop(pl_name)
+                self.PlayersInfo[pl_name]['status'] ='ALARM'
+                self.PlayersInfo[pl_name]['prio'] = "Disabled"
+                self.PlayersInfo[pl_name]['type'] ='Unknown'
+            # Clear not existing metadata from the DB
+            for mda in self.MetaDataAliases.keys():
+                if mda not in self.daq_sources or not self.daq_sources[mda].metadata:
+                    self.MetaDataAliases.pop(mda)
+                    #self.infoServer.del_from_db(['metadata_aliases',mda])
+                    #self.infoServer.del_from_db(['metadata_enabled',mda])
+            # Clear not existing datasources from the DB
+            for da in self.DataAliases.keys():
+                if da not in self.daq_sources or self.daq_sources[da].metadata:
+                    self.DataAliases.pop(da)
+                    #self.infoServer.del_from_db(['dataaliases',da])
+                    #self.infoServer.del_from_db(['datasources_enabled',da])
+        except:
+            traceback.print_exc()
+        return max_priority
+
+
+#-----------------------------------------------------------------------------------
+#    _start_stop_Players
+#-----------------------------------------------------------------------------------
+    def _start_stop_Players(self, bool_in):
+        #
+        for a in self.PlayersInfo.keys():
+            if self.PlayersInfo[a]['prio'] == "Disabled":
+                continue
+            try:
+                if (bool_in):
+                    ok = self.zcc.publish_command('start', a, argin = self.MetaDataPeriod, timeout_sec=30)
+                    if not ok:
+                        msg = "Sequence aborted, failed start method of player %s" % a
+                        self._report_message(msg,with_date=True)
+                        return False
+                else:
+                    ok = self.zcc.publish_command('stop', a, timeout_sec=30)
+                self.PlayersInfo[a]['status'] ='OFF'
+            except:
+                self.PlayersInfo[a]['status'] ='ALARM'
+                if DEBUG:
+                    traceback.print_exc()
+        if bool_in:
+            # Force closing running hdfwriters
+            self.datastorage_task_queue.put(['stop_and_clear'])
+            self.datastorage_task_queue.join()
+            #
+            self.datastorage_task_queue.put(['start_daq'])
+            self.datastorage_task_queue.join()
+            if self._description_message != '':
+                self._notify_metadata("Description", 1, self._description_message)
+        return True
+
+#-----------------------------------------------------------------------------------
+#    _pause_Players
+#-----------------------------------------------------------------------------------
+    def _pause_Players(self, bool_in):
+        #
+        for a in self.PlayersInfo.keys():
+            if self.PlayersInfo[a]['prio'] == "Disabled":
+                continue
+            try:
+                ok = self.zcc.publish_command('pause',a,argin = bool_in,timeout_sec=30)
+            except:
+                if DEBUG:
+                    traceback.print_exc()
+        return
+
+#-----------------------------------------------------------------------------------
+#    set_player_priority
+#-----------------------------------------------------------------------------------
+    def set_player_priority(self, player_name, priority):
+        try:
+            ok = self.zcc.publish_command('priority', player_name, argin = priority)
+            if ok:
+                if priority < 0:
+                    self.PlayersInfo[player_name]['prio'] = "Disabled"
+                else:
+                    self.PlayersInfo[player_name]['prio'] = str(priority)
+
+            else:
+                log_msg = "Error: unable to set pripority of player %s"% player_name
+                self._report_message(log_msg,with_date = True)
+                if DEBUG:
+                    print log_msg
+        except:
+            if DEBUG:
+                traceback.print_exc()
+
+
+#-----------------------------------------------------------------------------------
+#    _manage_message
+#-----------------------------------------------------------------------------------
+    def _manage_message(self,pl,msg):
+        if msg[0] == 'log':
+            self._notify_new_log("%s [%d,%d] %s" % (msg[1],msg[2],msg[3],msg[4]))
+        elif msg[0] == 'data':
+            dataname = msg[1]
+            full_daq_name = pl +"/"+dataname.lower()
+            if full_daq_name not in self.daq_sources.keys():
+                return
+            self.daq_sources[full_daq_name].last_ack_time = time.time()
+            if not self.daq_sources[full_daq_name].enabled:
+                return
+            data_alias = self.daq_sources[full_daq_name].data_alias
+            if self.daq_sources[full_daq_name].metadata:
+                self._notify_metadata(data_alias,msg[2],msg[4])
+                self.daq_sources[full_daq_name].last_trigger = msg[2]
+            else:
+                self._notify_new_data(data_alias,msg[2],msg[3],msg[4])
+                self.daq_sources[full_daq_name].last_trigger = msg[3]
+            self.daq_sources[full_daq_name].myState = "ON"
+
+#-----------------------------------------------------------------------------------
+#    _notify_new_log
+#-----------------------------------------------------------------------------------
+    def _notify_new_log(self, log_msg):
+        if log_msg not in self.last_log_messages:
+            self.last_log_messages.append(log_msg)
+            self.last_log_messages = self.last_log_messages[-100:]
+            self._report_message(log_msg,with_date = True)
+
+
+#-----------------------------------------------------------------------------------
+#    _notify_new_data
+#-----------------------------------------------------------------------------------
+    def _notify_new_data(self, data_name, trg_in, trg_f, data_in):
+        if self._state != 'ON':
+            return
+        if abs(trg_in - self.trg) > 1000:
+            # Fake trigger value
+            return
+        if DEBUG:
+            print "NEW DATA",data_name,trg_in, trg_f
+        if self.data_buffering_enabled:
+            self.buffer_data_queue.put([data_name,trg_in,trg_f,data_in])
+        if not self.EnableDataSaving:
+            return
+        self.datastorage_data_queue.put([data_name,trg_in,trg_f,data_in])
+
+
+#-----------------------------------------------------------------------------------
+#    _notify_metadata
+#-----------------------------------------------------------------------------------
+    def _notify_metadata(self, metadata_name, trg_in, data_in):
+        if not self.EnableDataSaving:
+            return
+        if DEBUG:
+            print "NEW METADATA",metadata_name,trg_in
+        self.datastorage_data_queue.put([metadata_name,trg_in,data_in])
+
+
+#-----------------------------------------------------------------------------------
+#    _check_datastorage_notification
+#-----------------------------------------------------------------------------------
+    def _check_datastorage_notification(self):
+        while self.datastorage_notif_queue.qsize() > 0:
+            last_notif = self.datastorage_notif_queue.get()
+            if DEBUG:
+                print last_notif
+            if last_notif[0] == 'hdf_finished':
+                self.last_files_saved.append(last_notif[2])
+                self._report_message(last_notif[-1],with_date = True)
+                for cb in self._file_finished_callbacks:
+                    try:
+                        cb(last_notif[2])
+                    except:
+                        pass
+            elif last_notif[0] == 'data_not_stored_alarm':
+                self._report_message("Data Storage Alarm activated, sequence will be stopped", with_date = True)
+                self.abort()
+            elif last_notif[0] == 'update_report':
+                self._report_message(last_notif[1],with_date = True)
+            elif last_notif[0] == 'stored_shot_counter':
+                stored_triggers = last_notif[1]
+                if (self.trg - stored_triggers) > max(self.file_size/2,1):
+                    if not self._datastorage_under_pressure:
+                        self._report_message("Data Storage Delay Alarm activated, sequence paused", with_date = True)
+                        self._datastorage_under_pressure = True
+                elif self._datastorage_under_pressure:
+                    self._report_message("Data Storage Delay under threshold", with_date = True)
+                    self._datastorage_under_pressure = False
+
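+    # Notification formats drained above, as emitted by DataStorage:
+    #   ['hdf_finished', hdf_key, full_file_path, report]
+    #   ['update_report', message]
+    #   ['stored_shot_counter', n_shots_saved]
+    # plus any topic forwarded through DataStorage.put_msg_in_notify_queue,
+    # e.g. 'data_not_stored_alarm'.
+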
+#-----------------------------------------------------------------------------------
+#    _report_message
+#-----------------------------------------------------------------------------------
+    def _report_message(self, message_in, with_date = False):
+        if with_date:
+            message_in = time.asctime() + " " + message_in
+        if DEBUG:
+            print message_in
+        new_report = ("\n".join([self._Report,message_in.strip()])).split("\n")
+        self._Report = "\n".join(new_report[-5000:])
+
+
+#-----------------------------------------------------------------------------------
+#    ResetReport
+#-----------------------------------------------------------------------------------
+    def ResetReport(self):
+        self._Report = ""
+
+
+#-----------------------------------------------------------------------------------
+#    set_DataAlias
+#-----------------------------------------------------------------------------------
+    def set_DataAlias(self, player_data_name, alias_name):
+        self.infoServer.write_to_db(['dataaliases',str(player_data_name),str(alias_name)])
+
+#-----------------------------------------------------------------------------------
+#    get_DataSources
+#-----------------------------------------------------------------------------------
+    def get_DataSources(self):
+        ret_dict = {}
+        try:
+            for ds in self.daq_sources:
+                if ds in self.DataAliases.keys():
+                    player_name = ds.split("/")[0]
+                    if (self.PlayersInfo[player_name]['prio'] == "Disabled") or (not self.daq_sources[ds].enabled):
+                        self.daq_sources[ds].myState = "OFF"
+                    else:
+                        self.daq_sources[ds].myState = self._state
+                        if self._state == "ON" and abs(self.daq_sources[ds].last_trigger - self.trg) > 1:
+                            self.daq_sources[ds].myState = 'ALARM'
+                    ret_dict[ds] = [self.DataAliases[ds], self.DaqSourcesEnabled[ds],self.daq_sources[ds].myState]
+        except:
+            pass
+        return ret_dict
+
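+    # Returned mapping sketch (hypothetical entry):
+    #   {'player1/wave': ['player1/wave', True, 'ON']}  # [alias, enabled, state]
+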
+#-----------------------------------------------------------------------------------
+#    set_MetaDataAlias
+#-----------------------------------------------------------------------------------
+    def set_MetaDataAlias(self, player_data_name, alias_name):
+        self.infoServer.write_to_db(['metadata_aliases',player_data_name,alias_name])
+
+
+#-----------------------------------------------------------------------------------
+#    get_MetaData
+#-----------------------------------------------------------------------------------
+    def get_MetaData(self):
+        ret_dict = {}
+        try:
+            for ds in self.daq_sources:
+                if ds in self.MetaDataAliases.keys():
+                    player_name = ds.split("/")[0]
+                    if (self.PlayersInfo[player_name]['prio'] == "Disabled") or (not self.daq_sources[ds].enabled):
+                        self.daq_sources[ds].myState = "OFF"
+                    else:
+                        self.daq_sources[ds].myState = self._state
+                        if self._state == "ON" and abs(self.daq_sources[ds].last_trigger - self.trg) > self.MetaDataPeriod:
+                            self.daq_sources[ds].myState = 'ALARM'
+                    ret_dict[ds] = [self.MetaDataAliases[ds], self.MetaDaqsEnabled[ds],self.daq_sources[ds].myState]
+        except:
+            pass
+        return ret_dict
+
+#-----------------------------------------------------------------------------------
+#    set_file_prefix
+#-----------------------------------------------------------------------------------
+    def set_file_prefix(self, prefix):
+        self.datastorage_task_queue.put(['file_prefix',prefix])
+        self.datastorage_task_queue.join()
+        self.file_prefix = prefix
+
+#-----------------------------------------------------------------------------------
+#    set_file_path
+#-----------------------------------------------------------------------------------
+    def set_file_path(self, fpath):
+        self.datastorage_task_queue.put(['file_path',fpath])
+        self.datastorage_task_queue.join()
+        self.file_path = fpath
+
+#-----------------------------------------------------------------------------------
+#    set_files_contiguous
+#-----------------------------------------------------------------------------------
+    def set_files_contiguous(self, bool_in):
+        self.datastorage_task_queue.put(['Files_contiguous',bool_in])
+        self.datastorage_task_queue.join()
+        self.files_contiguous = bool_in
+
+
+#-----------------------------------------------------------------------------------
+#    set_file_size
+#-----------------------------------------------------------------------------------
+    def set_file_size(self, nTriggers):
+        self.datastorage_task_queue.put(['File_size',nTriggers])
+        self.datastorage_task_queue.join()
+        self.file_size = nTriggers
+
+
+#-----------------------------------------------------------------------------------
+#    get_max_triggers
+#-----------------------------------------------------------------------------------
+    def get_max_triggers(self):
+        return self.max_triggers
+
+#-----------------------------------------------------------------------------------
+#    set_max_triggers
+#-----------------------------------------------------------------------------------
+    def set_max_triggers(self, nTriggers):
+        self.datastorage_task_queue.put(['max_triggers', nTriggers])
+        self.datastorage_task_queue.join()
+        self.max_triggers = nTriggers
+
+
+#-----------------------------------------------------------------------------------
+#    set_daq_on_off
+#-----------------------------------------------------------------------------------
+    def set_daq_on_off(self, full_daq_name, value_in):
+        if value_in:
+            self.datastorage_task_queue.put(['daq_switch_on',[full_daq_name]])
+        else:
+            self.datastorage_task_queue.put(['daq_switch_off',[full_daq_name]])
+        self.datastorage_task_queue.join()
+        self.infoServer.write_to_db(['datasources_enabled',full_daq_name,str(value_in)])
+        self.daq_sources[full_daq_name].enabled = value_in
+        self.DaqSourcesEnabled[full_daq_name] = value_in
+
+#-----------------------------------------------------------------------------------
+#    set_metadata_on_off
+#-----------------------------------------------------------------------------------
+    def set_metadata_on_off(self, full_daq_name, value_in):
+        if value_in:
+            self.datastorage_task_queue.put(['daq_switch_on',[full_daq_name]])
+        else:
+            self.datastorage_task_queue.put(['daq_switch_off',[full_daq_name]])
+        self.datastorage_task_queue.join()
+        self.infoServer.write_to_db(['metadata_enabled',full_daq_name,str(value_in)])
+        self.daq_sources[full_daq_name].enabled = value_in
+        #self.MetaDaqsEnabled[full_daq_name] = value_in
+
+#-----------------------------------------------------------------------------------
+#    set_metadata_period
+#-----------------------------------------------------------------------------------
+    def set_metadata_period(self, value_in):
+        self.MetaDataPeriod = value_in
+
+#-----------------------------------------------------------------------------------
+#    get_metadata_period
+#-----------------------------------------------------------------------------------
+    def get_metadata_period(self):
+        return self.MetaDataPeriod
+
+#-----------------------------------------------------------------------------------
+#    get_file_prefix
+#-----------------------------------------------------------------------------------
+    def get_file_prefix(self):
+        return self.file_prefix
+
+#-----------------------------------------------------------------------------------
+#    get_file_path
+#-----------------------------------------------------------------------------------
+    def get_file_path(self):
+        return self.file_path
+
+#-----------------------------------------------------------------------------------
+#    get_files_contiguous
+#-----------------------------------------------------------------------------------
+    def get_files_contiguous(self):
+        return self.files_contiguous
+
+#-----------------------------------------------------------------------------------
+#    get_file_size
+#-----------------------------------------------------------------------------------
+    def get_file_size(self):
+        return self.file_size
+
+#-----------------------------------------------------------------------------------
+#    quit_and_exit
+#-----------------------------------------------------------------------------------
+    def quit_and_exit(self):
+        self._alive = False
+        self._started = False
+
+#-----------------------------------------------------------------------------------
+#    get_last_file_saved
+#-----------------------------------------------------------------------------------
+    def get_last_file_saved(self):
+        return self.last_files_saved
+
+#-----------------------------------------------------------------------------------
+#    get_report
+#-----------------------------------------------------------------------------------
+    def get_report(self):
+        return self._Report
+
+#-----------------------------------------------------------------------------------
+#    get_description_message
+#-----------------------------------------------------------------------------------
+    def get_description_message(self):
+        return self._description_message
+
+#-----------------------------------------------------------------------------------
+#    set_description_message
+#-----------------------------------------------------------------------------------
+    def set_description_message(self, msg_in):
+        self._description_message = msg_in
+
+
+#-----------------------------------------------------------------------------------
+#    get_players_timeout
+#-----------------------------------------------------------------------------------
+    def get_players_timeout(self):
+        return self._players_timeout
+
+#-----------------------------------------------------------------------------------
+#    set_players_timeout
+#-----------------------------------------------------------------------------------
+    def set_players_timeout(self, tmo_in):
+        self._players_timeout = tmo_in
+
+
+#-----------------------------------------------------------------------------------
+#    remove_player_info
+#-----------------------------------------------------------------------------------
+    def remove_player_info(self, player_name):
+        if player_name in self.PlayersInfo.keys():
+            self.infoServer.del_from_db(['donkiplayers',player_name])
+            ds_to_remove = []
+            for ds in self.daq_sources:
+                if player_name == ds.split("/")[0]:
+                    ds_to_remove.append(ds)
+            for dskey in ds_to_remove:
+                del self.daq_sources[dskey]
+            return True
+        else:
+            return False
+
+#-----------------------------------------------------------------------------------
+#    add_file_finished_callback
+#-----------------------------------------------------------------------------------
+    def add_file_finished_callback(self, method):
+        if method not in self._file_finished_callbacks:
+            self._file_finished_callbacks.append(method)
+
+#-----------------------------------------------------------------------------------
+#    external_buffering_enabled
+#-----------------------------------------------------------------------------------
+    def external_buffering_enabled(self, enabled_in):
+        self.data_buffering_enabled = enabled_in
+        if enabled_in:
+            # Return the queue used for data notification
+            return self.buffer_data_queue
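When external buffering is enabled, the method above hands back buffer_data_queue and every subsequent data notification lands on it. A consumer sketch, assuming each item mirrors the _notify_new_data() arguments used later in run(), i.e. (source_name, trg_start, trg_stop, values); the actual layout is defined in DataBuffer.py:

    def drain_buffer(buffer_data_queue):
        while True:
            # Assumed item layout: (source_name, trg_start, trg_stop, values)
            source, trg_start, trg_stop, values = buffer_data_queue.get()
            print "new data from", source, "triggers", trg_start, "-", trg_stop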
+#-----------------------------------------------------------------------------------
+#    please_start
+#-----------------------------------------------------------------------------------
+    def please_start(self):
+        self._started = True
+
+#-----------------------------------------------------------------------------------
+#    set_paused
+#-----------------------------------------------------------------------------------
+    def set_paused(self, value_in):
+        self._paused = value_in
+        self.datastorage_task_queue.put(['set_pause',value_in])
+
+
+#-----------------------------------------------------------------------------------
+#    is_paused
+#-----------------------------------------------------------------------------------
+    def is_paused(self):
+        return self._paused
+
+#-----------------------------------------------------------------------------------
+#    abort
+#-----------------------------------------------------------------------------------
+    def abort(self):
+        self._started = False
+        self.datastorage_task_queue.put(['stop_and_clear'])
+
+#-----------------------------------------------------------------------------------
+#    run
+#-----------------------------------------------------------------------------------
+    def run(self):
+        knownPlayersInfo = self.PlayersInfo.copy()
+        while self._alive:
+            if not self._started:
+                # IDLE state, check only Players status
+                try:
+                    self._check_players_changed()
+                    if knownPlayersInfo != self.PlayersInfo:
+                        self._retrieve_players_info(reconnect = True)
+                        knownPlayersInfo = self.PlayersInfo.copy()
+                    else:
+                        # Send a dummy negative trigger, something like a 'ping'
+                        self.zcc.publish_trigger(-1, -1)
+                        not_responding_Players = self.PlayersInfo.keys()
+                        t0 = time.time()
+                        need_to_update_infos = False
+                        while not_responding_Players and not self._started:
+                            pl_msgs = self.zcc.wait_message(not_responding_Players)
+                            if pl_msgs is not None and len(pl_msgs):
+                                for pl in pl_msgs:
+                                    idx = not_responding_Players.index(pl)
+                                    del not_responding_Players[idx]
+                                    if 'status' not in self.PlayersInfo[pl].keys() or self.PlayersInfo[pl]['status'] == 'ALARM':
+                                        msg = "GOOD NEWS: DonkiPlayer %s start responding." % pl
+                                        self._report_message(msg, with_date=True)
+                                        need_to_update_infos = True
+                                    self.PlayersInfo[pl]['status'] = 'OFF'
+                                    new_msg = pl_msgs[pl]
+                                    topic = new_msg[0].lower()
+                                    if topic != 'ack':
+                                        self._manage_message(pl,new_msg)
+                            elif (time.time() - t0) > 5:
+                                if DEBUG:
+                                    print "NOT RESPONDING",not_responding_Players
+                                for pl in not_responding_Players:
+                                    if pl in self.PlayersInfo and 'status' in self.PlayersInfo[pl].keys():
+                                        if self.PlayersInfo[pl]['status'] != 'ALARM':
+                                            msg = "WARNING: DonkiPlayer %s not responding." % pl
+                                            self._report_message(msg, with_date=True)
+                                        self.PlayersInfo[pl]['status'] ='ALARM'
+                                break
+                        if need_to_update_infos:
+                            self._retrieve_players_info()
+                except:
+                    traceback.print_exc()
+            else:
+                # Received a start command
+                self.ResetReport()
+                upper_priority = self._retrieve_players_info()
+                if not self._start_stop_Players(True):
+                    self._started = False
+                    # Manage existing log messages
+                    pl_msgs = self.zcc.wait_message(self.PlayersInfo.keys(), timeout_sec = 0.2)
+                    if pl_msgs is not None and len(pl_msgs):
+                        for pl in pl_msgs:
+                            self._manage_message(pl,pl_msgs[pl])
+                    self._start_stop_Players(False)
+                    continue
+                # Set the HDF file timeout based on the players' timeout
+                self.datastorage_task_queue.put(['set_file_timeout', self._players_timeout + 10])
+                self.datastorage_task_queue.join()
+                #
+                self.last_files_saved = []
+                self.last_log_messages = []
+                self._state = "ON"
+                self._report_message("DonkiDirector started",with_date=True)
+                self._paused = False
+                self._datastorage_under_pressure = False
+                self.slowest_player_time = 1.0
+                self.trg = 0
+                t_start = time.time()
+                while ((self.trg < self.max_triggers) or (self.max_triggers < 0)) and self._alive:
+                    if not self._started :
+                        break
+                    self._check_datastorage_notification()
+                    # Manage pause
+                    if (self._paused) or self._datastorage_under_pressure:
+                        if self._paused and self._state != "STANDBY":
+                            self._pause_Players(self._paused)
+                        self._state = "STANDBY"
+                        pl_msgs = self.zcc.wait_message(self.PlayersInfo.keys(), timeout_sec = 0.2)
+                        if pl_msgs is not None and len(pl_msgs):
+                            for pl in pl_msgs:
+                                self._manage_message(pl,pl_msgs[pl])
+                        continue
+                    elif not(self._paused) and self._state != "ON":
+                        self._pause_Players(self._paused)
+                    # System not paused
+                    self.trg += 1
+                    self._state = "ON"
+                    if DEBUG:
+                        self._report_message(("Sending Trigger %d of %d" % (self.trg,self.max_triggers)),with_date=True)
+                    self._notify_new_data("triggers_timestamp", self.trg, self.trg, [time.time()])
+                    for priority in range(upper_priority+1):
+                        if not priority in self.Players_per_level.keys():
+                            continue
+                        self.busy_Players= self.Players_per_level[priority][:]
+                        for pl in self.busy_Players:
+                            self.PlayersInfo[pl]['status'] ='ON'
+                        if DEBUG:
+                            print "----","TRIGGER:",self.trg,"PRIORITY:",priority,"----"
+                        self.actual_priority = priority
+                        t0 = time.time()
+                        self.zcc.publish_trigger(self.trg, priority)
+                        while self.busy_Players:# and self._started:
+                            pl_msgs = self.zcc.wait_message(self.busy_Players)
+                            if pl_msgs is not None and len(pl_msgs):
+                                for pl in pl_msgs:
+                                    new_msg = pl_msgs[pl]
+                                    topic = new_msg[0].lower()
+                                    trg = new_msg[1]
+                                    prio = new_msg[2]
+                                    if topic == 'ack' and trg == self.trg:
+                                        idx = self.busy_Players.index(pl)
+                                        del self.busy_Players[idx]
+                                    else:
+                                        self._manage_message(pl,new_msg)
+                            elif (time.time() - t0) > self._players_timeout:
+                                # 20.05.2021 RB: next line added to abort in timeout case
+                                self._started = False
+                                # Iterate over a copy: removing players from the
+                                # list while iterating over it would skip entries
+                                for pl in self.busy_Players[:]:
+                                    self._report_message("Player %s Timeout: aborting" % pl,with_date=True)
+                                    self.PlayersInfo[pl]['status'] = 'ALARM'
+                                    self.busy_Players.remove(pl)
+                                t0 = time.time()
+                        if DEBUG:
+                            print "Delay:",(time.time()-t0) * 1000,"ms"
+                        if self.slowest_player_time < (time.time()-t0):
+                            self.slowest_player_time = (time.time()-t0)
+                self._start_stop_Players(False)
+                t_full = time.time() - t_start
+                self._state = "OFF"
+                self._report_message("DonkiDirector stopped",with_date=True)
+                self._started = False
+                self._report_message("Execution time: %g, Average time per trigger: %g" % (t_full, t_full/float(self.trg)),with_date=True)
+                # Set the HDF writers' file timeout so that waiting threads can close
+                self.datastorage_task_queue.put(['set_file_timeout', self.slowest_player_time*2])
+                self.datastorage_task_queue.join()
+            #
+            self._check_datastorage_notification()
+            time.sleep(THREAD_DELAY_SECS)
+        #
+        self.infoServer._stop_ = True
+        #
+        self.datastorage_task_queue.put(['stop'])
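The run() loop above is the director side of the protocol: for each priority level it publishes a (trigger, priority) pair and blocks until every player of that level answers with an 'ack' carrying the same trigger number; a negative trigger is the idle-time ping. A player's reply loop would look roughly like this raw pyzmq sketch (endpoints and wire format are assumptions; the real framing lives in DonkiOrchestraLib.py):

    import zmq

    ctx = zmq.Context()
    sub = ctx.socket(zmq.SUB)
    sub.connect("tcp://director-host:50100")    # hypothetical trigger endpoint
    sub.setsockopt(zmq.SUBSCRIBE, "")
    pub = ctx.socket(zmq.PUB)
    pub.bind("tcp://*:50200")                   # hypothetical ack endpoint

    while True:
        parts = sub.recv().split()              # assumed frame: "trigger <trg> <priority>"
        trg, priority = int(parts[1]), int(parts[2])
        if trg >= 0:
            pass                                # acquire data for this trigger here
        pub.send("ack %d %d" % (trg, priority)) # answer real triggers and pings alike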
diff --git a/src/DonkiDirectorServer.py b/src/DonkiDirectorServer.py
new file mode 100755
index 0000000..3ce29f1
--- /dev/null
+++ b/src/DonkiDirectorServer.py
@@ -0,0 +1,751 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*- 
+
+
+##############################################################################
+## license :
+##============================================================================
+##
+## File :        DonkiDirectorServer.py
+## 
+## Project :     DonkiDirector Tango interface
+##
+## This file is part of Tango device class.
+## 
+## Tango is free software: you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation, either version 3 of the License, or
+## (at your option) any later version.
+## 
+## Tango is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+## GNU General Public License for more details.
+## 
+## You should have received a copy of the GNU General Public License
+## along with Tango.  If not, see <http://www.gnu.org/licenses/>.
+## 
+##
+## $Author :      sci.comp$
+##
+## $Revision :    $
+##
+## $Date :        $
+##
+## $HeadUrl :     $
+##============================================================================
+##            This file is generated by POGO
+##    (Program Obviously used to Generate tango Object)
+##
+##        (c) - Software Engineering Group - ESRF
+##############################################################################
+
+"""DonkiDirector Tango device"""
+
+__all__ = ["DonkiDirectorServer", "DonkiDirectorServerClass", "main"]
+
+__docformat__ = 'restructuredtext'
+
+import PyTango
+import sys
+# Add additional import
+#----- PROTECTED REGION ID(DonkiDirectorServer.additionnal_import) ENABLED START -----#
+from DirectorBgnThread import directorThread
+from DataBuffer import donkiBuffer,MyEncoder
+import traceback
+import json
+#----- PROTECTED REGION END -----#	//	DonkiDirectorServer.additionnal_import
+
+## Device States Description
+## No states for this device
+
+class DonkiDirectorServer (PyTango.Device_4Impl):
+
+    #--------- Add your global variables here --------------------------
+    #----- PROTECTED REGION ID(DonkiDirectorServer.global_variables) ENABLED START -----#
+    def file_finished_callback(self, filename_in):
+        for attr in self.NotifyFileFinished:
+            try:
+                if attr not in self.file_finished_proxies.keys():
+                    self.file_finished_proxies[attr] = PyTango.AttributeProxy(attr)
+                (self.file_finished_proxies[attr]).write(filename_in)
+            except:
+                self.error_stream("Unable to set callback attribute: " + attr )
+                self.error_stream(traceback.format_exc())
+                
+            
+    #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.global_variables
+
+    def __init__(self,cl, name):
+        PyTango.Device_4Impl.__init__(self,cl,name)
+        self.debug_stream("In __init__()")
+        DonkiDirectorServer.init_device(self)
+        #----- PROTECTED REGION ID(DonkiDirectorServer.__init__) ENABLED START -----#
+
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.__init__
+        
+    def delete_device(self):
+        self.debug_stream("In delete_device()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.delete_device) ENABLED START -----#
+        self.dt.quit_and_exit()
+        self.dataBuff.stop_and_exit()
+        self.dt.join()
+        del self.dt
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.delete_device
+
+    def init_device(self):
+        self.debug_stream("In init_device()")
+        self.get_device_properties(self.get_device_class())
+        self.attr_MaxTriggers_read = 0
+        self.attr_EnableDataStoring_read = False
+        self.attr_FileSize_read = 0
+        self.attr_FilePrefix_read = ''
+        self.attr_FilePath_read = ''
+        self.attr_MetaDataPeriod_read = 0
+        self.attr_Trigger_read = 0
+        self.attr_Report_read = ''
+        self.attr_DescriptionMessage_read = ''
+        self.attr_PlayersTimeout_read = 0
+        self.attr_Paused_read = False
+        self.attr_Players_read = ['']
+        self.attr_DataSources_read = ['']
+        self.attr_MetaDataSources_read = ['']
+        self.attr_LastSavedFiles_read = ['']
+        #----- PROTECTED REGION ID(DonkiDirectorServer.init_device) ENABLED START -----#
+        self.file_finished_proxies = {}
+        self.dt = directorThread(self.infoserver_port)
+        self.dt.add_file_finished_callback(self.file_finished_callback)
+        self.dt.start()
+        data_queue = self.dt.external_buffering_enabled(True)
+        self.dataBuff = donkiBuffer(data_queue, self.DataBufferMaxSize)
+        self.dataBuff.start()
+        
+        self.attr_EnableDataStoring_read = True
+        self.dt.EnableDataSaving = self.attr_EnableDataStoring_read
+        
+        self.set_state(PyTango.DevState.STANDBY)
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.init_device
+
+    def always_executed_hook(self):
+        self.debug_stream("In always_excuted_hook()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.always_executed_hook) ENABLED START -----#
+        if self.dt._started:
+            if self.get_state() != PyTango.DevState.ON:
+                self.set_state(PyTango.DevState.ON)
+        elif self.get_state() != PyTango.DevState.STANDBY:
+            self.set_state(PyTango.DevState.STANDBY)
+
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.always_executed_hook
+
+    #-----------------------------------------------------------------------------
+    #    DonkiDirectorServer read/write attribute methods
+    #-----------------------------------------------------------------------------
+    
+    def read_MaxTriggers(self, attr):
+        self.debug_stream("In read_MaxTriggers()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.MaxTriggers_read) ENABLED START -----#
+        self.attr_MaxTriggers_read = self.dt.get_max_triggers()
+        attr.set_value(self.attr_MaxTriggers_read)
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.MaxTriggers_read
+        
+    def write_MaxTriggers(self, attr):
+        self.debug_stream("In write_MaxTriggers()")
+        data=attr.get_write_value()
+        #----- PROTECTED REGION ID(DonkiDirectorServer.MaxTriggers_write) ENABLED START -----#
+        self.dt.set_max_triggers(data)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.MaxTriggers_write
+        
+    def read_EnableDataStoring(self, attr):
+        self.debug_stream("In read_EnableDataStoring()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.EnableDataStoring_read) ENABLED START -----#
+        attr.set_value(self.attr_EnableDataStoring_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.EnableDataStoring_read
+        
+    def write_EnableDataStoring(self, attr):
+        self.debug_stream("In write_EnableDataStoring()")
+        data=attr.get_write_value()
+        #----- PROTECTED REGION ID(DonkiDirectorServer.EnableDataStoring_write) ENABLED START -----#
+        self.attr_EnableDataStoring_read = data
+        self.dt.EnableDataSaving = self.attr_EnableDataStoring_read
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.EnableDataStoring_write
+        
+    def read_FileSize(self, attr):
+        self.debug_stream("In read_FileSize()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.FileSize_read) ENABLED START -----#
+        self.attr_FileSize_read = self.dt.get_file_size()
+        attr.set_value(self.attr_FileSize_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.FileSize_read
+        
+    def write_FileSize(self, attr):
+        self.debug_stream("In write_FileSize()")
+        data=attr.get_write_value()
+        #----- PROTECTED REGION ID(DonkiDirectorServer.FileSize_write) ENABLED START -----#
+        self.dt.set_file_size(data)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.FileSize_write
+        
+    def read_FilePrefix(self, attr):
+        self.debug_stream("In read_FilePrefix()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.FilePrefix_read) ENABLED START -----#
+        self.attr_FilePrefix_read = self.dt.file_prefix
+        attr.set_value(self.attr_FilePrefix_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.FilePrefix_read
+        
+    def write_FilePrefix(self, attr):
+        self.debug_stream("In write_FilePrefix()")
+        data=attr.get_write_value()
+        #----- PROTECTED REGION ID(DonkiDirectorServer.FilePrefix_write) ENABLED START -----#
+        self.attr_FilePrefix_read = data
+        self.dt.set_file_prefix(self.attr_FilePrefix_read)
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.FilePrefix_write
+        
+    def read_FilePath(self, attr):
+        self.debug_stream("In read_FilePath()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.FilePath_read) ENABLED START -----#
+        self.attr_FilePath_read = self.dt.file_path
+        attr.set_value(self.attr_FilePath_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.FilePath_read
+        
+    def write_FilePath(self, attr):
+        self.debug_stream("In write_FilePath()")
+        data=attr.get_write_value()
+        #----- PROTECTED REGION ID(DonkiDirectorServer.FilePath_write) ENABLED START -----#
+        self.attr_FilePath_read = data
+        self.dt.set_file_path(self.attr_FilePath_read)
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.FilePath_write
+        
+    def read_MetaDataPeriod(self, attr):
+        self.debug_stream("In read_MetaDataPeriod()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.MetaDataPeriod_read) ENABLED START -----#
+        self.attr_MetaDataPeriod_read = self.dt.get_metadata_period()
+        attr.set_value(self.attr_MetaDataPeriod_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.MetaDataPeriod_read
+        
+    def write_MetaDataPeriod(self, attr):
+        self.debug_stream("In write_MetaDataPeriod()")
+        data=attr.get_write_value()
+        #----- PROTECTED REGION ID(DonkiDirectorServer.MetaDataPeriod_write) ENABLED START -----#
+        self.dt.set_metadata_period(data)
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.MetaDataPeriod_write
+        
+    def read_Trigger(self, attr):
+        self.debug_stream("In read_Trigger()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.Trigger_read) ENABLED START -----#
+        self.attr_Trigger_read = self.dt.trg
+        attr.set_value(self.attr_Trigger_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.Trigger_read
+        
+    def read_Report(self, attr):
+        self.debug_stream("In read_Report()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.Report_read) ENABLED START -----#
+        self.attr_Report_read = self.dt.get_report()
+        attr.set_value(self.attr_Report_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.Report_read
+        
+    def read_DescriptionMessage(self, attr):
+        self.debug_stream("In read_DescriptionMessage()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.DescriptionMessage_read) ENABLED START -----#
+        self.attr_DescriptionMessage_read = self.dt.get_description_message()
+        attr.set_value(self.attr_DescriptionMessage_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.DescriptionMessage_read
+        
+    def write_DescriptionMessage(self, attr):
+        self.debug_stream("In write_DescriptionMessage()")
+        data=attr.get_write_value()
+        #----- PROTECTED REGION ID(DonkiDirectorServer.DescriptionMessage_write) ENABLED START -----#
+        self.dt.set_description_message(data)
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.DescriptionMessage_write
+        
+    def read_PlayersTimeout(self, attr):
+        self.debug_stream("In read_PlayersTimeout()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.PlayersTimeout_read) ENABLED START -----#
+        self.attr_PlayersTimeout_read = self.dt.get_players_timeout()
+        attr.set_value(self.attr_PlayersTimeout_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.PlayersTimeout_read
+        
+    def write_PlayersTimeout(self, attr):
+        self.debug_stream("In write_PlayersTimeout()")
+        data=attr.get_write_value()
+        #----- PROTECTED REGION ID(DonkiDirectorServer.PlayersTimeout_write) ENABLED START -----#
+        self.dt.set_players_timeout(data)
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.PlayersTimeout_write
+        
+    def read_Paused(self, attr):
+        self.debug_stream("In read_Paused()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.Paused_read) ENABLED START -----#
+        self.attr_Paused_read = self.dt.is_paused()
+        attr.set_value(self.attr_Paused_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.Paused_read
+        
+    def write_Paused(self, attr):
+        self.debug_stream("In write_Paused()")
+        data=attr.get_write_value()
+        #----- PROTECTED REGION ID(DonkiDirectorServer.Paused_write) ENABLED START -----#
+        self.dt.set_paused(data)
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.Paused_write
+        
+    def read_Players(self, attr):
+        self.debug_stream("In read_Players()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.Players_read) ENABLED START -----#
+        self.attr_Players_read = []
+        for item in self.dt.PlayersInfo.items():
+            self.attr_Players_read.append(str(item))
+        attr.set_value(self.attr_Players_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.Players_read
+        
+    def read_DataSources(self, attr):
+        self.debug_stream("In read_DataSources()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.DataSources_read) ENABLED START -----#
+        
+        self.attr_DataSources_read = []
+        for item in (self.dt.get_DataSources()).items():
+            self.attr_DataSources_read.append(str(item))
+        attr.set_value(self.attr_DataSources_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.DataSources_read
+        
+    def read_MetaDataSources(self, attr):
+        self.debug_stream("In read_MetaDataSources()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.MetaDataSources_read) ENABLED START -----#
+        self.attr_MetaDataSources_read = []
+        for item in (self.dt.get_MetaData()).items():
+            self.attr_MetaDataSources_read.append(str(item))
+        attr.set_value(self.attr_MetaDataSources_read)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.MetaDataSources_read
+        
+    def read_LastSavedFiles(self, attr):
+        self.debug_stream("In read_LastSavedFiles()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.LastSavedFiles_read) ENABLED START -----#
+        self.attr_LastSavedFiles_read = self.dt.get_last_file_saved()
+        attr.set_value(self.attr_LastSavedFiles_read[-100:])
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.LastSavedFiles_read
+        
+    
+    
+        #----- PROTECTED REGION ID(DonkiDirectorServer.initialize_dynamic_attributes) ENABLED START -----#
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.initialize_dynamic_attributes
+            
+    def read_attr_hardware(self, data):
+        self.debug_stream("In read_attr_hardware()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.read_attr_hardware) ENABLED START -----#
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.read_attr_hardware
+
+
+    #-----------------------------------------------------------------------------
+    #    DonkiDirectorServer command methods
+    #-----------------------------------------------------------------------------
+    
+    def Start(self):
+        """ Starts a scheduler sequence
+        
+        :param : 
+        :type: PyTango.DevVoid
+        :return: 
+        :rtype: PyTango.DevVoid """
+        self.debug_stream("In Start()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.Start) ENABLED START -----#
+        self.dt.please_start()
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.Start
+        
+    def Stop(self):
+        """ Stops a scheduler sequence
+        
+        :param : 
+        :type: PyTango.DevVoid
+        :return: 
+        :rtype: PyTango.DevVoid """
+        self.debug_stream("In Stop()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.Stop) ENABLED START -----#
+        self.dt.abort()
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.Stop
+        
+    def SetPlayerPriority(self, argin):
+        """ Set scheduling prority for a player.
+        
+        :param argin: ([priority],[player_name])
+        :type: PyTango.DevVarLongStringArray
+        :return: 
+        :rtype: PyTango.DevVoid """
+        self.debug_stream("In SetPlayerPriority()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.SetPlayerPriority) ENABLED START -----#
+        self.dt.set_player_priority(argin[1][0],argin[0][0])
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.SetPlayerPriority
+        
+    def RenameDataSource(self, argin):
+        """ Define an alias for a data source
+        
+        :param argin: (``datasource_fqn``,``alias``)
+        :type: PyTango.DevVarStringArray
+        :return: 
+        :rtype: PyTango.DevVoid """
+        self.debug_stream("In RenameDataSource()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.RenameDataSource) ENABLED START -----#
+        self.dt.set_DataAlias(argin[0],argin[1])
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.RenameDataSource
+        
+    def RemovePlayer(self, argin):
+        """ Remove a player from the scheduler structure.
+        
+        :param argin: Player name.
+        :type: PyTango.DevString
+        :return: 
+        :rtype: PyTango.DevVoid """
+        self.debug_stream("In RemovePlayer()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.RemovePlayer) ENABLED START -----#
+        self.dt.remove_player_info(argin)
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.RemovePlayer
+        
+    def EnableDataSource(self, argin):
+        """ Enable/disable data source saving
+        
+        :param argin: [[0/1],[DataSourceName]]
+        :type: PyTango.DevVarLongStringArray
+        :return: 
+        :rtype: PyTango.DevVoid """
+        self.debug_stream("In EnableDataSource()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.EnableDataSource) ENABLED START -----#
+        self.dt.set_daq_on_off(argin[1][0],(argin[0][0] == 1))
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.EnableDataSource
+        
+    def EnableMetaData(self, argin):
+        """ Enable/disable metadata source saving
+        
+        :param argin: [[0/1],[MetaDataName]]
+        :type: PyTango.DevVarLongStringArray
+        :return: 
+        :rtype: PyTango.DevVoid """
+        self.debug_stream("In EnableMetaData()")
+        #----- PROTECTED REGION ID(DonkiDirectorServer.EnableMetaData) ENABLED START -----#
+        self.dt.set_metadata_on_off(argin[1][0],(argin[0][0] == 1))
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.EnableMetaData
+        
+    def GetAll(self, argin):
+        """ Get the full data buffer relative to a specific datasource
+        
+        :param argin: datasource
+        :type: PyTango.DevString
+        :return: JSON message
+        :rtype: PyTango.DevString """
+        self.debug_stream("In GetAll()")
+        argout = ''
+        #----- PROTECTED REGION ID(DonkiDirectorServer.GetAll) ENABLED START -----#
+        if argin not in self.dataBuff.dbuffers.keys():
+            PyTango.Except.throw_exception("DataSource error",
+                                           "DataBuffer does not exist.",
+                                           "GetAll()")
+        argout = json.dumps(self.dataBuff.retrieve_all(argin),cls=MyEncoder)
+
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.GetAll
+        return argout
+        
+    def GetRange(self, argin):
+        """ Get the content of the data buffer in the range [first_trigger,last_trigger] relative to a specific datasource
+        
+        :param argin: datasource, first_trigger, last_trigger
+        :type: PyTango.DevVarStringArray
+        :return: JSON message
+        :rtype: PyTango.DevString """
+        self.debug_stream("In GetRange()")
+        argout = ''
+        #----- PROTECTED REGION ID(DonkiDirectorServer.GetRange) ENABLED START -----#
+        if argin[0] not in self.dataBuff.dbuffers.keys():
+            PyTango.Except.throw_exception("DataSource error",
+                                           "DataBuffer does not exist.",
+                                           "GetRange()")
+        #
+        tr_min = int(argin[1])
+        tr_max = int(argin[2])
+        if tr_max < tr_min:
+            PyTango.Except.throw_exception("Input Argument error",
+                                           "first_trigger must not exceed last_trigger.",
+                                           "GetRange()")
+
+        trg_range = self.dataBuff.trg_range[argin[0]]
+
+        if (tr_min < trg_range[0]) or (tr_max > trg_range[1]):
+            PyTango.Except.throw_exception("DataSource error",
+                                           "Trigger out of range.",
+                                           "GetRange()")
+        argout = json.dumps(self.dataBuff.retrieve_range(argin[0],tr_min,tr_max),cls=MyEncoder)
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.GetRange
+        return argout
+        
+    def GetLast(self, argin):
+        """ Get the last collected samples from the data buffer relative to a specific datasource
+        
+        :param argin: datasource, number_of_samples
+        :type: PyTango.DevVarStringArray
+        :return: JSON message
+        :rtype: PyTango.DevString """
+        self.debug_stream("In GetLast()")
+        argout = ''
+        #----- PROTECTED REGION ID(DonkiDirectorServer.GetLast) ENABLED START -----#
+        trigs = int(argin[1])
+                                               
+        if argin[0] not in self.dataBuff.dbuffers.keys():
+            PyTango.Except.throw_exception("DataSource error",
+                                           "DataBuffer does not exist.",
+                                           "GetLast()")
+        
+        trg_range = self.dataBuff.trg_range[argin[0]]
+        tr_min = max(trg_range[0], trg_range[1] - (trigs - 1))
+            
+        argout = json.dumps(self.dataBuff.retrieve_range(argin[0],tr_min,trg_range[1]),cls=MyEncoder)
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.GetLast
+        return argout
+        
+    def GetTriggerRange(self, argin):
+        """ Get the available trigger range in the data buffer relative to a specific datasource
+        
+        :param argin: datasource
+        :type: PyTango.DevString
+        :return: [trg_min,trg_max]
+        :rtype: PyTango.DevVarLongArray """
+        self.debug_stream("In GetTriggerRange()")
+        argout = [0]
+        #----- PROTECTED REGION ID(DonkiDirectorServer.GetTriggerRange) ENABLED START -----#
+        if argin not in self.dataBuff.trg_range.keys():
+            PyTango.Except.throw_exception("DataSource error",
+                                           "DataBuffer does not exist.",
+                                           "GetTriggerRange()")
+        argout = self.dataBuff.trg_range[argin]
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.GetTriggerRange
+        return argout
+        
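Client side, the three buffer commands combine naturally: query the available trigger range first, then fetch by range and decode the JSON payload. A sketch with an assumed device name and datasource key:

    import json
    import PyTango

    director = PyTango.DeviceProxy("test/donkidirector/1")  # hypothetical device name
    source = "player1/data"                                 # hypothetical datasource key

    trg_min, trg_max = director.GetTriggerRange(source)
    full = json.loads(director.GetRange([source, str(trg_min), str(trg_max)]))
    last10 = json.loads(director.GetLast([source, "10"]))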
+
+class DonkiDirectorServerClass(PyTango.DeviceClass):
+    #--------- Add your global class variables here --------------------------
+    #----- PROTECTED REGION ID(DonkiDirectorServer.global_class_variables) ENABLED START -----#
+    
+    #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.global_class_variables
+
+    def dyn_attr(self, dev_list):
+        """Invoked to create dynamic attributes for the given devices.
+        Default implementation calls
+        :meth:`DonkiDirectorServer.initialize_dynamic_attributes` for each device
+    
+        :param dev_list: list of devices
+        :type dev_list: :class:`PyTango.DeviceImpl`"""
+    
+        for dev in dev_list:
+            try:
+                dev.initialize_dynamic_attributes()
+            except:
+                import traceback
+                dev.warn_stream("Failed to initialize dynamic attributes")
+                dev.debug_stream("Details: " + traceback.format_exc())
+        #----- PROTECTED REGION ID(DonkiDirectorServer.dyn_attr) ENABLED START -----#
+        
+        #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.dyn_attr
+
+    #    Class Properties
+    class_property_list = {
+        }
+
+
+    #    Device Properties
+    device_property_list = {
+        'infoserver_port':
+            [PyTango.DevULong,
+            "TCP port for the InfoServer",
+            [50010]],
+        'NotifyFileFinished':
+            [PyTango.DevVarStringArray,
+            "List of Tango commands or attributes that will receive the notification of a new file produced.\nOnly the first working command or attribute will be used.\nSyntax: \nTangoCommand: [HOST:PORT/]family/domain/dev->command\nTangoAttribute: [HOST:PORT/]family/domani/dev/attribute",
+            [] ],
+        'DataBufferMaxSize':
+            [PyTango.DevULong,
+            "Max Data Buffer Size for every Datasource",
+            [5000000]],
+        }
+
+
+    #    Command definitions
+    cmd_list = {
+        'Start':
+            [[PyTango.DevVoid, "none"],
+            [PyTango.DevVoid, "none"]],
+        'Stop':
+            [[PyTango.DevVoid, "none"],
+            [PyTango.DevVoid, "none"]],
+        'SetPlayerPriority':
+            [[PyTango.DevVarLongStringArray, "([priority],[player_name])"],
+            [PyTango.DevVoid, "none"]],
+        'RenameDataSource':
+            [[PyTango.DevVarStringArray, "(``datasource_fqn``,``alias``)"],
+            [PyTango.DevVoid, "none"]],
+        'RemovePlayer':
+            [[PyTango.DevString, "Player name."],
+            [PyTango.DevVoid, "none"]],
+        'EnableDataSource':
+            [[PyTango.DevVarLongStringArray, "[[0/1],[DataSourceName]]"],
+            [PyTango.DevVoid, "none"]],
+        'EnableMetaData':
+            [[PyTango.DevVarLongStringArray, "[[0/1],[MetaDataName]]"],
+            [PyTango.DevVoid, "none"]],
+        'GetAll':
+            [[PyTango.DevString, "datasource"],
+            [PyTango.DevString, "JSON message"]],
+        'GetRange':
+            [[PyTango.DevVarStringArray, "datasource, first_trigger, last_trigger"],
+            [PyTango.DevString, "JSON message"]],
+        'GetLast':
+            [[PyTango.DevVarStringArray, "datasource, number_of_samples"],
+            [PyTango.DevString, "JSON message"]],
+        'GetTriggerRange':
+            [[PyTango.DevString, "datasource"],
+            [PyTango.DevVarLongArray, "[trg_min,trg_max]"]],
+        }
+
+
+    #    Attribute definitions
+    attr_list = {
+        'MaxTriggers':
+            [[PyTango.DevLong,
+            PyTango.SCALAR,
+            PyTango.READ_WRITE],
+            {
+                'description': "Maximum number of triggers of the sequence",
+                'Memorized':"true"
+            } ],
+        'EnableDataStoring':
+            [[PyTango.DevBoolean,
+            PyTango.SCALAR,
+            PyTango.READ_WRITE],
+            {
+                'description': "Enable/disable data storing in HDF5 archives.",
+                'Memorized':"true"
+            } ],
+        'FileSize':
+            [[PyTango.DevUShort,
+            PyTango.SCALAR,
+            PyTango.READ_WRITE],
+            {
+                'description': "Number of triggers contained in each HDF5 file.",
+                'Memorized':"true"
+            } ],
+        'FilePrefix':
+            [[PyTango.DevString,
+            PyTango.SCALAR,
+            PyTango.READ_WRITE],
+            {
+                'description': "HDF5 file prefix.",
+                'Memorized':"true"
+            } ],
+        'FilePath':
+            [[PyTango.DevString,
+            PyTango.SCALAR,
+            PyTango.READ_WRITE],
+            {
+                'description': "Full HDF5 storage path.",
+                'Memorized':"true"
+            } ],
+        'MetaDataPeriod':
+            [[PyTango.DevUShort,
+            PyTango.SCALAR,
+            PyTango.READ_WRITE],
+            {
+                'unit': "sec",
+                'display unit': "sec",
+                'format': "sec",
+                'min value': "1",
+                'description': "Period of metadata readout in sec.",
+                'Memorized':"true"
+            } ],
+        'Trigger':
+            [[PyTango.DevLong,
+            PyTango.SCALAR,
+            PyTango.READ]],
+        'Report':
+            [[PyTango.DevString,
+            PyTango.SCALAR,
+            PyTango.READ]],
+        'DescriptionMessage':
+            [[PyTango.DevString,
+            PyTango.SCALAR,
+            PyTango.READ_WRITE],
+            {
+                'description': "Description message that will be automatically stored in the HDF5 archives as metadata.",
+                'Memorized':"true"
+            } ],
+        'PlayersTimeout':
+            [[PyTango.DevUShort,
+            PyTango.SCALAR,
+            PyTango.READ_WRITE],
+            {
+                'unit': "sec",
+                'standard unit': "sec",
+                'display unit': "sec",
+                'description': "Maximum players timeout.",
+                'Memorized':"true"
+            } ],
+        'Paused':
+            [[PyTango.DevBoolean,
+            PyTango.SCALAR,
+            PyTango.READ_WRITE],
+            {
+                'description': "Read/Write attribute that could be used to pause the Sequencer.",
+            } ],
+        'Players':
+            [[PyTango.DevString,
+            PyTango.SPECTRUM,
+            PyTango.READ, 10000],
+            {
+                'description': "List of DonkiPlayers and related info",
+            } ],
+        'DataSources':
+            [[PyTango.DevString,
+            PyTango.SPECTRUM,
+            PyTango.READ, 10000],
+            {
+                'description': "List of DonkiPlayers and related info",
+            } ],
+        'MetaDataSources':
+            [[PyTango.DevString,
+            PyTango.SPECTRUM,
+            PyTango.READ, 10000],
+            {
+                'description': "List of DonkiPlayers and related info",
+            } ],
+        'LastSavedFiles':
+            [[PyTango.DevString,
+            PyTango.SPECTRUM,
+            PyTango.READ, 100]],
+        }
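Taken together, the command and attribute tables above support a simple run-control pattern from any Tango client: configure the file attributes, start the sequence, then poll Trigger until the device drops back to STANDBY (the state mapping is done in always_executed_hook). A sketch, again with a hypothetical device name:

    import time
    import PyTango

    director = PyTango.DeviceProxy("test/donkidirector/1")  # hypothetical device name
    director.write_attribute("FilePath", "/tmp/data")
    director.write_attribute("FilePrefix", "scan01")
    director.write_attribute("MaxTriggers", 100)
    director.Start()
    while director.state() == PyTango.DevState.ON:
        print "trigger:", director.read_attribute("Trigger").value
        time.sleep(1.0)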
+
+
+def main():
+    try:
+        py = PyTango.Util(sys.argv)
+        py.add_class(DonkiDirectorServerClass,DonkiDirectorServer,'DonkiDirectorServer')
+
+        U = PyTango.Util.instance()
+        U.server_init()
+        U.server_run()
+
+    except PyTango.DevFailed,e:
+        print '-------> Received a DevFailed exception:',e
+    except Exception,e:
+        print '-------> An unforeseen exception occurred....',e
+
+if __name__ == '__main__':
+    main()
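Before main() can serve devices, the server has to be registered in the Tango database; a minimal registration sketch using the standard PyTango database API (instance and device names are examples):

    import PyTango

    db = PyTango.Database()
    dev_info = PyTango.DbDevInfo()
    dev_info.server = "DonkiDirectorServer/test"    # ServerClass/instance, example names
    dev_info._class = "DonkiDirectorServer"
    dev_info.name = "test/donkidirector/1"          # hypothetical device name
    db.add_device(dev_info)
    # afterwards:  python DonkiDirectorServer.py test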
diff --git a/src/DonkiDirectorServer.xmi b/src/DonkiDirectorServer.xmi
new file mode 100644
index 0000000..634f349
--- /dev/null
+++ b/src/DonkiDirectorServer.xmi
@@ -0,0 +1,261 @@
+<?xml version="1.0" encoding="ASCII"?>
+<pogoDsl:PogoSystem xmi:version="2.0" xmlns:xmi="http://www.omg.org/XMI" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:pogoDsl="http://www.esrf.fr/tango/pogo/PogoDsl">
+  <classes name="DonkiDirectorServer" pogoRevision="8.1">
+    <description description="DonkiDirector Tango device" title="DonkiDirector Tango interface" sourcePath="/home/rob/lavoro/TANGO_dev/DonkiOrchestra/DonkiDirector_upgraded" language="Python" filestogenerate="XMI   file,Code files" license="GPL" hasMandatoryProperty="false" hasConcreteProperty="true" hasAbstractCommand="false" hasAbstractAttribute="false">
+      <inheritances classname="Device_Impl" sourcePath=""/>
+      <identification contact="at elettra.eu - sci.comp" author="sci.comp" emailDomain="elettra.eu" classFamily="Sequencer" siteSpecific="" platform="All Platforms" bus="Not Applicable" manufacturer="none" reference=""/>
+    </description>
+    <deviceProperties name="infoserver_port" description="TCP port for the InfoServer">
+      <type xsi:type="pogoDsl:UIntType"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <DefaultPropValue>50010</DefaultPropValue>
+    </deviceProperties>
+    <deviceProperties name="NotifyFileFinished" description="List of Tango commands or attributes that will receive the notification of a new file produced.&#xA;Only the first working command or attribute will be used.&#xA;Syntax: &#xA;TangoCommand: [HOST:PORT/]family/domain/dev->command&#xA;TangoAttribute: [HOST:PORT/]family/domani/dev/attribute">
+      <type xsi:type="pogoDsl:StringVectorType"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </deviceProperties>
+    <deviceProperties name="DataBufferMaxSize" description="Max Data Buffer Size for every Datasource">
+      <type xsi:type="pogoDsl:UIntType"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <DefaultPropValue>5000000</DefaultPropValue>
+    </deviceProperties>
+    <commands name="State" description="This command gets the device state (stored in its device_state data member) and returns it to the caller." execMethod="dev_state" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="none">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argin>
+      <argout description="Device state">
+        <type xsi:type="pogoDsl:StateType"/>
+      </argout>
+      <status abstract="true" inherited="true" concrete="true"/>
+    </commands>
+    <commands name="Status" description="This command gets the device status (stored in its device_status data member) and returns it to the caller." execMethod="dev_status" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="none">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argin>
+      <argout description="Device status">
+        <type xsi:type="pogoDsl:ConstStringType"/>
+      </argout>
+      <status abstract="true" inherited="true" concrete="true"/>
+    </commands>
+    <commands name="Start" description="Starts a scheduler sequence" execMethod="start" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argin>
+      <argout description="">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <commands name="Stop" description="Stops a scheduler sequence" execMethod="stop" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argin>
+      <argout description="">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <commands name="SetPlayerPriority" description="Set scheduling prority for a player." execMethod="set_player_priority" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="([priority],[player_name])">
+        <type xsi:type="pogoDsl:LongStringArrayType"/>
+      </argin>
+      <argout description="">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <commands name="RenameDataSource" description="Define an alias for a data source" execMethod="rename_data_source" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="(``datasource_fqn``,``alias``)">
+        <type xsi:type="pogoDsl:StringArrayType"/>
+      </argin>
+      <argout description="">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <commands name="RemovePlayer" description="Remove a player from the scheduler structure." execMethod="remove_player" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="Player name.">
+        <type xsi:type="pogoDsl:StringType"/>
+      </argin>
+      <argout description="">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <commands name="EnableDataSource" description="Enable/disable data source saving" execMethod="enable_data_source" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="[[0/1],[DataSourceName]]">
+        <type xsi:type="pogoDsl:LongStringArrayType"/>
+      </argin>
+      <argout description="">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <commands name="EnableMetaData" description="Enable/disable metadata source saving" execMethod="enable_meta_data" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="[[0/1],[MetaDataName]]">
+        <type xsi:type="pogoDsl:LongStringArrayType"/>
+      </argin>
+      <argout description="">
+        <type xsi:type="pogoDsl:VoidType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <commands name="GetAll" description="Get the full data buffer relative to a specific datasource" execMethod="get_all" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="datasource">
+        <type xsi:type="pogoDsl:StringType"/>
+      </argin>
+      <argout description="JSON message">
+        <type xsi:type="pogoDsl:StringType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <commands name="GetRange" description="Get the content of the data buffer in the range [first_trigger,last_trigger] relative to a specific datasource" execMethod="get_range" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="datasource, first_trigger, last_trigger">
+        <type xsi:type="pogoDsl:StringArrayType"/>
+      </argin>
+      <argout description="JSON message">
+        <type xsi:type="pogoDsl:StringType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <commands name="GetLast" description="Get the last collected samples from the data buffer relative to a specific datasource" execMethod="get_last" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="datasource, number_of_samples">
+        <type xsi:type="pogoDsl:StringArrayType"/>
+      </argin>
+      <argout description="JSON message">
+        <type xsi:type="pogoDsl:StringType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <commands name="GetTriggerRange" description="Get the available trigger range in the data buffer relative to a specific datasource" execMethod="get_trigger_range" displayLevel="OPERATOR" polledPeriod="0">
+      <argin description="datasource">
+        <type xsi:type="pogoDsl:StringType"/>
+      </argin>
+      <argout description="[trg_min,trg_max]">
+        <type xsi:type="pogoDsl:IntArrayType"/>
+      </argout>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+    </commands>
+    <attributes name="MaxTriggers" attType="Scalar" rwType="READ_WRITE" displayLevel="OPERATOR" polledPeriod="0" maxX="" maxY="" memorized="true" memorizedAtInit="true" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:IntType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="Maximum number of triggers of the sequence" label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="EnableDataStoring" attType="Scalar" rwType="READ_WRITE" displayLevel="OPERATOR" polledPeriod="0" maxX="" maxY="" memorized="true" memorizedAtInit="true" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:BooleanType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="Enable/disable data storing in HDF5 archives." label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="FileSize" attType="Scalar" rwType="READ_WRITE" displayLevel="OPERATOR" polledPeriod="0" maxX="" maxY="" memorized="true" memorizedAtInit="true" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:UShortType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="Number of triggers contained in each HDF5 file." label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="FilePrefix" attType="Scalar" rwType="READ_WRITE" displayLevel="OPERATOR" polledPeriod="0" maxX="" maxY="" memorized="true" memorizedAtInit="true" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:StringType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="HDF5 file prefix." label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="FilePath" attType="Scalar" rwType="READ_WRITE" displayLevel="OPERATOR" polledPeriod="0" maxX="" maxY="" memorized="true" memorizedAtInit="true" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:StringType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="Full HDF5 storage path." label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="MetaDataPeriod" attType="Scalar" rwType="READ_WRITE" displayLevel="OPERATOR" polledPeriod="0" maxX="" maxY="" memorized="true" memorizedAtInit="true" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:UShortType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="Period of metadata readout in sec." label="" unit="sec" standardUnit="" displayUnit="sec" format="sec" maxValue="" minValue="1" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="Trigger" attType="Scalar" rwType="READ" displayLevel="OPERATOR" polledPeriod="0" maxX="" maxY="" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:IntType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="" label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="Report" attType="Scalar" rwType="READ" displayLevel="OPERATOR" polledPeriod="0" maxX="10000" maxY="" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:StringType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="" label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="DescriptionMessage" attType="Scalar" rwType="READ_WRITE" displayLevel="OPERATOR" polledPeriod="0" maxX="" maxY="" memorized="true" memorizedAtInit="true" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:StringType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="Description message that will be automatically stored in the HDF5 archives as metadata." label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="PlayersTimeout" attType="Scalar" rwType="READ_WRITE" displayLevel="OPERATOR" polledPeriod="0" maxX="" maxY="" memorized="true" memorizedAtInit="true" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:UShortType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="Maximum players timeout." label="" unit="sec" standardUnit="sec" displayUnit="sec" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="Paused" attType="Scalar" rwType="READ_WRITE" displayLevel="OPERATOR" polledPeriod="0" maxX="" maxY="" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:BooleanType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="Read/Write attribute that could be used to pause the Sequencer." label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="Players" attType="Spectrum" rwType="READ" displayLevel="OPERATOR" polledPeriod="0" maxX="10000" maxY="" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:StringType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="List of DonkiPlayers and related info" label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="DataSources" attType="Spectrum" rwType="READ" displayLevel="OPERATOR" polledPeriod="0" maxX="10000" maxY="" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:StringType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="List of DonkiPlayers and related info" label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="MetaDataSources" attType="Spectrum" rwType="READ" displayLevel="OPERATOR" polledPeriod="0" maxX="10000" maxY="" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:StringType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="List of DonkiPlayers and related info" label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <attributes name="LastSavedFiles" attType="Spectrum" rwType="READ" displayLevel="OPERATOR" polledPeriod="0" maxX="100" maxY="" allocReadMember="true" isDynamic="false">
+      <dataType xsi:type="pogoDsl:StringType"/>
+      <changeEvent fire="false" libCheckCriteria="false"/>
+      <archiveEvent fire="false" libCheckCriteria="false"/>
+      <dataReadyEvent fire="false" libCheckCriteria="true"/>
+      <status abstract="false" inherited="false" concrete="true" concreteHere="true"/>
+      <properties description="" label="" unit="" standardUnit="" displayUnit="" format="" maxValue="" minValue="" maxAlarm="" minAlarm="" maxWarning="" minWarning="" deltaTime="" deltaValue=""/>
+    </attributes>
+    <preferences docHome="./doc_html" makefileHome="/usr/share/pogo/preferences"/>
+  </classes>
+</pogoDsl:PogoSystem>
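+<!-- Illustrative client sketch (not part of the generated model): the buffer
+     commands above could be called from a PyTango client roughly like this;
+     the device name and datasource below are hypothetical:
+
+         import PyTango
+         dev = PyTango.DeviceProxy("test/donkidirector/1")
+         js = dev.GetAll("pippo/value")                   # full buffer, JSON string
+         js = dev.GetRange(["pippo/value", "10", "20"])   # triggers 10..20
+         tmin, tmax = dev.GetTriggerRange("pippo/value")  # available trigger range
+-->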
diff --git a/src/DonkiDirector_cmdline.py b/src/DonkiDirector_cmdline.py
new file mode 100755
index 0000000..1cf0669
--- /dev/null
+++ b/src/DonkiDirector_cmdline.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+
+from DirectorBgnThread import directorThread
+import traceback
+
+"""
+if __name__ == "__main__":
+    dt = directorThread(None)
+    dt.start()
+    dt.max_triggers = 50
+    dt.EnableDataSaving = False
+    nFiles = 1
+    dt.set_file_prefix("mytest")
+    dt.set_file_path(".")
+    dt.set_files_contiguous(True)
+    dt.set_files_to_save(nFiles)
+    dt.set_file_size(dt.max_triggers/nFiles)
+
+    time.sleep(6)
+    dt.set_DataAlias("paperino/image","paperino/mandelbrot")
+    dt.set_player_priority("paperino",0)
+    dt.set_player_priority("pippo",1)
+    dt.set_player_priority("pluto",1)
+
+    dt._started = True
+    while (dt._started):
+        time.sleep(1)
+    print "-------------",dt.zcc.ask_for_log("paperino")
+    print "-------------",dt.zcc.ask_for_log("pluto")
+    print "-------------",dt.zcc.ask_for_log("pippo")
+    #dt.join()
+    print dt.PlayersInfo
+
+"""
+def get_user_input_loop(dt):
+        while True:    # infinite loop
+            try:
+                n = raw_input("\n\nEnter command (type ? for help): ")
+                cmd_in = (n.lower()).strip(' ')
+                if cmd_in == "start":
+                    dt._started = True
+                    print "OK"
+                elif cmd_in == "stop":
+                    dt._started = False
+                    print "OK"
+                elif cmd_in == "state?":
+                    print dt._state
+                elif cmd_in == "players?":
+                    print dt.PlayersInfo
+                elif "priority[" in cmd_in:
+                    plname = (cmd_in.split("[")[1]).split("]")[0]
+                    prio = int((cmd_in.split("="))[-1])
+                    dt.set_player_priority(plname,prio)
+                    print "OK"
+                elif "triggers=" in cmd_in:
+                    arg_in = cmd_in.split('=')[-1]
+                    if arg_in == '?':
+                        print dt.get_max_triggers()
+                    else:
+                        max_triggers = int(arg_in)
+                        dt.set_max_triggers(max_triggers)
+                        print "OK"
+                elif "filesize=" in cmd_in:
+                    arg_in = cmd_in.split('=')[-1]
+                    if arg_in == '?':
+                        print dt.get_file_size()
+                    else:
+                        file_size = int(arg_in)
+                        dt.set_file_size(file_size)
+                        print "OK"
+                elif "savedata=" in cmd_in:
+                    arg_in = cmd_in.split("=")[1]
+                    if arg_in == 'yes':
+                        dt.EnableDataSaving = True
+                        print "OK"
+                    elif arg_in == 'no':
+                        dt.EnableDataSaving = False
+                        print "OK"
+                    else:
+                        print "NAK"
+                elif "datasources?" in cmd_in:
+                    print dt.get_DataSources()
+                elif "metadata?" in cmd_in:
+                    print dt.get_MetaData()
+                elif "dataname[" in cmd_in:
+                    dname = (cmd_in.split("[")[1]).split("]")[0]
+                    alias = (cmd_in.split("="))[-1]
+                    dt.set_DataAlias(dname,alias)
+                elif "remplayer[" in cmd_in:
+                    pname = (cmd_in.split("[")[1]).split("]")[0]
+                    if dt.remove_player_info(pname):
+                        print "OK"
+                    else:
+                        print "NAK"
+                elif "datasource[" in cmd_in:
+                    pname = (cmd_in.split("[")[1]).split("]")[0]
+                    arg_in = (cmd_in.split("=")[1]).lower()
+                    if arg_in not in ['on','off']:
+                        print "NAK"
+                    else:
+                        dt.set_daq_on_off(pname,arg_in=="on")
+                        print "OK"
+                elif "metadata[" in cmd_in:
+                    pname = (cmd_in.split("[")[1]).split("]")[0]
+                    arg_in = (cmd_in.split("=")[1]).lower()
+                    if arg_in not in ['on','off']:
+                        print "NAK"
+                    else:
+                        dt.set_metadata_on_off(pname,arg_in=="on")
+                        print "OK"
+                elif cmd_in == "quit":
+                    return  # stops the loop
+                elif cmd_in == "?":
+                    print "Available commands:"
+                    print "\n\t start \n\t stop \n\t state? \n\t players? \n\t triggers=(N/?) \n\t filesize=(N/?)",
+                    print "\n\t priority[plname]=N \n\t datasources? \n\t dataname[datasource]=alias",
+                    print "\n\t datasource[d_name]=(on/off) \n\t metadata? \n\t metadata[md_name]=(on/off)",
+                    print "\n\t savedata=(yes/no) \n\t remplayer[plname] \n\t quit"
+            except KeyboardInterrupt:
+                print "Bye"
+                return
+            except Exception:
+                traceback.print_exc()
+
+    
+
+if __name__ == "__main__":
+    dt = directorThread(inforserver_port = 50010)
+    dt.start()
+    #
+    dt.EnableDataSaving = False
+    dt.set_file_prefix("mytest")
+    dt.set_file_path(".")
+    dt.set_files_contiguous(True)
+    #
+    get_user_input_loop(dt)
+    dt.quit_and_exit()
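+
+# Illustrative session sketch (player names are hypothetical; it assumes two
+# DonkiPlayers called "pippo" and "pluto" have already registered):
+#
+#   Enter command (type ? for help): priority[pippo]=0
+#   OK
+#   Enter command (type ? for help): priority[pluto]=1
+#   OK
+#   Enter command (type ? for help): triggers=100
+#   OK
+#   Enter command (type ? for help): savedata=yes
+#   OK
+#   Enter command (type ? for help): start
+#   OK
+#   Enter command (type ? for help): state?
+#   (prints the director state)
+#   Enter command (type ? for help): quit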
diff --git a/src/DonkiOrchestraLib.py b/src/DonkiOrchestraLib.py
new file mode 100644
index 0000000..6d755b0
--- /dev/null
+++ b/src/DonkiOrchestraLib.py
@@ -0,0 +1,251 @@
+import zmq
+import traceback
+import socket
+import time
+
+DEBUG = False
+
+class CommunicationClass:
+    def __init__(self, name='director'):
+        self.context = zmq.Context()
+        self.poller = zmq.Poller()
+        self.pub_sock = None
+        self.sub_socks = {}
+        self.pub_tag = name
+        #
+        self.create_pub_socket()
+
+
+
+#-----------------------------------------------------------------------------------
+#    create_pub_socket:
+#
+#-----------------------------------------------------------------------------------
+    def create_pub_socket(self):
+        try:
+            self.pub_sock = self.context.socket(zmq.PUB)
+            self.pub_port = self.pub_sock.bind_to_random_port("tcp://0.0.0.0")
+            if DEBUG:
+                print "PUB " + "tcp://" + str(self.pub_port)
+        except:
+            traceback.print_exc()
+            self.pub_sock = None
+
+#-----------------------------------------------------------------------------------
+#    create_sub_socket:
+#
+#-----------------------------------------------------------------------------------
+    def create_sub_socket(self, name, url):
+        try:
+            if name in self.sub_socks:
+                self.poller.unregister(self.sub_socks[name])
+                self.sub_socks[name].close()
+            self.sub_socks[name] = self.context.socket(zmq.SUB)
+            self.sub_socks[name].setsockopt(zmq.SUBSCRIBE, '')
+            self.sub_socks[name].connect("tcp://"+str(url))
+            self.poller.register(self.sub_socks[name], zmq.POLLIN)
+        except:
+            traceback.print_exc()
+            if DEBUG:
+                print "tcp://"+str(url)
+            # The socket may not have been (re)created yet: guard the cleanup.
+            if name in self.sub_socks:
+                del self.sub_socks[name]
+            return False
+        return True
+
+#-----------------------------------------------------------------------------------
+#    my_pub_socket_info :
+#
+#-----------------------------------------------------------------------------------
+    def my_pub_socket_info(self):
+        return socket.gethostname()+":"+str(self.pub_port)
+
+#-----------------------------------------------------------------------------------
+#    publish_ack :
+#
+#-----------------------------------------------------------------------------------
+    def publish_ack(self, ack_tag, trg_start, trg_stop):
+        # At the moment just use send_pyobj
+        try:
+            self.pub_sock.send_pyobj([ack_tag, trg_start,trg_stop])
+        except:
+            traceback.print_exc()
+
+
+#-----------------------------------------------------------------------------------
+#    publish_data :
+#
+#-----------------------------------------------------------------------------------
+    def publish_data(self, tag, trg_start, trg_stop, data_value):
+        # At the moment just use send_pyobj
+        try:
+            self.pub_sock.send_pyobj(['data',tag.lower(), trg_start,trg_stop,data_value])
+        except:
+            traceback.print_exc()
+
+#-----------------------------------------------------------------------------------
+#    publish_metadata :
+#
+#-----------------------------------------------------------------------------------
+    def publish_metadata(self, tag, trg, metadata_value):
+        # At the moment just use send_pyobj
+        try:
+            self.pub_sock.send_pyobj(['data',tag.lower(), trg, trg, metadata_value])
+        except:
+            traceback.print_exc()
+#-----------------------------------------------------------------------------------
+#    publish_log :
+#
+#-----------------------------------------------------------------------------------
+    def publish_log(self, tag, trg_start, trg_stop, data_value):
+        # At the moment just use send_pyobj
+        try:
+            self.pub_sock.send_pyobj(['log',tag.lower(), trg_start,trg_stop,data_value])
+        except:
+            traceback.print_exc()
+
+
+#-----------------------------------------------------------------------------------
+#    publish_info :
+#
+#-----------------------------------------------------------------------------------
+    def publish_info( self, priority = -1, data_names=[], metadata_names=[]):
+        # At the moment just use send_pyobj
+        try:
+            self.pub_sock.send_pyobj(['info',{'prio':priority,'data':data_names,'metadata':metadata_names}])
+        except:
+            traceback.print_exc()
+
+
+#-----------------------------------------------------------------------------------
+#    ask_for_info :
+#
+#-----------------------------------------------------------------------------------
+    def ask_for_info(self, srv_name, timeout_sec=1):
+        # At the moment just use send_pyobj
+        self.pub_sock.send_pyobj(["info", srv_name])
+        msg = []
+        sub_socket = self.sub_socks[srv_name]
+        max_retries = 5
+        retry = 0
+        t0 = time.time()
+        while retry < max_retries and msg == []:
+            try:
+                socks = dict(self.poller.poll((1000./max_retries)*timeout_sec))
+            except:
+                time.sleep(0.1)
+                continue
+            if sub_socket in socks and socks[sub_socket] == zmq.POLLIN:
+                try:
+                    reply = sub_socket.recv_pyobj()
+                    if reply[0] == 'info':
+                        msg = reply[1]
+                except:
+                    traceback.print_exc()
+                    msg = []
+            if time.time() - t0 > timeout_sec:
+                retry += 1
+        return msg
+
+
+#-----------------------------------------------------------------------------------
+#    ask_for_log :
+#
+#-----------------------------------------------------------------------------------
+    def ask_for_log(self, srv_name, timeout_sec=1):
+        # At the moment just use send_pyobj
+        self.pub_sock.send_pyobj(["playerlog", srv_name])
+        msg = []
+        sub_socket = self.sub_socks[srv_name]
+        max_retries = 5
+        retry = 0
+        t0 = time.time()
+        while retry < max_retries and msg == []:
+            try:
+                socks = dict(self.poller.poll((1000./max_retries)*timeout_sec))
+            except:
+                time.sleep(0.1)
+                continue
+            if sub_socket in socks and socks[sub_socket] == zmq.POLLIN:
+                try:
+                    reply = sub_socket.recv_pyobj()
+                    if reply[0] == 'data' and reply[1] == 'playerlog':
+                        msg = reply[4]
+                except:
+                    traceback.print_exc()
+                    msg = []
+            if time.time() - t0 > timeout_sec:
+                retry += 1
+        return msg
+
+
+#-----------------------------------------------------------------------------------
+#    wait_message :
+#
+#-----------------------------------------------------------------------------------
+    def wait_message(self, srv_names, timeout_sec=1):
+        try:
+            msg = {}
+            socks = dict(self.poller.poll(1000*timeout_sec))
+            if len(socks) == 0:
+                return msg
+            for sn in srv_names:
+                s = self.sub_socks[sn]
+                if s in socks and socks[s] == zmq.POLLIN:
+                    recv_msg = s.recv_pyobj()
+                    msg[sn] = recv_msg
+        except:
+            traceback.print_exc()
+            msg = None
+        return msg
+
+#-----------------------------------------------------------------------------------
+#    publish_command :
+#
+#-----------------------------------------------------------------------------------
+    def publish_command(self, command, srv_name, argin=None, timeout_sec=1):
+        # At the moment just use send_pyobj
+        self.pub_sock.send_pyobj([command, srv_name, argin])
+        if DEBUG:
+            print "Sent command:", command, srv_name, argin, timeout_sec
+        msg = []
+        sub_socket = self.sub_socks[srv_name]
+        max_retries = 5
+        retry = 0
+        t0 = time.time()
+        while retry < max_retries and msg == []:
+            try:
+                socks = dict(self.poller.poll((1000./max_retries)*timeout_sec))
+            except:
+                time.sleep(0.1)
+                if time.time() - t0 > timeout_sec:
+                    retry += 1
+                continue
+            if sub_socket in socks and socks[sub_socket] == zmq.POLLIN:
+                try:
+                    reply = sub_socket.recv_pyobj()
+                    if reply[0] == command:
+                        if reply[1] == reply[2] == 1:
+                            return True
+                        elif reply[1] == reply[2] == -1:
+                            return False
+                    else:
+                        print "=>",reply
+                        retry += 1
+                except:
+                    traceback.print_exc()
+                    return False
+            if time.time() - t0 > timeout_sec:
+                retry += 1
+        return False
+
+
+#-----------------------------------------------------------------------------------
+#    publish_trigger :
+#
+#-----------------------------------------------------------------------------------
+    def publish_trigger(self, trigger_value, priority):
+        # At the moment just use send_pyobj
+        try:
+            self.pub_sock.send_pyobj(["trigger", trigger_value, priority])
+        except:
+            traceback.print_exc()
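+
+
+if __name__ == '__main__':
+    # Minimal smoke-test sketch (illustrative only, not used by the server):
+    # create the director-side sockets, print the endpoint a player would
+    # connect its SUB socket to, and publish one trigger.  The player name
+    # and URL below are placeholders.
+    zcc = CommunicationClass('director')
+    print "Director PUB endpoint:", zcc.my_pub_socket_info()
+    zcc.create_sub_socket('myplayer', 'localhost:50100')
+    zcc.publish_trigger(1, 0)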
diff --git a/src/InfoServer.py b/src/InfoServer.py
new file mode 100644
index 0000000..4880d3c
--- /dev/null
+++ b/src/InfoServer.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+import sys
+import os
+import time
+import threading
+import thread
+import signal
+from socket import *
+import types
+from tinydb import TinyDB, Query, where
+import traceback
+import select
+
+#
+class infoServerThread(threading.Thread):
+
+#-----------------------------------------------------------------------------------
+#    __init__
+#-----------------------------------------------------------------------------------
+    def __init__(self, Port, tinydb_file_path="./db.json", notif_function = None):
+        threading.Thread.__init__(self)
+        self.Port = Port
+        self._stop_ = False
+        self.BUFFSIZE = 50000
+        self.notif_function = notif_function
+        self.mutex = threading.Lock()
+        try:
+            self.db = TinyDB(tinydb_file_path)
+        except:
+            # The db file is unreadable: move it aside and start clean.
+            print "Unable to open", tinydb_file_path, "- corrupted file moved aside"
+            os.rename(tinydb_file_path, "%s_corrupted" % tinydb_file_path)
+            self.db = TinyDB(tinydb_file_path)
+
+
+#-----------------------------------------------------------------------------------
+#    __del__
+#-----------------------------------------------------------------------------------
+    def __del__(self):
+        self._stop_ = True
+
+#-----------------------------------------------------------------------------------
+#    run
+#-----------------------------------------------------------------------------------
+    def run(self):
+        #
+        ADDR = ("", self.Port)
+        serversock = socket(AF_INET, SOCK_STREAM)
+        serversock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
+        serversock.bind(ADDR)
+        serversock.listen(5)
+        read_list = [serversock]
+        while not self._stop_ :
+            readable, writable, errored = select.select(read_list, [], [], 3)
+            for s in readable:
+                if s is serversock:
+                    clientsock, addr = serversock.accept()
+                    thread.start_new_thread(self.handler, (clientsock, addr))
+
+
+#-----------------------------------------------------------------------------------
+#    handler
+#-----------------------------------------------------------------------------------
+    def handler(self,clientsock,addr):
+        while 1:
+            data = clientsock.recv(self.BUFFSIZE)
+            if not data: break
+            request = data.rstrip("\r\n")
+            tokens = request.split(" ")
+            self.mutex.acquire()
+            if tokens[0].lower() == "set":
+                reply_str = self.write_to_db(tokens[1:])
+            elif tokens[0].lower() == "get":
+                reply_str = self.get_from_db(tokens[1:])
+            elif tokens[0].lower() == "del":
+                reply_str = self.del_from_db(tokens[1:])
+            elif tokens[0].lower() == "exit":
+                break
+            else:
+                reply_str = "*** ERROR: UNKNOWN COMMAND"
+            #
+            self.mutex.release()
+            reply_str += "\r\n"
+            clientsock.send(reply_str)
+        clientsock.close()
+
+#-----------------------------------------------------------------------------------
+#    write_to_db
+#-----------------------------------------------------------------------------------
+    def write_to_db(self, tokens):
+        try:
+            table = tokens[0]
+            item = {}
+            item['name'] = tokens[1]
+            item['data'] = tokens[2]
+            self.db.table(table).remove(where('name') == item['name'])
+            self.db.table(table).insert(item)
+            if self.notif_function:
+                self.notif_function( table , self.db.table(table).all())
+        except Exception,e:
+            traceback.print_exc()
+            return '*** ERROR: '+ str(e)
+        return 'OK'
+
+#-----------------------------------------------------------------------------------
+#    get_from_db
+#-----------------------------------------------------------------------------------
+    def get_from_db(self, tokens):
+        try:
+            table = tokens[0]
+            tbl = self.db.table(table)
+            resp = str( tbl.all())
+        except Exception,e:
+            return '*** ERROR: '+ str(e)
+        return resp
+
+#-----------------------------------------------------------------------------------
+#    del_from_db
+#-----------------------------------------------------------------------------------
+    def del_from_db(self, tokens):
+        try:
+            table = tokens[0]
+            if len(tokens) > 1:
+                name = tokens[1]
+                self.db.table(table).remove(where('name') == name)
+            else:
+                self.db.purge_table(table)
+            if self.notif_function:
+                self.notif_function( table , self.db.table(table).all())
+        except Exception,e:
+            return '*** ERROR: '+ str(e)
+        return 'OK'
+
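+# Protocol sketch (illustrative): the server speaks a line-based text protocol
+# over TCP.  The table and item names below are hypothetical:
+#
+#   set players pippo {"prio":0}   -> OK
+#   get players                    -> [{'data': '{"prio":0}', 'name': 'pippo'}]
+#   del players pippo              -> OK
+#   exit                           (closes the connection)
+#
+# e.g. from a Python client:
+#
+#   from socket import socket, AF_INET, SOCK_STREAM
+#   s = socket(AF_INET, SOCK_STREAM)
+#   s.connect(("localhost", 55004))
+#   s.send("get players\r\n")
+#   print s.recv(50000)
+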
+if __name__ == '__main__':
+    srv = infoServerThread(55004,tinydb_file_path="./db.json")
+    srv.start()
+    srv.join()
diff --git a/src/hdfwriter.py b/src/hdfwriter.py
new file mode 100644
index 0000000..0d8eaf7
--- /dev/null
+++ b/src/hdfwriter.py
@@ -0,0 +1,673 @@
+# -*- coding: utf-8 -*-
+"""
+The main class inside the module :mod:`~fermidaq.lib.hdfwriter` is
+:class:`~fermidaq.lib.hdfwriter.HDFWriter`, which is responsible for
+producing the HDF5 files from the data acquired from :mod:`~fermidaq.lib.attribdaq`.
+
+In order to parallelize the production of the HDF5 files (see :ref:`daq_architecture`), the :class:`~fermidaq.lib.hdfwriter.HDFWriter` classes are threads, so one or more threads may be configured to cope with the performance requirements.
+
+In tests (:mod:`~fermidaq.test.bunchmanagertest`) we were able to
+collect 2160x2600 images at 10 Hz using no more than 3 threads.
+
+
+Look at the main attributes and methods from  :class:`~fermidaq.lib.hdfwriter.HDFWriter`:
+
+.. image:: /images/hdfwriter_uml.png
+
+Attributes :attr:`~fermidaq.lib.hdfwriter.HDFWriter.file_path` and
+:attr:`~fermidaq.lib.hdfwriter.HDFWriter.file_prefix` control the name and
+path of the HDF5 file to be created.
+
+:attr:`~fermidaq.lib.hdfwriter.HDFWriter.key` is an auxiliary attribute that
+helps debugging the system with a human-readable name for the thread.
+
+The methods :meth:`~fermidaq.lib.hdfwriter.HDFWriter.daq_switch_on` and
+:meth:`~fermidaq.lib.hdfwriter.HDFWriter.daq_switch_off` let the :class:`~fermidaq.lib.hdfwriter.HDFWriter` know 'a priori' which data it
+should expect to receive. For performance reasons, it is advisable to
+inform these threads when they should not expect data from a specific
+:class:`~fermidaq.lib.attribdaq.AttribDaq`. This information
+is very useful for knowing when a file may be considered concluded, so that the thread can notify the :class:`~fermidaq.lib.bunchmanager.BunchManager` that it may receive more requests to save data (:meth:`~fermidaq.lib.hdfwriter.HDFWriter._file_concluded`).
+
+
+Finally, the main methods are :meth:`~fermidaq.lib.hdfwriter.HDFWriter.save_conf`, which informs this thread of the range of bunch numbers for which it is expected to save data, and :meth:`~fermidaq.lib.hdfwriter.HDFWriter.save_data_list`, which passes the reference it needs to get the data from
+:class:`~fermidaq.lib.attribdaq.AttribDaq` through its method :meth:`~fermidaq.lib.attribdaq.AttribDaq.get_data`.
+
+.. seealso::
+
+   For examples on how to configure and use :mod:`~fermidaq.lib.hdfwriter`,
+   see :mod:`~fermidaq.test.bunchmanagertest`.
+
+
+HDFWriter
+=========
+
+.. autoclass:: fermidaq.lib.hdfwriter.HDFWriter (bunch_manager, key, file_path, file_prefix [, logger = MyLogger()])
+    :members:
+
+Protected Members:
+------------------
+
+.. automethod:: fermidaq.lib.hdfwriter.HDFWriter._file_concluded
+
+.. automethod:: fermidaq.lib.hdfwriter.HDFWriter._close_data_acquisition
+
+.. automethod:: fermidaq.lib.hdfwriter.HDFWriter._save_hdf
+
+
+Exceptions
+==========
+
+.. autoexception:: fermidaq.lib.hdfwriter.HDFWriterBusy
+
+.. autoexception:: fermidaq.lib.hdfwriter.HDFWriterNotStarted
+
+
+"""
+import time
+import numpy
+import threading
+import h5py
+import traceback
+import os
+from Queue import (Queue, Full, Empty)
+
+DEBUG = False
+LOGFILTER = 100
+NO_TIMEOUT = -1
+
+def ensure_dir(f):
+    d = os.path.dirname(f)
+    if not os.path.exists(d):
+        os.makedirs(d)
+
+
+class HDFWriterBusy(Exception):
+    """
+    Notify that :class:`~fermidaq.lib.hdfwriter.HDFWriter` is not
+    able to process another request.
+    """
+    pass
+
+class HDFWriterNotStarted(Exception):
+    """
+    Notify that the thread :class:`~fermidaq.lib.hdfwriter.HDFWriter`
+    was not started.
+    """
+    pass
+
+class FatalInternalError(Exception):
+    """
+    Notify that the internal save queue is not being consumed and the
+    thread is not working (raised by
+    :meth:`~fermidaq.lib.hdfwriter.HDFWriter.save_data_list`).
+    """
+    pass
+
+HDFW_INIT = 0
+HDFW_IDLE = 1
+HDFW_BUSY = 2
+
+class HDFWriter(threading.Thread):
+    """
+    Responsible for collecting the data from the
+    :class:`~fermidaq.lib.attribdaq.AttribDaq` objects and placing them
+    in HDF5 files.
+
+    They are generated and controlled by
+    :class:`~fermidaq.lib.bunchmanager.BunchManager`. The constructor requires:
+
+    :param bunch_manager: The instance of the :class:`~fermidaq.lib.bunchmanager.BunchManager` object.
+    :param           key: A string to identify this thread.
+    :param     file_path: The file system path where to save the produced files.
+    :param   file_prefix: The initial part of the HDF5 file that will be created.
+
+    Optionally it may receive:
+
+    :param logger: An instance of Tango Server logger in order to produce loggings inside the Tango logging system.
+
+
+    The way it will produce HDF5 files are:
+
+    * It must receive the range of triggers to save :meth:`~fermidaq.lib.hdfwriter.HDFWriter.save_conf`
+    * Then it will receive all the data to save through :meth:`~fermidaq.lib.hdfwriter.HDFWriter.save_data_list`.
+    * After finishing the acquisition of all the data needed to produce the file, it will notify the :class:`~fermidaq.lib.bunchmanager.BunchManager` through its method :meth:`~fermidaq.lib.bunchmanager.BunchManager.hdf_finished` in order to announce that it is free to acquire another file.
+
+
+    Internally, it will manage a queue of data-to-save entries that
+    is consumed inside its main thread execution (:meth:`~fermidaq.lib.hdfwriter.HDFWriter.run`).
+
+    The attributes :attr:`~fermidaq.lib.hdfwriter.HDFWriter.file_path` and
+    :attr:`~fermidaq.lib.hdfwriter.HDFWriter.file_prefix` may be changed
+    and they will affect the next file to be produced.
+
+    .. todo::
+
+        Change the notification method to add the full path name of the file.
+        Provide a notification service for problems inside this thread, to notify the BunchManager.
+
+    """
+    def __init__(self, key, dataserver, file_path, file_prefix):
+        threading.Thread.__init__(self)
+        #public:
+
+        #: identifies this thread in a human way
+        self.key = key
+        #
+        self.dataserver = dataserver
+        #: define where the file will be created.
+        self.file_path=file_path
+        #: defines the beginning of the file name; the
+        #: full name of the file will be:
+        #: file_path + file_prefix + first_bunch_number + .h5
+        self.file_prefix = file_prefix
+        
+
+        #this list is filled through the save_data_list method, and
+        #consumed inside the main thread, especially inside
+        #the _save_hdf method.
+        self._save_list = Queue(-1) # Original version => Queue(100)
+
+        #indicates if there is an open file being filled.
+        self._hdf_file = None
+
+        #flag to indicate that thread should close.
+        self._stop = False
+        #dictionary that deals with the FermiDaq to acquire, it
+        #is used mainly to allow the HDFWriter to know if it has acquired all
+        #the necessary data.
+        self._daq_list = dict()
+        self._intState = HDFW_INIT
+        self.first_bunch = 0
+        self.last_bunch = 0
+        self.qt = 0
+        self.dsets = []
+        self.dsets_ptr = {}
+        self._log_filter = 0
+        self.report = ""
+        self.Files_contiguous = True
+        self.name_with_trigger_info = True
+        self._paused = False
+        self.no_data_timeout = NO_TIMEOUT
+
+    def save_data_list(self, daq_key, bn_in, bn_fin,data_in):
+        """
+        Called from :class:`~fermidaq.lib.bunchmanager.BunchManager` in order
+        to announce the list of available data.
+
+        It fills an internal queue that is consumed inside the
+        main thread - :meth:`~fermidaq.lib.hdfwriter.HDFWriter.run`.
+
+        This queue may be full due, for example, to the fact that this
+        thread is not able to write all the data to file in the time it
+        should. In this case, this method will raise a
+        :exc:`~fermidaq.lib.hdfwriter.FatalInternalError`. On this
+        exception, the :class:`~fermidaq.lib.bunchmanager.BunchManager`
+        should abort the execution of this thread.
+
+        :param daq_pt: instance of :class:`~fermidaq.lib.attribdaq.AttribDaq`
+        :param  bn_in: first bunch number to ask for.
+        :param bn_fin: last bunch number to ask for.
+
+        With this entry, the :class:`~fermidaq.lib.hdfwriter.HDFWriter`
+        assumes that it may call the
+        :meth:`~fermidaq.lib.attribdaq.AttribDaq.get_data` ::
+
+            daq_pt.get_data(bn_in, bn_fin)
+
+        And it will receive the data to produce the HDF5 file.
+
+        """
+        try:
+            #assert self._intState == HDFW_BUSY
+            assert bn_in >= self.first_bunch
+            assert bn_fin <= self.last_bunch
+        except AssertionError, ex:
+            print traceback.format_exc()
+            return
+        self._log_filter += 1
+        if DEBUG or not self._log_filter % LOGFILTER:
+            self._log_filter = 0
+            print ("HDFW %s daq = %s %d->%d" %(self.key,daq_key,bn_in,bn_fin))
+        try:
+            #insert inside the save_list
+            self._save_list.put((daq_key, bn_in, bn_fin,data_in),timeout=1)
+        except Full:
+            raise FatalInternalError("Queue is full, thread is not working")
+
+    def set_no_data_timeout_sec(self,timeout_in):
+        """
+        Set the no_data_timeout variable that is used to automatically close
+        the HDF5 file if no data arrives.
+        """
+        self.no_data_timeout = timeout_in
+
+
+    def daq_switch_on(self,daq_pt_list):
+        """
+        Initialize the entries for the expected acquisition daqs.
+
+        :param daq_pt_list: List of instances of :class:`~fermidaq.lib.attribdaq.AttribDaq` or a single :class:`~fermidaq.lib.attribdaq.AttribDaq`.
+
+        The :class:`~fermidaq.lib.hdfwriter.HDFWriter` will wait for all
+        entries in this list to provide the data in the range defined at
+        :meth:`~fermidaq.lib.hdfwriter.HDFWriter.save_conf` before closing
+        the file.
+        """
+        if isinstance(daq_pt_list, list):
+            for daq_key in daq_pt_list:
+                self._daq_list[daq_key] = (0,0)
+        else:
+            print("HDFW %s daq_switch_on(), unknown input argument")
+
+
+    def daq_switch_off(self,daq_pt_list):
+        """
+        Removes from its internal management of acquired data, those
+        entries that should not produce data anymore.
+
+        It should be called by :class:`~fermidaq.lib.bunchmanager.BunchManager`
+        in order to inform that :class:`~fermidaq.lib.hdfwriter.HDFWriter`
+        should not expect to receive data from those
+        :class:`~fermidaq.lib.attribdaq.AttribDaq` any more.
+
+        :param daq_pt_list: list of :class:`~fermidaq.lib.attribdaq.AttribDaq` or a single :class:`~fermidaq.lib.attribdaq.AttribDaq`.
+
+        .. warning::
+
+            Not being notified that it should not expect data from one
+            :class:`~fermidaq.lib.attribdaq.AttribDaq` that does not produce
+            data will damage the performance of the
+            :class:`~fermidaq.lib.hdfwriter.HDFWriter`, because it will not
+            be able to know exactly when it should close the file.
+
+        """
+        if isinstance(daq_pt_list,list):
+            for daq_key in daq_pt_list:
+                if daq_key in self._daq_list.keys():
+                    self._daq_list.pop(daq_key)
+        else:
+            print("HDFW %s daq_switch_off(), unknown input argument")
+
+
+
+    def save_conf(self,bn_in, bn_fin, Files_contiguous, name_with_trigger_info = True):
+        """
+        Define the range of the HDF5 file to be saved.
+
+        This method is called from
+        :class:`~fermidaq.lib.bunchmanager.BunchManager` and define the range
+        of bunch numbers the :class:`~fermidaq.lib.hdfwriter.HDFWriter`
+        should expect to produce the HDF5 file.
+
+        For example::
+
+            hdf_writer.save_conf(5, 105, Files_contiguous=True)
+
+        Will inform the :class:`~fermidaq.lib.hdfwriter.HDFWriter` that it
+        should create an HDF5 file and that this file will hold the data
+        from bunch number 5 up to bunch number 105. So, it will be able
+        to know when the acquisition has finished in order to notify
+        the :class:`~fermidaq.lib.bunchmanager.BunchManager`.
+
+        :param bn_in: The first bunch number it will save in HDF5 file.
+        :param bn_fin: The last bunch number for the file.
+
+        .. warning::
+
+            :class:`~fermidaq.lib.hdfwriter.HDFWriter` does not accept a new
+            configuration if it is acquiring data for the current file.
+            If this occurs, it will raise the exception
+            :exc:`~fermidaq.lib.hdfwriter.HDFWriterBusy`.
+
+        .. warning::
+
+            :class:`~fermidaq.lib.hdfwriter.HDFWriter` is a thread, and
+            there is no sense in configuring an acquisition if the thread
+            is not alive. In that case, it will raise the
+            :exc:`~fermidaq.lib.hdfwriter.HDFWriterNotStarted` exception.
+
+        """
+        if self._intState == HDFW_BUSY:
+            raise HDFWriterBusy("HDFW %s is already configured for %d->%d"
+            %(self.key,self.first_bunch,self.last_bunch))
+        if self._intState == HDFW_INIT:
+            raise HDFWriterNotStarted("HDFW %s is not started"%(
+                                self.key))
+
+        if DEBUG:
+            print("HDFW %s config file for %d->%d"%(self.key, bn_in,bn_fin))
+        self.first_bunch = bn_in
+        self.last_bunch = bn_fin
+        self.qt = bn_fin - bn_in + 1
+        self.Files_contiguous = Files_contiguous
+        self._intState = HDFW_BUSY
+        self.name_with_trigger_info = name_with_trigger_info
+        
+
+
+
+    def stop_thread(self):
+        """ Ask thread to finish itself.
+
+        It is an asynchronous method, and the correct way to ensure
+        the thread finishes itself is through::
+
+            hdf_writer.start()
+
+            #do its jobs...
+
+            hdf_writer.stop_thread()
+            hdf_writer.join()
+
+
+        """
+        self._stop = True
+
+
+    def set_paused(self,value_in):
+        """ Ask thread to pause actions.
+        """
+        if not value_in:
+            # Restart, avoid timeout problems
+            self.last_savetime = time.time()
+        self._paused = value_in
+
+
+    def run(self):
+        """
+        The main function of the :class:`~fermidaq.lib.hdfwriter.HDFWriter`.
+
+        It 'consumes' its internal queue with the entries of data
+        to save that were populated by the
+        :meth:`~fermidaq.lib.hdfwriter.HDFWriter.save_data_list`.
+        With the data acquired, it produces the HDF5 files through
+        the :meth:`~fermidaq.lib.hdfwriter.HDFWriter._save_hdf`.
+
+        If the queue is empty - there is no data for saving in the queue -
+        it will investigate if there is no more data to acquire
+        (:meth:`~fermidaq.lib.hdfwriter.HDFWriter._file_concluded`).
+
+        If it has received the whole expected data, it will close
+        the file and notify the
+        :class:`~fermidaq.lib.bunchmanager.BunchManager`
+        (:meth:`~fermidaq.lib.hdfwriter.HDFWriter._close_data_acquisition`).
+
+        """
+        if DEBUG:
+            print("HDFW %s started"%(self.key))
+        self._intState = HDFW_IDLE
+        self.last_savetime = time.time()
+
+        while not self._stop:
+            try:
+                #receive the new entry
+                if self._save_list.qsize() > 0:
+                    try:
+                        (daq_key, bn_in, bn_fin, data_in) = self._save_list.get()
+                        if isinstance(data_in, list):
+                            self._save_hdf(daq_key,bn_in, bn_fin, numpy.asarray(data_in))
+                        else:
+                            self._save_hdf(daq_key,bn_in, bn_fin, data_in)
+                        self.last_savetime = time.time()
+                    except:
+                        print traceback.format_exc()
+                elif self._file_concluded():
+                    self._close_data_acquisition(timeout=False)
+                elif (not self._paused) and (self.no_data_timeout > 0) and (self._hdf_file):
+                    if ((time.time() - self.last_savetime) > self.no_data_timeout):
+                        self._close_data_acquisition(timeout=True)
+                else:
+                    time.sleep(0.01)
+
+            except:
+                #THIS MEANS THAT LESS THAN 3 VALUES WERE PASSED,
+                #THIS IS THE STOP REQUEST.
+                print traceback.format_exc()
+                print('HDFW %s received STOP Request' % self.key)
+                self.report += "Received STOP Request" + "\n"
+
+        self._close_data_acquisition()
+        if DEBUG:
+            print("HDFW %s thread finished"%(self.key))
+
+
+    def _file_concluded(self):
+        """ Returns True if there is one file just filled, that should
+        be closed.
+
+        The difficulty in knowing whether the file has finished is that the
+        :class:`~fermidaq.lib.hdfwriter.HDFWriter`
+        may receive data from :class:`~fermidaq.lib.attribdaq.AttribDaq` in a 'random'
+        order. This method tries to ensure that it has acquired all the
+        bunch ranges from all the :class:`~fermidaq.lib.attribdaq.AttribDaq`
+        it should receive data from.
+
+
+        .. warning::
+
+            If there is one :class:`~fermidaq.lib.attribdaq.AttribDaq` from
+            whom the :class:`~fermidaq.lib.hdfwriter.HDFWriter` does not
+            receive new data for at least 3 seconds after another
+            :class:`~fermidaq.lib.attribdaq.AttribDaq` reaches the last
+            bunch number to acquire, it will assume that
+            this :class:`~fermidaq.lib.attribdaq.AttribDaq` is not working
+            correctly and it will remove that entry from the expected
+            data to receive.
+
+        .. todo::
+
+            It should expect at least one
+            :class:`~fermidaq.lib.attribdaq.AttribDaq` to reach the
+            last bunch number before assuming that one of the
+            :class:`~fermidaq.lib.attribdaq.AttribDaq` may have a problem.
+
+        """
+
+        # if there is no open file, it has not finished.
+        if not self._hdf_file:
+            return False
+        elif self._intState == HDFW_IDLE:
+            return True
+        elif len(self._daq_list.keys()) == 0:
+            # No data has arrived yet
+            return False
+
+        #get all the daqattribs that have reached the last bunch number
+        l = [(key,value[1]) for (key,value) in self._daq_list.items()
+                    if value[1] == self.last_bunch ]
+
+        # if all the values reached the last value, the file is concluded
+        return len(l) == len(self._daq_list.keys())
+
+    def _force_close_daq(self):
+        while not self._save_list.empty():
+            self._save_list.get()
+        self._intState = HDFW_IDLE
+        self._paused = False
+
+    def _close_data_acquisition(self,timeout=False):
+        """
+        Finishes the HDF5 file and notify the
+        :class:`~fermidaq.lib.bunchmanager.BunchManager` that it is free to
+        receive new request.
+
+        .. todo::
+
+            It should add here the metadata information.
+        """
+        if self._hdf_file == None:
+            return
+        if DEBUG:
+            print("HDFW finished key = %s [%d,%d]"% (
+                    self.key,self.first_bunch,self.last_bunch))
+        self._intState = HDFW_IDLE
+        try:
+            self._hdf_file.create_dataset('triggers',
+                data=numpy.arange(self.first_bunch,self.last_bunch+1),
+                             dtype=numpy.int32)
+        except:
+            print("Failed to create the triggers dataset!" )
+            self.report += "Error: " + "failed to create the bunchnumber dataset!" + "\n"
+        last_metadata = self.dataserver.get_metadata()
+        for metakey in last_metadata.keys():
+            try:
+                # Create HDF dataset
+                self._hdf_file.create_dataset(str(metakey), data=last_metadata[metakey][1])
+            except Exception, ex:
+                print "store_metadata exception",metakey,last_metadata[metakey][1]
+                traceback.print_exc()
+        del last_metadata
+
+
+        try:
+            self.dsets = []
+            file_name = self._hdf_file.filename
+            tt0 = time.time()
+            self._hdf_file.flush()
+            self._hdf_file.close()
+            if DEBUG:
+                print "FILE",file_name," closed",1000.*(time.time() - tt0)        
+            self._hdf_file = None
+            self.report += file_name.split("/")[-1] + " Closed.\n"
+            #if at least one reached the last bunch:
+            if timeout:
+                #get all the daqattrib that has reached the last number
+                not_finished_l= [(key,value[1]) for (key,value)
+                            in self._daq_list.items()
+                            if value[1] != self.last_bunch ]
+                #
+                if DEBUG:
+                    print self.key,"TIMEOUT",self.last_bunch,not_finished_l
+            self._daq_list.clear()
+            self.dataserver.hdf_finished(self.key,file_name,self.report)
+            #self.dataserver.task_queue.put(['hdf_finished',self.key,file_name,self.report])
+        except:
+            print traceback.format_exc()
+
+    def _save_hdf(self, daq_key, bn_in, bn_fin, data_in):
+        """
+        Save data from :class:`~fermidaq.lib.attribdaq.AttribDaq` in HDF5.
+
+        Usually, the :class:`~fermidaq.lib.hdfwriter.HDFWriter` does not
+        know the kind of data it will receive from the
+        :class:`~fermidaq.lib.attribdaq.AttribDaq`.
+
+        The algorithm for saving HDF5 file is the following:
+
+        * Ensure that it already knows the :class:`~fermidaq.lib.attribdaq.AttribDaq` that is producing this data, and keep track of the triggers that it has acquired from this Daq.
+
+        * Ensure it has already opened a file for this new acquisition.
+
+        * Acquire the data.
+
+        * Check if there is an HDF5 dataset configured for this data.
+
+            If not, it will check the
+            :attr:`~fermidaq.lib.attribdaq.AttribDaq.attr_type` to know
+            if it should create a scalar, spectrum or image dataset.
+            From the data itself, it will get the data type, and also
+            deduce the shape of the dataset to create.
+
+        * Update the dataset
+
+        * Check if the acquisition is done for the current configured range - :meth:`~fermidaq.lib.hdfwriter.HDFWriter.save_conf`, eventually, conclude the acquisition.
+
+        """
+        self._log_filter += 1
+        if DEBUG and not self._log_filter % LOGFILTER:
+            print("HDFW %s saving data %s %d %d"% (
+                        self.key, daq_key,bn_in,bn_fin))
+
+        #keep track of the daq and the triggers already acquired.
+        self._daq_list[daq_key] = (bn_in, bn_fin)
+
+        self.report = ""
+        #open a new file if there is no file opened.
+        if self._hdf_file == None:
+            if self.file_path[-1] != '/':
+                self.file_path += '/'
+            if self.name_with_trigger_info:
+                f_name = "%s%s_%07d.h5"% (self.file_path , self.file_prefix,self.first_bunch)
+            else:
+                f_name = "%s%s.h5"% (self.file_path , self.file_prefix)
+            if DEBUG:
+                print("HDFW %s opening file %s"% (self.key, f_name))
+            ensure_dir(f_name)
+            self._hdf_file = h5py.File(f_name,'w')
+            #self._hdf_file = h5py.File(f_name,'w',libver='latest')
+            self.last_savetime = time.time()
+            try:
+                self._hdf_file.attrs['timestamp'] = str(time.ctime())
+                self.dataserver.notify_queue.put(['update_report',f_name.split("/")[-1] + " Opened.\n"])
+            except:
+                traceback.print_exc()
+
+
+        #check if its data set was already configured.
+        if daq_key not in self.dsets:
+            tokens = daq_key.split("/")
+            groupname=""
+            dsetname = tokens[-1]
+            for idx in range(len(tokens)-1):
+                groupname += "/"
+                groupname += tokens[idx]
+                try:
+                    g=self._hdf_file.create_group(groupname)
+                except:
+                    #print "****", groupname,"NOT CREATED"
+                    # Probably the group already exists... does not matter
+                    pass
+
+            #
+            try:
+                if (groupname != ""):
+                    g = self._hdf_file[groupname]
+                    if len(data_in.shape) == 1: # 'scalar':
+                        g.create_dataset(dsetname,shape=(self.qt,),dtype=data_in.dtype)
+                    elif len(data_in.shape) == 2: #'spectrum'
+                        g.create_dataset(dsetname,shape=(self.qt, data_in.shape[-1]),dtype=data_in.dtype)
+                    elif len(data_in.shape) == 3: #'image'
+                        g.create_dataset(dsetname,shape=(self.qt, data_in.shape[-2], data_in.shape[-1]),dtype=data_in.dtype)
+                else:
+                    if len(data_in.shape) == 1: # 'scalar':
+                        self._hdf_file.create_dataset(daq_key,shape=(self.qt,),dtype=data_in.dtype)
+                    elif len(data_in.shape) == 2: #'spectrum'
+                        self._hdf_file.create_dataset(daq_key,shape=(self.qt,data_in.shape[-1]),dtype=data_in.dtype)
+                    elif len(data_in.shape) == 3: #'image'
+                        self._hdf_file.create_dataset(daq_key,shape=(self.qt, data_in.shape[-2], data_in.shape[-1]),dtype=data_in.dtype)
+                #
+                self.dsets += [daq_key]
+                self._hdf_file.flush()
+                self.dsets_ptr[daq_key] = self._hdf_file.get(daq_key, None)
+                #attempt to solve the problem of Bug#2
+            except ValueError:
+                print('file %s data_set %s already exists' % (self._hdf_file.filename, daq_key))
+                self.report += "Error: dataset " + daq_key + " already exists.\n"
+            except Exception, ex:
+                print(traceback.format_exc())
+                print('HDFW %s file %s data_set %s creation error %s' %(self.key, self._hdf_file.filename, daq_key,str(ex)))
+                self.report += "Error: dataset " + daq_key + " creation error." + "\n"
+
+        try:
+            #update the dataset
+            assert self.dsets_ptr[daq_key]
+            try:
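+                # Map the absolute trigger range onto file-local row indices:
+                # trigger bn goes to row (bn - self.first_bunch); bn_fin is
+                # included in the written slice.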
+                slicc = slice(bn_in - self.first_bunch, bn_fin - self.first_bunch + 1)
+                self.dsets_ptr[daq_key][slicc] = data_in
+            except Exception, ex:
+                print(traceback.format_exc())
+                print("%s %s %d %d %d %s" % (daq_key, slicc, self.first_bunch, bn_in, bn_fin, self._hdf_file.filename))
+                self.report += "Error: dataset " + daq_key + " write error." + "\n"
+            #check if the file is finished.
+            if self._file_concluded():
+                if DEBUG:
+                    print('HDFW %s file concluded' % self.key)
+                self._close_data_acquisition()
+        except ValueError, ex:
+            self.report += "Error: dataset " + daq_key + " h5py.get[dataset] failed." + "\n"
+            #Bug#3, found that sometimes it is not able to get the correct dataset
+            print("HDFW %s dataset %s h5py.get[dataset] failed %s" %
+                (self.key, daq_key, str(ex)))
+        except AssertionError, ex:
+            #Bug#3
+            self.report += "Error: dataset " + daq_key + " assertion error." + "\n"
+            print("HDFW %s dataset %s assertion error" %
+                    (self.key, daq_key))
diff --git a/src/tinydb/__init__.py b/src/tinydb/__init__.py
new file mode 100644
index 0000000..d427fb5
--- /dev/null
+++ b/src/tinydb/__init__.py
@@ -0,0 +1,30 @@
+"""
+TinyDB is a tiny, document oriented database optimized for your happiness :)
+
+TinyDB stores different types of Python data using a configurable
+backend. It has support for handy querying and tables.
+
+.. codeauthor:: Markus Siemens <markus@m-siemens.de>
+
+Usage example:
+
+>>> from tinydb import TinyDB, where
+>>> from tinydb.storages import MemoryStorage
+>>> db = TinyDB(storage=MemoryStorage)
+>>> db.insert({'data': 5})  # Insert into '_default' table
+>>> db.search(where('data') == 5)
+[{'data': 5, '_id': 1}]
+>>> # Now let's create a new table
+>>> tbl = db.table('our_table')
+>>> for i in range(10):
+...     tbl.insert({'data': i})
+...
+>>> len(tbl.search(where('data') < 5))
+5
+"""
+
+from tinydb.queries import Query, where
+from tinydb.storages import Storage, JSONStorage
+from tinydb.database import TinyDB
+
+__all__ = ('TinyDB', 'Storage', 'JSONStorage', 'Query', 'where')
diff --git a/src/tinydb/database.py b/src/tinydb/database.py
new file mode 100644
index 0000000..da035a7
--- /dev/null
+++ b/src/tinydb/database.py
@@ -0,0 +1,470 @@
+"""
+Contains the :class:`database <tinydb.database.TinyDB>` and
+:class:`tables <tinydb.database.Table>` implementation.
+"""
+from tinydb import JSONStorage
+from tinydb.utils import LRUCache, iteritems, itervalues
+
+
+class Element(dict):
+    """
+    Represents an element stored in the database.
+
+    This is a transparent proxy for database elements. It exists
+    to provide a way to access an element's id via ``el.eid``.
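+
+    For example:
+
+    >>> el = Element({'value': 1}, eid=42)
+    >>> el.eid
+    42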
+    """
+    def __init__(self, value=None, eid=None, **kwargs):
+        super(Element, self).__init__(**kwargs)
+
+        if value is not None:
+            self.update(value)
+            self.eid = eid
+
+
+class StorageProxy(object):
+    def __init__(self, storage, table_name):
+        self._storage = storage
+        self._table_name = table_name
+
+    def read(self):
+        try:
+            raw_data = (self._storage.read() or {})[self._table_name]
+        except KeyError:
+            self.write({})
+            return {}
+
+        data = {}
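+        # JSON object keys are always strings, so the element IDs are
+        # converted back to integers here when the table is read.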
+        for key, val in iteritems(raw_data):
+            eid = int(key)
+            data[eid] = Element(val, eid)
+
+        return data
+
+    def write(self, values):
+        data = self._storage.read() or {}
+        data[self._table_name] = values
+        self._storage.write(data)
+
+    def purge_table(self):
+        try:
+            data = self._storage.read() or {}
+            del data[self._table_name]
+            self._storage.write(data)
+        except KeyError:
+            pass
+
+
+class TinyDB(object):
+    """
+    The main class of TinyDB.
+
+    Gives access to the database and provides methods to insert, search and
+    remove elements, as well as to access tables.
+    """
+
+    DEFAULT_TABLE = '_default'
+    DEFAULT_STORAGE = JSONStorage
+
+    def __init__(self, *args, **kwargs):
+        """
+        Create a new instance of TinyDB.
+
+        All arguments and keyword arguments will be passed to the underlying
+        storage class (default: :class:`~tinydb.storages.JSONStorage`).
+
+        :param storage: The class of the storage to use. Will be initialized
+                        with ``args`` and ``kwargs``.
+        """
+
+        storage = kwargs.pop('storage', TinyDB.DEFAULT_STORAGE)
+        table = kwargs.pop('default_table', TinyDB.DEFAULT_TABLE)
+
+        # Prepare the storage
+        self._opened = False
+
+        #: :type: Storage
+        self._storage = storage(*args, **kwargs)
+
+        self._opened = True
+
+        # Prepare the default table
+
+        self._table_cache = {}
+        self._table = self.table(table)
+
+    def table(self, name=DEFAULT_TABLE, **options):
+        """
+        Get access to a specific table.
+
+        Creates a new table if it hasn't been created before; otherwise it
+        returns the cached :class:`~tinydb.Table` object.
+
+        :param name: The name of the table.
+        :type name: str
+        :param cache_size: How many query results to cache.
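+
+        For example (the table name is illustrative; extra keyword
+        arguments are forwarded to the table class):
+
+        >>> tbl = db.table('measurements', cache_size=30)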
+        """
+
+        if name in self._table_cache:
+            return self._table_cache[name]
+
+        table = self.table_class(StorageProxy(self._storage, name), **options)
+
+        self._table_cache[name] = table
+        
+        # table._read will create an empty table in the storage, if necessary
+        table._read()
+
+        return table
+
+    def tables(self):
+        """
+        Get the names of all tables in the database.
+
+        :returns: a set of table names
+        :rtype: set[str]
+        """
+
+        return set(self._storage.read())
+
+    def purge_tables(self):
+        """
+        Purge all tables from the database. **CANNOT BE REVERSED!**
+        """
+
+        self._storage.write({})
+        self._table_cache.clear()
+
+    def purge_table(self, name):
+        """
+        Purge a specific table from the database. **CANNOT BE REVERSED!**
+
+        :param name: The name of the table.
+        :type name: str
+        """
+        if name in self._table_cache:
+            del self._table_cache[name]
+
+        proxy = StorageProxy(self._storage, name)
+        proxy.purge_table()
+
+    def close(self):
+        """
+        Close the database.
+        """
+        self._opened = False
+        self._storage.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        if self._opened is True:
+            self.close()
+
+    def __getattr__(self, name):
+        """
+        Forward all unknown attribute calls to the underlying standard table.
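+
+        For example, ``db.insert({'x': 1})`` is resolved here and executed
+        as ``db._table.insert({'x': 1})``.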
+        """
+        return getattr(self._table, name)
+
+    # Methods that are executed on the default table
+    # Because magic methods are not handled by __getattr__ we need to forward
+    # them manually here
+
+    def __len__(self):
+        """
+        Get the total number of elements in the default table.
+
+        >>> db = TinyDB('db.json')
+        >>> len(db)
+        0
+        """
+        return len(self._table)
+
+
+class Table(object):
+    """
+    Represents a single TinyDB Table.
+    """
+
+    def __init__(self, storage, cache_size=10):
+        """
+        Get access to a table.
+
+        :param storage: Access to the storage
+        :type storage: StorageProxy
+        :param cache_size: Maximum size of query cache.
+        """
+
+        self._storage = storage
+        self._query_cache = LRUCache(capacity=cache_size)
+
+        data = self._read()
+        if data:
+            self._last_id = max(i for i in data)
+        else:
+            self._last_id = 0
+
+    def process_elements(self, func, cond=None, eids=None):
+        """
+        Helper function for processing all elements specified by condition
+        or IDs.
+
+        A repeating pattern in TinyDB is to run some code on all elements
+        that match a condition or are specified by their ID. This is
+        implemented in this function.
+        The function passed as ``func`` has to be a callable. Its first
+        argument will be the data currently in the database. Its second
+        argument is the element ID of the currently processed element.
+
+        See: :meth:`~.update`, :meth:`.remove`
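+
+        For example, :meth:`.remove` is implemented as
+        ``process_elements(lambda data, eid: data.pop(eid), cond, eids)``.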
+
+        :param func: the function to execute on every included element.
+                     first argument: all data
+                     second argument: the current eid
+        :param cond: query condition that selects the elements to use, or
+        :param eids: a list of element IDs to use
+        :returns: the element IDs that were affected during processing
+        """
+
+        data = self._read()
+
+        if eids is not None:
+            # Process elements specified by ID
+            for eid in eids:
+                func(data, eid)
+
+        else:
+            # Collect affected eids
+            eids = []
+
+            # Process elements specified by condition
+            for eid in list(data):
+                if cond(data[eid]):
+                    func(data, eid)
+                    eids.append(eid)
+
+        self._write(data)
+
+        return eids
+
+    def clear_cache(self):
+        """
+        Clear the query cache.
+
+        A simple helper that clears the internal query cache.
+        """
+        self._query_cache.clear()
+
+    def _get_next_id(self):
+        """
+        Increment the last used ID and return it.
+        """
+
+        current_id = self._last_id + 1
+        self._last_id = current_id
+
+        return current_id
+
+    def _read(self):
+        """
+        Reading access to the DB.
+
+        :returns: all values
+        :rtype: dict
+        """
+
+        return self._storage.read()
+
+    def _write(self, values):
+        """
+        Writing access to the DB.
+
+        :param values: the new values to write
+        :type values: dict
+        """
+
+        self._query_cache.clear()
+        self._storage.write(values)
+
+    def __len__(self):
+        """
+        Get the total number of elements in the table.
+        """
+        return len(self._read())
+
+    def all(self):
+        """
+        Get all elements stored in the table.
+
+        :returns: a list with all elements.
+        :rtype: list[Element]
+        """
+
+        return list(itervalues(self._read()))
+
+    def insert(self, element):
+        """
+        Insert a new element into the table.
+
+        :param element: the element to insert
+        :returns: the inserted element's ID
+        """
+
+        eid = self._get_next_id()
+
+        if not isinstance(element, dict):
+            raise ValueError('Element is not a dictionary')
+
+        data = self._read()
+        data[eid] = element
+        self._write(data)
+
+        return eid
+
+    def insert_multiple(self, elements):
+        """
+        Insert multiple elements into the table.
+
+        :param elements: a list of elements to insert
+        :returns: a list containing the inserted elements' IDs
+        """
+
+        eids = []
+        data = self._read()
+
+        for element in elements:
+            eid = self._get_next_id()
+            eids.append(eid)
+
+            data[eid] = element
+
+        self._write(data)
+
+        return eids
+
+    def remove(self, cond=None, eids=None):
+        """
+        Remove all matching elements.
+
+        :param cond: the condition to check against
+        :type cond: query
+        :param eids: a list of element IDs
+        :type eids: list
+        :returns: a list containing the removed elements' IDs
+        """
+
+        return self.process_elements(lambda data, eid: data.pop(eid),
+                                     cond, eids)
+
+    def update(self, fields, cond=None, eids=None):
+        """
+        Update all matching elements to have a given set of fields.
+
+        :param fields: the fields that the matching elements will have
+                       or a method that will update the elements
+        :type fields: dict | dict -> None
+        :param cond: which elements to update
+        :type cond: query
+        :param eids: a list of element IDs
+        :type eids: list
+        :returns: a list containing the updated elements' IDs
+        """
+
+        if callable(fields):
+            return self.process_elements(
+                lambda data, eid: fields(data[eid]),
+                cond, eids
+            )
+        else:
+            return self.process_elements(
+                lambda data, eid: data[eid].update(fields),
+                cond, eids
+            )
+
+    def purge(self):
+        """
+        Purge the table by removing all elements.
+        """
+
+        self._write({})
+        self._last_id = 0
+
+    def search(self, cond):
+        """
+        Search for all elements matching a 'where' cond.
+
+        :param cond: the condition to check against
+        :type cond: Query
+
+        :returns: list of matching elements
+        :rtype: list[Element]
+        """
+
+        if cond in self._query_cache:
+            return self._query_cache[cond]
+
+        elements = [element for element in self.all() if cond(element)]
+        self._query_cache[cond] = elements
+
+        return elements
+
+    def get(self, cond=None, eid=None):
+        """
+        Get exactly one element specified by a query or an ID.
+
+        Returns ``None`` if the element doesn't exist
+
+        :param cond: the condition to check against
+        :type cond: Query
+
+        :param eid: the element's ID
+
+        :returns: the element or None
+        :rtype: Element | None
+        """
+
+        # Cannot use process_elements here because we want to return a
+        # specific element
+
+        if eid is not None:
+            # Element specified by ID
+            return self._read().get(eid, None)
+
+        # Element specified by condition
+        for element in self.all():
+            if cond(element):
+                return element
+
+    def count(self, cond):
+        """
+        Count the elements matching a condition.
+
+        :param cond: the condition to use
+        :type cond: Query
+        """
+
+        return len(self.search(cond))
+
+    def contains(self, cond=None, eids=None):
+        """
+        Check whether the database contains an element matching a condition or
+        an ID.
+
+        If ``eids`` is set, it checks if the db contains an element with one
+        of the specified IDs.
+
+        :param cond: the condition to use
+        :type cond: Query
+        :param eids: the element IDs to look for
+        """
+
+        if eids is not None:
+            # Elements specified by ID
+            return any(self.get(eid=eid) for eid in eids)
+
+        # Element specified by condition
+        return self.get(cond) is not None
+
+# Set the default table class
+TinyDB.table_class = Table
diff --git a/src/tinydb/middlewares.py b/src/tinydb/middlewares.py
new file mode 100644
index 0000000..2dbe5ae
--- /dev/null
+++ b/src/tinydb/middlewares.py
@@ -0,0 +1,116 @@
+"""
+Contains the :class:`base class <tinydb.middlewares.Middleware>` for
+middlewares and implementations.
+"""
+from tinydb import TinyDB
+
+
+class Middleware(object):
+    """
+    The base class for all Middlewares.
+
+    Middlewares hook into the read/write process of TinyDB allowing you to
+    extend the behaviour by adding caching, logging, ...
+
+    Your middleware's ``__init__`` method has to accept exactly one
+    argument which is the class of the "real" storage. It has to be stored as
+    ``_storage_cls`` (see :class:`~tinydb.middlewares.CachingMiddleware` for an
+    example).
+    """
+
+    def __init__(self, storage_cls=TinyDB.DEFAULT_STORAGE):
+        self._storage_cls = storage_cls
+        self.storage = None
+
+    def __call__(self, *args, **kwargs):
+        """
+        Create the storage instance and store it as self.storage.
+
+        Usually a user creates a new TinyDB instance like this::
+
+            TinyDB(storage=StorageClass)
+
+        The storage kwarg is used by TinyDB this way::
+
+            self.storage = storage(*args, **kwargs)
+
+        As we can see, ``storage(...)`` runs the constructor and returns the
+        new storage instance.
+
+
+        Using Middlewares, the user will call::
+
+                                       The 'real' storage class
+                                       v
+            TinyDB(storage=Middleware(StorageClass))
+                       ^
+                       Already an instance!
+
+        So, when running ``self.storage = storage(*args, **kwargs)`` Python
+        now will call ``__call__`` and TinyDB will expect the return value to
+        be the storage (or Middleware) instance. Returning the instance is
+        simple, but we also got the underlying (*real*) StorageClass as an
+        __init__ argument that still is not an instance.
+        So, we initialize it in __call__, forwarding any arguments we receive
+        from TinyDB (``TinyDB(arg1, kwarg1=value, storage=...)``).
+
+        In case of nested Middlewares, calling the instance as if it were a
+        class results in calling ``__call__``, which initializes the next
+        nested Middleware, which in turn initializes the next one, and
+        so on.
+        """
+
+        self.storage = self._storage_cls(*args, **kwargs)
+
+        return self
+
+    def __getattr__(self, name):
+        """
+        Forward all unknown attribute calls to the underlying storage so we
+        remain as transparent as possible.
+        """
+
+        return getattr(self.__dict__['storage'], name)
+
+
+class CachingMiddleware(Middleware):
+    """
+    Add some caching to TinyDB.
+
+    This Middleware aims to improve the performance of TinyDB by writing
+    the last DB state only every :attr:`WRITE_CACHE_SIZE` write operations
+    and by always reading from the cache.
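+
+    A minimal usage sketch (the file path is illustrative):
+
+    >>> from tinydb import TinyDB
+    >>> from tinydb.storages import JSONStorage
+    >>> db = TinyDB('/tmp/db.json', storage=CachingMiddleware(JSONStorage))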
+    """
+
+    #: The number of write operations to cache before writing to disc
+    WRITE_CACHE_SIZE = 1000
+
+    def __init__(self, storage_cls=TinyDB.DEFAULT_STORAGE):
+        super(CachingMiddleware, self).__init__(storage_cls)
+
+        self.cache = None
+        self._cache_modified_count = 0
+
+    def read(self):
+        if self.cache is None:
+            self.cache = self.storage.read()
+        return self.cache
+
+    def write(self, data):
+        self.cache = data
+        self._cache_modified_count += 1
+
+        if self._cache_modified_count >= self.WRITE_CACHE_SIZE:
+            self.flush()
+
+    def flush(self):
+        """
+        Flush all unwritten data to disk.
+        """
+        if self._cache_modified_count > 0:
+            self.storage.write(self.cache)
+            self._cache_modified_count = 0
+
+    def close(self):
+        self.flush()  # Flush potentially unwritten data
+        self.storage.close()
diff --git a/src/tinydb/operations.py b/src/tinydb/operations.py
new file mode 100644
index 0000000..fb18964
--- /dev/null
+++ b/src/tinydb/operations.py
@@ -0,0 +1,28 @@
+def delete(field):
+    """
+    Delete a given field from the element.
+    """
+    def transform(element):
+        del element[field]
+
+    return transform
+
+
+def increment(field):
+    """
+    Increment a given field in the element.
+    """
+    def transform(element):
+        element[field] += 1
+
+    return transform
+
+
+def decrement(field):
+    """
+    Decrement a given field in the element.
+    """
+    def transform(element):
+        element[field] -= 1
+
+    return transform
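+
+
+# Usage sketch (names are illustrative): these transforms are meant to be
+# passed as the callable ``fields`` argument of ``Table.update``, e.g.
+# ``db.update(increment('counter'), where('name') == 'foo')``.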
diff --git a/src/tinydb/queries.py b/src/tinydb/queries.py
new file mode 100644
index 0000000..d62b9df
--- /dev/null
+++ b/src/tinydb/queries.py
@@ -0,0 +1,342 @@
+"""
+Contains the querying interface.
+
+Starting with :class:`~tinydb.queries.Query` you can construct complex
+queries:
+
+>>> ((where('f1') == 5) & (where('f2') != 2)) | where('s').matches(r'^\w+$')
+(('f1' == 5) and ('f2' != 2)) or ('s' ~= ^\w+$ )
+
+Queries are executed by using the ``__call__``:
+
+>>> q = where('val') == 5
+>>> q({'val': 5})
+True
+>>> q({'val': 1})
+False
+"""
+
+import re
+import sys
+
+from tinydb.utils import catch_warning, freeze
+
+__all__ = ('Query', 'where')
+
+
+def is_sequence(obj):
+    return hasattr(obj, '__iter__')
+
+
+class QueryImpl(object):
+    """
+    A query implementation.
+
+    This query implementation wraps a test function which is run when the
+    query is evaluated by calling the object.
+
+    Queries can be combined with logical and/or and modified with logical not.
+    """
+    def __init__(self, test, hashval):
+        self.test = test
+        self.hashval = hashval
+
+    def __call__(self, value):
+        return self.test(value)
+
+    def __hash__(self):
+        return hash(self.hashval)
+
+    def __repr__(self):
+        return 'QueryImpl{0}'.format(self.hashval)
+
+    def __eq__(self, other):
+        return self.hashval == other.hashval
+
+    # --- Query modifiers -----------------------------------------------------
+
+    def __and__(self, other):
+        # We use a frozenset for the hash as the AND operation is commutative
+        # (a & b == b & a)
+        return QueryImpl(lambda value: self(value) and other(value),
+                         ('and', frozenset([self.hashval, other.hashval])))
+
+    def __or__(self, other):
+        # We use a frozenset for the hash as the OR operation is commutative
+        # (a | b == b | a)
+        return QueryImpl(lambda value: self(value) or other(value),
+                         ('or', frozenset([self.hashval, other.hashval])))
+
+    def __invert__(self):
+        return QueryImpl(lambda value: not self(value),
+                         ('not', self.hashval))
+
+
+class Query(object):
+    """
+    TinyDB Queries.
+
+    Allows building queries for TinyDB databases. There are two main ways of
+    using queries:
+
+    1) ORM-like usage:
+
+    >>> User = Query()
+    >>> db.search(User.name == 'John Doe')
+    >>> db.search(User['logged-in'] == True)
+
+    2) Classical usage:
+
+    >>> db.search(where('value') == True)
+
+    Note that ``where(...)`` is a shorthand for ``Query(...)`` allowing for
+    a more fluent syntax.
+
+    Besides the methods documented here you can combine queries using the
+    binary AND and OR operators:
+
+    >>> db.search(where('field1').exists() & where('field2') == 5)  # Binary AND
+    >>> db.search(where('field1').exists() | where('field2') == 5)  # Binary OR
+
+    Queries are executed by calling the resulting object. They expect to get the
+    element to test as the first argument and return ``True`` or ``False``
+    depending on whether the element matches the query or not.
+    """
+
+    def __init__(self):
+        self._path = []
+
+    def __getattr__(self, item):
+        query = Query()
+        query._path = self._path + [item]
+
+        return query
+
+    __getitem__ = __getattr__
+
+    def _generate_test(self, test, hashval):
+        """
+        Generate a query based on a test function.
+
+        :param test: The test the query executes.
+        :param hashval: The hash of the query.
+        :return: A :class:`~tinydb.queries.QueryImpl` object
+        """
+        if not self._path:
+            raise ValueError('Query has no path')
+
+        def impl(value):
+            try:
+                # Resolve the path
+                for part in self._path:
+                    value = value[part]
+            except (KeyError, TypeError):
+                return False
+            else:
+                return test(value)
+
+        return QueryImpl(impl, hashval)
+
+    def __eq__(self, rhs):
+        """
+        Test a dict value for equality.
+
+        >>> Query().f1 == 42
+
+        :param rhs: The value to compare against
+        """
+        if sys.version_info <= (3, 0):  # pragma: no cover
+            # Special UTF-8 handling on Python 2
+            def test(value):
+                with catch_warning(UnicodeWarning):
+                    try:
+                        return value == rhs
+                    except UnicodeWarning:
+                        # Dealing with a case, where 'value' or 'rhs'
+                        # is unicode and the other is a byte string.
+                        if isinstance(value, str):
+                            return value.decode('utf-8') == rhs
+                        elif isinstance(rhs, str):
+                            return value == rhs.decode('utf-8')
+
+        else:  # pragma: no cover
+            def test(value):
+                return value == rhs
+
+        return self._generate_test(lambda value: test(value),
+                                   ('==', tuple(self._path), freeze(rhs)))
+
+    def __ne__(self, rhs):
+        """
+        Test a dict value for inequality.
+
+        >>> Query().f1 != 42
+
+        :param rhs: The value to compare against
+        """
+        return self._generate_test(lambda value: value != rhs,
+                                   ('!=', tuple(self._path), freeze(rhs)))
+
+    def __lt__(self, rhs):
+        """
+        Test a dict value for being lower than another value.
+
+        >>> Query().f1 < 42
+
+        :param rhs: The value to compare against
+        """
+        return self._generate_test(lambda value: value < rhs,
+                                   ('<', tuple(self._path), rhs))
+
+    def __le__(self, rhs):
+        """
+        Test a dict value for being lower than or equal to another value.
+
+        >>> where('f1') <= 42
+
+        :param rhs: The value to compare against
+        """
+        return self._generate_test(lambda value: value <= rhs,
+                                   ('<=', tuple(self._path), rhs))
+
+    def __gt__(self, rhs):
+        """
+        Test a dict value for being greater than another value.
+
+        >>> Query().f1 > 42
+
+        :param rhs: The value to compare against
+        """
+        return self._generate_test(lambda value: value > rhs,
+                                   ('>', tuple(self._path), rhs))
+
+    def __ge__(self, rhs):
+        """
+        Test a dict value for being greater than or equal to another value.
+
+        >>> Query().f1 >= 42
+
+        :param rhs: The value to compare against
+        """
+        return self._generate_test(lambda value: value >= rhs,
+                                   ('>=', tuple(self._path), rhs))
+
+    def exists(self):
+        """
+        Test for a dict where a provided key exists.
+
+        >>> Query().f1.exists()
+        """
+        return self._generate_test(lambda _: True,
+                                   ('exists', tuple(self._path)))
+
+    def matches(self, regex):
+        """
+        Run a regex test against a dict value (whole string has to match).
+
+        >>> Query().f1.matches(r'^\w+$')
+
+        :param regex: The regular expression to use for matching
+        """
+        return self._generate_test(lambda value: re.match(regex, value),
+                                   ('matches', tuple(self._path), regex))
+
+    def search(self, regex):
+        """
+        Run a regex test against a dict value (only a substring has to
+        match).
+
+        >>> Query().f1.search(r'^\w+$')
+
+        :param regex: The regular expression to use for matching
+        """
+        return self._generate_test(lambda value: re.search(regex, value),
+                                   ('search', tuple(self._path), regex))
+
+    def test(self, func, *args):
+        """
+        Run a user-defined test function against a dict value.
+
+        >>> def test_func(val):
+        ...     return val == 42
+        ...
+        >>> Query().f1.test(test_func)
+
+        :param func: The function to call, passing the dict as the first
+                     argument
+        :param args: Additional arguments to pass to the test function
+        """
+        return self._generate_test(lambda value: func(value, *args),
+                                   ('test', tuple(self._path), func, args))
+
+    def any(self, cond):
+        """
+        Checks if a condition is met by any element in a list,
+        where a condition can also be a sequence (e.g. list).
+
+        >>> Query().f1.any(Query().f2 == 1)
+
+        Matches::
+
+            {'f1': [{'f2': 1}, {'f2': 0}]}
+
+        >>> Query().f1.any([1, 2, 3])
+        # Match f1 that contains any element from [1, 2, 3]
+
+        Matches::
+
+            {'f1': [1, 2]}
+            {'f1': [3, 4, 5]}
+
+        :param cond: Either a query that at least one element has to match or
+                     a list of which at least one element has to be contained
+                     in the tested element.
+        """
+        if callable(cond):
+            def _cmp(value):
+                return is_sequence(value) and any(cond(e) for e in value)
+
+        else:
+            def _cmp(value):
+                return is_sequence(value) and any(e in cond for e in value)
+
+        return self._generate_test(lambda value: _cmp(value),
+                                   ('any', tuple(self._path), freeze(cond)))
+
+    def all(self, cond):
+        """
+        Checks if a condition is met by all elements in a list,
+        where a condition can also be a sequence (e.g. list).
+
+        >>> Query().f1.all(Query().f2 == 1)
+
+        Matches::
+
+            {'f1': [{'f2': 1}, {'f2': 1}]}
+
+        >>> Query().f1.all([1, 2, 3])
+        # Match f1 that contains all elements from [1, 2, 3]
+
+        Matches::
+
+            {'f1': [1, 2, 3, 4, 5]}
+
+        :param cond: Either a query that all elements have to match or a list
+                     which has to be contained in the tested element.
+        """
+        if callable(cond):
+            def _cmp(value):
+                return is_sequence(value) and all(cond(e) for e in value)
+
+        else:
+            def _cmp(value):
+                return is_sequence(value) and all(e in value for e in cond)
+
+        return self._generate_test(lambda value: _cmp(value),
+                                   ('all', tuple(self._path), freeze(cond)))
+
+
+def where(key):
+    return Query()[key]
diff --git a/src/tinydb/storages.py b/src/tinydb/storages.py
new file mode 100644
index 0000000..134d907
--- /dev/null
+++ b/src/tinydb/storages.py
@@ -0,0 +1,132 @@
+"""
+Contains the :class:`base class <tinydb.storages.Storage>` for storages and
+implementations.
+"""
+
+from abc import ABCMeta, abstractmethod
+import os
+
+from tinydb.utils import with_metaclass
+
+
+try:
+    import ujson as json
+except ImportError:
+    import json
+
+
+def touch(fname, times=None, create_dirs=False):
+    if create_dirs:
+        base_dir = os.path.dirname(fname)
+        if not os.path.exists(base_dir):
+            os.makedirs(base_dir)
+    with open(fname, 'a'):
+        os.utime(fname, times)
+
+
+class Storage(with_metaclass(ABCMeta, object)):
+    """
+    The abstract base class for all Storages.
+
+    A Storage (de)serializes the current state of the database and stores it in
+    some place (memory, file on disk, ...).
+    """
+
+    # Using ABCMeta as metaclass allows instantiating only storages that have
+    # implemented read and write
+
+    @abstractmethod
+    def read(self):
+        """
+        Read the last stored state.
+
+        Any kind of deserialization should go here.
+        Return ``None`` here to indicate that the storage is empty.
+
+        :rtype: dict
+        """
+
+        raise NotImplementedError('To be overridden!')
+
+    @abstractmethod
+    def write(self, data):
+        """
+        Write the current state of the database to the storage.
+
+        Any kind of serialization should go here.
+
+        :param data: The current state of the database.
+        :type data: dict
+        """
+
+        raise NotImplementedError('To be overridden!')
+
+    def close(self):
+        """
+        Optional: Close open file handles, etc.
+        """
+
+        pass
+
+
+class JSONStorage(Storage):
+    """
+    Store the data in a JSON file.
+    """
+
+    def __init__(self, path, create_dirs=False, **kwargs):
+        """
+        Create a new instance.
+
+        Also creates the storage file, if it doesn't exist.
+
+        :param path: Where to store the JSON data.
+        :type path: str
+        """
+
+        super(JSONStorage, self).__init__()
+        touch(path, create_dirs=create_dirs)  # Create file if not exists
+        self.kwargs = kwargs
+        self._handle = open(path, 'r+')
+
+    def close(self):
+        self._handle.close()
+
+    def read(self):
+        # Get the file size
+        self._handle.seek(0, os.SEEK_END)
+        size = self._handle.tell()
+
+        if not size:
+            # File is empty
+            return None
+        else:
+            self._handle.seek(0)
+            return json.load(self._handle)
+
+    def write(self, data):
+        self._handle.seek(0)
+        serialized = json.dumps(data, **self.kwargs)
+        self._handle.write(serialized)
+        self._handle.flush()
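+        # Truncate in case the new payload is shorter than the previous one;
+        # otherwise stale JSON would be left at the end of the file.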
+        self._handle.truncate()
+
+
+class MemoryStorage(Storage):
+    """
+    Store the data as JSON in memory.
+    """
+
+    def __init__(self):
+        """
+        Create a new instance.
+        """
+
+        super(MemoryStorage, self).__init__()
+        self.memory = None
+
+    def read(self):
+        return self.memory
+
+    def write(self, data):
+        self.memory = data
diff --git a/src/tinydb/utils.py b/src/tinydb/utils.py
new file mode 100644
index 0000000..4a32270
--- /dev/null
+++ b/src/tinydb/utils.py
@@ -0,0 +1,140 @@
+"""
+Utility functions.
+"""
+
+from contextlib import contextmanager
+import warnings
+
+# Python 2/3 independent dict iteration
+iteritems = getattr(dict, 'iteritems', dict.items)
+itervalues = getattr(dict, 'itervalues', dict.values)
+
+
+class LRUCache(dict):
+    """
+    A simple LRU cache.
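+
+    A small illustration of the eviction behaviour:
+
+    >>> cache = LRUCache(capacity=2)
+    >>> cache['a'] = 1
+    >>> cache['b'] = 2
+    >>> cache['c'] = 3  # evicts 'a', the least recently used key
+    >>> 'a' in cache
+    False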
+    """
+
+    def __init__(self, *args, **kwargs):
+        """
+        :param capacity: How many items to store before cleaning up old items
+                         or ``None`` for an unlimited cache size
+        """
+
+        self.capacity = kwargs.pop('capacity', None) or float('nan')
+        self.lru = []
+
+        super(LRUCache, self).__init__(*args, **kwargs)
+
+    def refresh(self, key):
+        """
+        Push a key to the tail of the LRU queue
+        """
+        if key in self.lru:
+            self.lru.remove(key)
+        self.lru.append(key)
+
+    def get(self, key, default=None):
+        item = super(LRUCache, self).get(key, default)
+        # Refresh only keys that are actually present; otherwise a missing
+        # key would enter the LRU queue and later break eviction in
+        # __setitem__.
+        if key in self:
+            self.refresh(key)
+
+        return item
+
+    def __getitem__(self, key):
+        item = super(LRUCache, self).__getitem__(key)
+        self.refresh(key)
+
+        return item
+
+    def __setitem__(self, key, value):
+        super(LRUCache, self).__setitem__(key, value)
+
+        self.refresh(key)
+
+        # Check, if the cache is full and we have to remove old items
+        # If the queue is of unlimited size, self.capacity is NaN and
+        # x > NaN is always False in Python and the cache won't be cleared.
+        if len(self) > self.capacity:
+            self.pop(self.lru.pop(0))
+
+    def __delitem__(self, key):
+        super(LRUCache, self).__delitem__(key)
+        self.lru.remove(key)
+
+    def clear(self):
+        super(LRUCache, self).clear()
+        del self.lru[:]
+
+
+# Source: https://github.com/PythonCharmers/python-future/blob/466bfb2dfa36d865285dc31fe2b0c0a53ff0f181/future/utils/__init__.py#L102-L134
+def with_metaclass(meta, *bases):
+    """
+    Function from jinja2/_compat.py. License: BSD.
+
+    Use it like this::
+
+        class BaseForm(object):
+            pass
+
+        class FormType(type):
+            pass
+
+        class Form(with_metaclass(FormType, BaseForm)):
+            pass
+
+    This requires a bit of explanation: the basic idea is to make a
+    dummy metaclass for one level of class instantiation that replaces
+    itself with the actual metaclass.  Because of internal type checks
+    we also need to make sure that we downgrade the custom metaclass
+    for one level to something closer to type (that's why __call__ and
+    __init__ comes back from type etc.).
+
+    This has the advantage over six.with_metaclass of not introducing
+    dummy classes into the final MRO.
+    """
+
+    class Metaclass(meta):
+        __call__ = type.__call__
+        __init__ = type.__init__
+
+        def __new__(cls, name, this_bases, d):
+            if this_bases is None:
+                return type.__new__(cls, name, (), d)
+            return meta(name, bases, d)
+
+    return Metaclass('temporary_class', None, {})
+
+
+@contextmanager
+def catch_warning(warning_cls):
+    with warnings.catch_warnings():
+        warnings.filterwarnings('error', category=warning_cls)
+
+        yield
+
+
+class FrozenDict(dict):
+    def __hash__(self):
+        return hash(tuple(sorted(self.items())))
+
+    def _immutable(self, *args, **kws):
+        raise TypeError('object is immutable')
+
+    __setitem__ = _immutable
+    __delitem__ = _immutable
+    clear = _immutable
+    update = _immutable
+    setdefault = _immutable
+    pop = _immutable
+    popitem = _immutable
+
+
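+# ``freeze`` recursively converts mutable containers into hashable
+# equivalents (dict -> FrozenDict, list -> tuple, set -> frozenset) so that
+# query arguments can serve as cache keys; e.g. ``freeze({'a': [1, 2]})``
+# returns a ``FrozenDict`` holding ``{'a': (1, 2)}``.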
+def freeze(obj):
+    if isinstance(obj, dict):
+        return FrozenDict((k, freeze(v)) for k, v in obj.items())
+    elif isinstance(obj, list):
+        return tuple(freeze(el) for el in obj)
+    elif isinstance(obj, set):
+        return frozenset(obj)
+    else:
+        return obj
-- 
GitLab