From a7caba09746f52e5f107ab2182501b67b445153f Mon Sep 17 00:00:00 2001
From: "roberto.borghes" <roberto.borghes@elettra.eu>
Date: Tue, 14 Feb 2023 14:22:14 +0100
Subject: [PATCH] Adapted and briefly tested for python3

---
 src/DataBuffer.py            |  4 +--
 src/DataStorage.py           | 27 +++++++-------
 src/DirectorBgnThread.py     | 64 +++++++++++++++++----------------
 src/DonkiDirectorServer.py   |  4 +--
 src/DonkiDirector_cmdline.py |  8 ++---
 src/DonkiOrchestraLib.py     | 10 +++---
 src/InfoServer.py            | 23 ++++++------
 src/hdfwriter.py             | 68 ++++++++++++++++++------------------
 8 files changed, 106 insertions(+), 102 deletions(-)

diff --git a/src/DataBuffer.py b/src/DataBuffer.py
index 4aafe89..567281e 100644
--- a/src/DataBuffer.py
+++ b/src/DataBuffer.py
@@ -112,10 +112,10 @@ class donkiBuffer(threading.Thread):
             self.trg_range[data_name] = [1,1]
         idx = 0
         for dd in data_in:
-	    if hasattr(dd, "nbytes"):
+            if hasattr(dd, "nbytes"):
                 # numpy ndarray case
                 buffersize = len(self.dbuffers[data_name]) * dd.nbytes
-	    else:
+            else:
                 buffersize = len(self.dbuffers[data_name]) * sys.getsizeof(dd)
             if buffersize > self.max_size:
                 try:
diff --git a/src/DataStorage.py b/src/DataStorage.py
index 73695e8..45e5f2b 100644
--- a/src/DataStorage.py
+++ b/src/DataStorage.py
@@ -19,8 +19,7 @@ _dbg = False
 class DataStorage(multiprocessing.Process):
     """
     Do management of all Data for the HDF5 files. 
-    
-    
+  
     """
     def __init__(self, data_queue, task_queue, notif_queue):
         multiprocessing.Process.__init__(self)
@@ -106,7 +105,7 @@ class DataStorage(multiprocessing.Process):
                     self.mutex.release()
                     store_loop += 1
         except:
-            print traceback.format_exc()
+            traceback.print_exc()
         #
         for _hdf in self._hdfwriters:
             _hdf.ptr2hdfwriter.stop_thread()
@@ -191,7 +190,7 @@ class DataStorage(multiprocessing.Process):
                 #
                 self.task_queue.task_done()
             except:
-                print traceback.format_exc()
+                traceback.print_exc()
 
         
 
@@ -211,14 +210,14 @@ class DataStorage(multiprocessing.Process):
                 if isinstance(data_in[3], list) or isinstance(data_in[3], numpy.ndarray):
                     self.data_archives.append((daq_key,bn_in,bn_fi,data_in[3]))
                     if len(data_in[3]) != (bn_fi - bn_in + 1):
-                        print "MMMMMMM.....",daq_key,bn_in,bn_fi,len(data_in[3])
+                        print("MMMMMMM.....",daq_key,bn_in,bn_fi,len(data_in[3]))
             elif len(data_in) == 3:
                 # Metadata
                 metadaq_key = data_in[0]
                 bn_in = data_in[1]
                 self.metadata_archives[metadaq_key] = [bn_in,data_in[2]]
         except:
-            print traceback.format_exc()
+            traceback.print_exc()
             
 
     def get_metadata(self):
@@ -243,7 +242,7 @@ class DataStorage(multiprocessing.Process):
         """
         try:
             if _dbg:
-                print "DataServer Data Ready",daq_key,bn_in,bn_f
+                print("DataServer Data Ready",daq_key,bn_in,bn_f)
             # first of all: we must be sure that there are hdfs allocated
             # for the given range:
             # so, if the last allocated bunch number is lower
@@ -316,7 +315,7 @@ class DataStorage(multiprocessing.Process):
                                     for hdfw in self._hdfwriters:
                                         hdfw.ptr2hdfwriter.daq_switch_off([dkey])
                                     self.notify_queue.put(['update_report',"Removed inactive data source %s" % dkey])
-                        free_hdfwriter.ptr2hdfwriter.daq_switch_on(self._daq_list.keys())
+                        free_hdfwriter.ptr2hdfwriter.daq_switch_on(list(self._daq_list.keys()))
                         free_hdfwriter.working = True
                         if (self.allocated_bunch_range[0] <= 0):
                             self.allocated_bunch_range = (all_bn_in, all_bn_f)
@@ -337,12 +336,12 @@ class DataStorage(multiprocessing.Process):
                 last_avail_bn = self.allocated_bunch_range[1]
                 self.data_archives.append((daq_key, last_avail_bn+1, bn_f,data_in[-(bn_f-last_avail_bn):]))
                 if len(data_in[-(bn_f-last_avail_bn):]) != (bn_f- (last_avail_bn + 1) + 1):
-                    print "UUUUUUU.....",daq_key,last_avail_bn+1,bn_f,len(data_in[-(bn_f-last_avail_bn):])
+                    print("UUUUUUU.....",daq_key,last_avail_bn+1,bn_f,len(data_in[-(bn_f-last_avail_bn):]))
                 #
                 data_in = data_in[:-(bn_f-last_avail_bn)]
                 bn_f = last_avail_bn
                 if len(data_in) != (bn_f-bn_in+1):
-                    print "********",daq_key,len(data_in),(bn_f-bn_in),bn_in,bn_f
+                    print("********",daq_key,len(data_in),(bn_f-bn_in),bn_in,bn_f)
             #
             if (bn_in < self.allocated_bunch_range[0]):
                 # purge too old data
@@ -352,7 +351,7 @@ class DataStorage(multiprocessing.Process):
                 data_in = data_in[-(bn_f-self.allocated_bunch_range[0]+1):]
                 bn_in = self.allocated_bunch_range[0]
                 if len(data_in) != (bn_f-bn_in+1):
-                    print "#########",daq_key,len(data_in),(bn_f-bn_in),bn_in,bn_f
+                    print("#########",daq_key,len(data_in),(bn_f-bn_in),bn_in,bn_f)
             #
             #
             # Look for all working hdfs which of them has the initial
@@ -411,9 +410,9 @@ class DataStorage(multiprocessing.Process):
                     self.notify_queue.put(['update_report',msg])
                 self.data_archives.append((daq_key,bn_in,bn_f,data_in))
                 if len(data_in) != (bn_f - bn_in + 1):
-                    print "XXXXXXXXX.....",daq_key,bn_in,bn_f,len(data_in)
+                    print("XXXXXXXXX.....",daq_key,bn_in,bn_f,len(data_in))
         except:
-            print traceback.format_exc()
+            traceback.print_exc()
             pass
 
     def put_msg_in_notify_queue(self, topic_in):
@@ -452,7 +451,7 @@ class DataStorage(multiprocessing.Process):
             if not self.Files_contiguous:
                 self.files_opened -= 1
         except:
-            print traceback.format_exc()
+            traceback.print_exc()
         self.mutex.release()
 
 
diff --git a/src/DirectorBgnThread.py b/src/DirectorBgnThread.py
index 7699fec..e999297 100644
--- a/src/DirectorBgnThread.py
+++ b/src/DirectorBgnThread.py
@@ -36,7 +36,7 @@ class directorThread(threading.Thread):
     def __init__(self, inforserver_port = 50010):
         threading.Thread.__init__(self)
         self._alive = True
-        self._started = False
+        self._is_started = False
         self._paused = False
         self._datastorage_under_pressure = False
         self.actual_priority = 0
@@ -126,7 +126,7 @@ class directorThread(threading.Thread):
             if self._last_player_table == new_info:
                 return
             if DEBUG:
-                print "players changed",new_info
+                print("players changed",new_info)
             try:
                 self.PlayersInfo.clear()
                 for pl in new_info:
@@ -136,7 +136,7 @@ class directorThread(threading.Thread):
                 traceback.print_exc()
         elif str(tablename) == 'dataaliases':
             if DEBUG:
-                print "data aliases changed",new_info
+                print("data aliases changed",new_info)
             try:
                 self.DataAliases.clear()
                 for da in new_info:
@@ -145,7 +145,7 @@ class directorThread(threading.Thread):
                 traceback.print_exc()
         elif str(tablename) == 'datasources_enabled':
             if DEBUG:
-                print "data datasources_enabled changed",new_info
+                print("data datasources_enabled changed",new_info)
             try:
                 self.DaqSourcesEnabled.clear()
                 for da in new_info:
@@ -154,7 +154,7 @@ class directorThread(threading.Thread):
                 traceback.print_exc()
         elif str(tablename) == 'metadata_aliases':
             if DEBUG:
-                print "metadata aliases changed",new_info
+                print("metadata aliases changed",new_info)
             try:
                 self.MetaDataAliases.clear()
                 for da in new_info:
@@ -163,7 +163,7 @@ class directorThread(threading.Thread):
                 traceback.print_exc()
         elif str(tablename) == 'metadata_enabled':
             if DEBUG:
-                print "data metadata_enabled changed",new_info
+                print("data metadata_enabled changed",new_info)
             try:
                 self.MetaDaqsEnabled.clear()
                 for da in new_info:
@@ -188,10 +188,10 @@ class directorThread(threading.Thread):
                 if reconnect and (not self.zcc.create_sub_socket(pl_name,self.PlayersInfo[pl_name]['url'])):
                     continue
                 if DEBUG:
-                    print "Asking info to", pl_name
+                    print("Asking info to", pl_name)
                 info = self.zcc.ask_for_info(pl_name)
                 if DEBUG:
-                    print info
+                    print(info)
                 if len(info) == 0:
                     not_active_players.append(pl_name)
                     continue
@@ -251,13 +251,15 @@ class directorThread(threading.Thread):
                 self.PlayersInfo[pl_name]['type'] ='Unknown'
                 self.PlayersInfo[pl_name]['freerun'] = False
             # Clear not existing metadata from the DB
-            for mda in self.MetaDataAliases.keys():
+            _mdal = list(self.MetaDataAliases.keys())
+            for mda in _mdal:
                 if mda not in self.daq_sources or not self.daq_sources[mda].metadata:
                     self.MetaDataAliases.pop(mda)
                     #self.infoServer.del_from_db(['metadata_aliases',mda])
                     #self.infoServer.del_from_db(['metadata_enabled',mda])
+            _dal = list(self.DataAliases.keys())
             # Clear not existing datasources from the DB
-            for da in self.DataAliases.keys():
+            for da in _dal:
                 if da not in self.daq_sources or self.daq_sources[da].metadata:
                     self.DataAliases.pop(da)
                     #self.infoServer.del_from_db(['dataaliases',da])
@@ -340,7 +342,7 @@ class directorThread(threading.Thread):
                 log_msg = "Error: unable to set priority of player %s"% player_name
                 self._report_message(log_msg,with_date = True)
                 if DEBUG:
-                    print log_msg
+                    print(log_msg)
         except:
             if DEBUG:
                 traceback.print_exc()
@@ -357,7 +359,7 @@ class directorThread(threading.Thread):
                 log_msg = "Error: unable to set freerun of player %s"% player_name
                 self._report_message(log_msg,with_date = True)
                 if DEBUG:
-                    print log_msg
+                    print(log_msg)
         except:
             if DEBUG:
                 traceback.print_exc()
@@ -405,7 +407,7 @@ class directorThread(threading.Thread):
             # Fake trigger value
             return
         if DEBUG:
-            print "NEW DATA",data_name,trg_in, trg_f
+            print("NEW DATA",data_name,trg_in, trg_f)
         if self.data_buffering_enabled:
             self.buffer_data_queue.put([data_name,trg_in,trg_f,data_in])
         if not self.EnableDataSaving:
@@ -420,7 +422,7 @@ class directorThread(threading.Thread):
         if not self.EnableDataSaving:
             return
         if DEBUG:
-            print "NEW METADATA",metadata_name,trg_in
+            print("NEW METADATA",metadata_name,trg_in)
         self.datastorage_data_queue.put([metadata_name,trg_in,data_in])
 
 
@@ -431,7 +433,7 @@ class directorThread(threading.Thread):
         while self.datastorage_notif_queue.qsize() > 0:
             last_notif = self.datastorage_notif_queue.get()
             if DEBUG:
-                print last_notif
+                print(last_notif)
             if last_notif[0] == 'hdf_finished':
                 self.last_files_saved.append(last_notif[2])
                 self._report_message(last_notif[-1],with_date = True)
@@ -462,7 +464,7 @@ class directorThread(threading.Thread):
         if with_date:
             message_in = time.asctime() + " " + message_in
         if DEBUG:
-            print message_in
+            print(message_in)
         new_report = ("\n".join([self._Report,message_in.strip()])).split("\n")
         self._Report = "\n".join(new_report[-5000:])
 
@@ -643,7 +645,7 @@ class directorThread(threading.Thread):
 #-----------------------------------------------------------------------------------
     def quit_and_exit(self):
         self._alive = False
-        self._started = False
+        self._is_started = False
 
 #-----------------------------------------------------------------------------------
 #    get_last_file_saved
@@ -718,7 +720,7 @@ class directorThread(threading.Thread):
 #    please_start
 #-----------------------------------------------------------------------------------
     def please_start(self):
-        self._started = True
+        self._is_started = True
 
 #-----------------------------------------------------------------------------------
 #    set_paused
@@ -738,7 +740,7 @@ class directorThread(threading.Thread):
 #    abort
 #-----------------------------------------------------------------------------------
     def abort(self):
-        self._started = False
+        self._is_started = False
         self.datastorage_task_queue.put(['stop_and_clear'])
 
 #-----------------------------------------------------------------------------------
@@ -747,7 +749,7 @@ class directorThread(threading.Thread):
     def run(self):
         knownPlayersInfo = self.PlayersInfo.copy()
         while self._alive:
-            if not self._started:
+            if not self._is_started:
                 # IDLE state, check only Players status
                 try:
                     self._check_players_changed()
@@ -757,10 +759,10 @@ class directorThread(threading.Thread):
                     else:
                         # Send a dummy negative trigger, something like a 'ping'
                         self.zcc.publish_trigger(-1, -1)
-                        not_responding_Players = self.PlayersInfo.keys()
+                        not_responding_Players = list(self.PlayersInfo.keys())
                         t0 = time.time()
                         need_to_update_infos = False
-                        while not_responding_Players and not self._started:
+                        while not_responding_Players and not self._is_started:
                             pl_msgs = self.zcc.wait_message(not_responding_Players)
                             if pl_msgs is not None and len(pl_msgs):
                                 for pl in pl_msgs:
@@ -777,7 +779,7 @@ class directorThread(threading.Thread):
                                         self._manage_message(pl,new_msg)
                             elif (time.time() - t0) > 5:
                                 if DEBUG:
-                                    print "NOT RESPONDING",not_responding_Players
+                                    print("NOT RESPONDING",not_responding_Players)
                                 for pl in not_responding_Players:
                                     if pl in self.PlayersInfo and 'status' in self.PlayersInfo[pl].keys():
                                         if self.PlayersInfo[pl]['status'] != 'ALARM':
@@ -794,7 +796,7 @@ class directorThread(threading.Thread):
                 self.ResetReport()
                 upper_priority = self._retrieve_players_info()
                 if not self._start_stop_Players(True):
-                    self._started = False
+                    self._is_started = False
                     # Manage existing log messages
                     pl_msgs = self.zcc.wait_message(self.PlayersInfo.keys(), timeout_sec = 0.2)
                     if pl_msgs is not None and len(pl_msgs):
@@ -816,7 +818,7 @@ class directorThread(threading.Thread):
                 self.trg = 0
                 t_start = time.time()
                 while ((self.trg < self.max_triggers) or (self.max_triggers < 0)) and self._alive:
-                    if not self._started :
+                    if not self._is_started :
                         break
                     self._check_datastorage_notification()
                     # Manage pause
@@ -844,11 +846,11 @@ class directorThread(threading.Thread):
                         for pl in self.busy_Players:
                             self.PlayersInfo[pl]['status'] ='ON'
                         if DEBUG:
-                            print "----","TRIGGER:",self.trg,"PRIORITY:",priority,"----"
+                            print("----","TRIGGER:",self.trg,"PRIORITY:",priority,"----")
                         self.actual_priority = priority
                         t0 = time.time()
                         self.zcc.publish_trigger(self.trg, priority)
-                        while self.busy_Players:# and self._started:
+                        while self.busy_Players:# and self._is_started:
                             pl_msgs = self.zcc.wait_message(self.busy_Players)
                             if pl_msgs is not None and len(pl_msgs):
                                 for pl in pl_msgs:
@@ -863,7 +865,7 @@ class directorThread(threading.Thread):
                                         self._manage_message(pl,new_msg)
                             elif (time.time() - t0) > self._players_timeout:
                                 # 20.05.2021 RB: next line added to abort in timeout case
-                                self._started = False
+                                self._is_started = False
                                 for pl in self.busy_Players:
                                     self._report_message("Player %s Timeout: aborting" % pl,with_date=True)
                                     self.PlayersInfo[pl]['status'] ='ALARM'
@@ -871,7 +873,7 @@ class directorThread(threading.Thread):
                                     del self.busy_Players[idx]
                                 t0 = time.time()
                         if DEBUG:
-                            print "Delay:",(time.time()-t0) * 1000,"ms"
+                            print("Delay:",(time.time()-t0) * 1000,"ms")
                         if self.slowest_player_time < (time.time()-t0):
                             self.slowest_player_time = (time.time()-t0)
                 # Acquisition loop finished
@@ -883,7 +885,7 @@ class directorThread(threading.Thread):
                 for pl in self.PlayersInfo:
                     if self.PlayersInfo[pl]['freerun']:
                         self.busy_Players.append(pl)
-                while self._started and self._alive and self.busy_Players:
+                while self._is_started and self._alive and self.busy_Players:
                     pl_msgs = self.zcc.wait_message(self.busy_Players)
                     if pl_msgs is not None and len(pl_msgs):
                         for pl in pl_msgs:
@@ -895,7 +897,7 @@ class directorThread(threading.Thread):
                             self._manage_message(pl,new_msg)
                 self._state = "OFF"
                 self._report_message("DonkiDirector stopped",with_date=True)
-                self._started = False
+                self._is_started = False
                 # set hdfwriters timeout for closing waiting threads
                 self.datastorage_task_queue.put(['set_file_timeout', max(self.slowest_player_time*2, 10)])
                 self.datastorage_task_queue.join()
diff --git a/src/DonkiDirectorServer.py b/src/DonkiDirectorServer.py
index 5ed8743..9f2ac43 100755
--- a/src/DonkiDirectorServer.py
+++ b/src/DonkiDirectorServer.py
@@ -127,7 +127,7 @@ class DonkiDirectorServer (PyTango.LatestDeviceImpl):
     def always_executed_hook(self):
         self.debug_stream("In always_excuted_hook()")
         #----- PROTECTED REGION ID(DonkiDirectorServer.always_executed_hook) ENABLED START -----#
-        if self.dt._started:
+        if self.dt._is_started:
             if self.get_state() != PyTango.DevState.ON:
                 self.set_state(PyTango.DevState.ON)
         elif self.get_state() != PyTango.DevState.STANDBY:
@@ -372,7 +372,7 @@ class DonkiDirectorServer (PyTango.LatestDeviceImpl):
         #----- PROTECTED REGION ID(DonkiDirectorServer.SetPlayerPriority) ENABLED START -----#
         for i in range(len(argin[0])):
             if argin[0][i] >= -1:
-				self.dt.set_player_priority(argin[1][i],argin[0][i])
+                self.dt.set_player_priority(argin[1][i],argin[0][i])
         #----- PROTECTED REGION END -----#	//	DonkiDirectorServer.SetPlayerPriority
         
     def RenameDataSource(self, argin):
diff --git a/src/DonkiDirector_cmdline.py b/src/DonkiDirector_cmdline.py
index 1cf0669..4f611a8 100755
--- a/src/DonkiDirector_cmdline.py
+++ b/src/DonkiDirector_cmdline.py
@@ -22,8 +22,8 @@ if __name__ == "__main__":
     dt.set_player_priority("pippo",1)
     dt.set_player_priority("pluto",1)
 
-    dt._started = True
-    while (dt._started):
+    dt._is_started = True
+    while (dt._is_started):
         time.sleep(1)
     print "-------------",dt.zcc.ask_for_log("paperino")
     print "-------------",dt.zcc.ask_for_log("pluto")
@@ -38,10 +38,10 @@ def get_user_input_loop(dt):
                 n = raw_input("\n\nEnter command (type ? for help): ")
                 cmd_in = (n.lower()).strip(' ')
                 if cmd_in == "start":
-                    dt._started = True
+                    dt._is_started = True
                     print "OK"
                 elif cmd_in == "stop":
-                    dt._started = False
+                    dt._is_started = False
                     print "OK"
                 elif cmd_in == "state?":
                     print dt._state
diff --git a/src/DonkiOrchestraLib.py b/src/DonkiOrchestraLib.py
index 64ef6c2..c6ff3e8 100644
--- a/src/DonkiOrchestraLib.py
+++ b/src/DonkiOrchestraLib.py
@@ -26,7 +26,7 @@ class CommunicationClass:
             self.pub_sock = self.context.socket(zmq.PUB)
             self.pub_port = self.pub_sock.bind_to_random_port("tcp://0.0.0.0")
             if DEBUG:
-                print "PUB " + "tcp://" + str(self.pub_port)
+                print ("PUB tcp://%s" % str(self.pub_port))
         except:
             traceback.print_exc()
             self.pub_sock = None
@@ -41,13 +41,13 @@ class CommunicationClass:
                 self.poller.unregister(self.sub_socks[name])
                 self.sub_socks[name].close()
             self.sub_socks[name] = self.context.socket(zmq.SUB)
-            self.sub_socks[name].setsockopt(zmq.SUBSCRIBE, '')
+            self.sub_socks[name].setsockopt(zmq.SUBSCRIBE, b'')
             self.sub_socks[name].connect("tcp://"+str(url))
             self.poller.register(self.sub_socks[name], zmq.POLLIN)
         except:
             traceback.print_exc()
             if DEBUG:
-                print "tcp://"+str(url)
+                print ("tcp://%s" % str(url))
             del self.sub_socks[name]
             return False
         return True
@@ -206,7 +206,7 @@ class CommunicationClass:
         # At the moment just use send_pyobj
         self.pub_sock.send_pyobj([command, srv_name, argin])
         if DEBUG:
-            print "Sent command:", command, srv_name, argin, timeout_sec
+            print ( "Sent command: %s %s %s %d" % (command, srv_name, str(argin), timeout_sec))
         msg = []
         sub_socket = self.sub_socks[srv_name]
         max_retries = 5
@@ -229,7 +229,7 @@ class CommunicationClass:
                         elif reply[1] == reply[2] == -1:
                             return False
                     else:
-                        print "=>",reply
+                        print ("=> %s" % str(reply))
                         retry += 1
                 except:
                     traceback.print_exc()
diff --git a/src/InfoServer.py b/src/InfoServer.py
index 4880d3c..2f8475f 100644
--- a/src/InfoServer.py
+++ b/src/InfoServer.py
@@ -3,7 +3,10 @@ import sys
 import os
 import time
 import threading
-import thread
+if sys.version_info[0] == 2:
+    import thread
+else:
+    import _thread as thread
 import signal
 from socket import *
 import types
@@ -27,7 +30,7 @@ class infoServerThread(threading.Thread):
         try:
             self.db = TinyDB(tinydb_file_path)
         except:
-            print "Unable to open", tinydb_file_path, "content cleared"
+            print("Unable to open", tinydb_file_path, "content cleared")
             os.rename(tinydb_file_path,"%s_corrupted" % tinydb_file_path)
         self.db = TinyDB(tinydb_file_path)
 
@@ -64,7 +67,7 @@ class infoServerThread(threading.Thread):
         while 1:
             data = clientsock.recv(self.BUFFSIZE)
             if not data: break
-            request = data.rstrip("\r\n")
+            request = (data.decode()).rstrip("\r\n")
             tokens = request.split(" ")
             self.mutex.acquire()
             if tokens[0].lower() == "set":
@@ -80,7 +83,7 @@ class infoServerThread(threading.Thread):
             #
             self.mutex.release()
             reply_str += "\r\n"
-            clientsock.send(reply_str)
+            clientsock.send(reply_str.encode())
         clientsock.close()
 
 #-----------------------------------------------------------------------------------
@@ -96,9 +99,9 @@ class infoServerThread(threading.Thread):
             self.db.table(table).insert(item)
             if self.notif_function:
                 self.notif_function( table , self.db.table(table).all())
-        except Exception,e:
+        except:
             traceback.print_exc()
-            return '*** ERROR: '+ str(e)
+            return '*** ERROR: %s' % traceback.format_exc()
         return 'OK'
 
 #-----------------------------------------------------------------------------------
@@ -109,8 +112,8 @@ class infoServerThread(threading.Thread):
             table = tokens[0]
             tbl = self.db.table(table)
             resp = str( tbl.all())
-        except Exception,e:
-            return '*** ERROR: '+ str(e)
+        except:
+            return '*** ERROR: %s' % traceback.format_exc()
         return resp
 
 #-----------------------------------------------------------------------------------
@@ -126,8 +129,8 @@ class infoServerThread(threading.Thread):
                 self.db.purge_table(table)
             if self.notif_function:
                 self.notif_function( table , self.db.table(table).all())
-        except Exception,e:
-            return '*** ERROR: '+ str(e)
+        except:
+            return '*** ERROR: %s' % traceback.format_exc()
         return 'OK'
 
 if __name__ == '__main__':
diff --git a/src/hdfwriter.py b/src/hdfwriter.py
index 0d8eaf7..784822b 100644
--- a/src/hdfwriter.py
+++ b/src/hdfwriter.py
@@ -68,8 +68,11 @@ import numpy
 import threading
 import h5py
 import traceback
-import os
-from Queue import (Queue, Full, Empty)
+import os, sys
+if sys.version_info[0] == 2:
+    from Queue import (Queue, Full, Empty)
+else:
+    from queue import (Queue, Full, Empty)
 
 DEBUG = False
 LOGFILTER = 100
@@ -163,7 +166,7 @@ class HDFWriter(threading.Thread):
         self._hdf_file = None
 
         #flag to indicate that thread should close.
-        self._stop = False
+        self._stop_ = False
         #dictionary that deals with the FermiDaq to acquire, it
         #is used mainly to allow the HDFWriter to know if it has acquired all
         #the necessary data.
@@ -213,8 +216,8 @@ class HDFWriter(threading.Thread):
             #assert self._intState == HDFW_BUSY
             assert bn_in >= self.first_bunch
             assert bn_fin <= self.last_bunch
-        except AssertionError, ex:
-            print traceback.format_exc()
+        except AssertionError as ex:
+            traceback.print_exc()
             return
         self._log_filter += 1
         if DEBUG or not self._log_filter % LOGFILTER:
@@ -249,7 +252,7 @@ class HDFWriter(threading.Thread):
             for daq_key in daq_pt_list:
                 self._daq_list[daq_key] = (0,0)
         else:
-            print("HDFW %s daq_switch_on(), unknown input argument")
+            print("HDFW %s daq_switch_on(), unknown input argument", daq_pt_list)
 
 
     def daq_switch_off(self,daq_pt_list):
@@ -354,15 +357,15 @@ class HDFWriter(threading.Thread):
 
 
         """
-        self._stop = True
+        self._stop_ = True
 
 
     def set_paused(self,value_in):
         """ Ask thread to pause actions.
         """
         if not value_in:
-			# Restart, avoid timeout problems
-			self.last_savetime = time.time()
+            # Restart, avoid timeout problems
+            self.last_savetime = time.time()
         self._paused = value_in
 	
 	
@@ -392,7 +395,7 @@ class HDFWriter(threading.Thread):
         self._intState = HDFW_IDLE
         self.last_savetime = time.time()
 
-        while not self._stop:
+        while not self._stop_:
             try:
                 #receive the new entry
                 if self._save_list.qsize() > 0:
@@ -404,7 +407,7 @@ class HDFWriter(threading.Thread):
                             self._save_hdf(daq_key,bn_in, bn_fin, data_in)
                         self.last_savetime = time.time()
                     except:
-                        print traceback.format_exc()
+                        traceback.print_exc()
                 elif self._file_concluded():
                     self._close_data_acquisition(timeout=False)
                 elif (not self._paused) and (self.no_data_timeout > 0) and (self._hdf_file):
@@ -416,7 +419,7 @@ class HDFWriter(threading.Thread):
             except:
                 #THIS MEANS THAT LESS THEN 3 VALUES WERE PASSED,
                 #THIS IS THE STOP REQUEST.
-                print traceback.format_exc()
+                traceback.print_exc()
                 print('HDFW %s received STOP Request' % self.key)
                 self.report += "Received STOP Request" + "\n"
 
@@ -464,7 +467,7 @@ class HDFWriter(threading.Thread):
             return True
         elif len(self._daq_list.keys()) == 0:
             # Not arrived any data yet
-	    return False
+            return False
 	   
         #get all the daqattrib that has reached the last number
         l= [(key,value[1]) for (key,value) in self._daq_list.items()
@@ -507,8 +510,8 @@ class HDFWriter(threading.Thread):
             try:
                 # Create HDF dataset
                 self._hdf_file.create_dataset(str(metakey), data=last_metadata[metakey][1])
-            except Exception, ex:
-                print "store_metadata exception",metakey,last_metadata[metakey][1]
+            except:
+                print("store_metadata exception",metakey,last_metadata[metakey][1])
                 traceback.print_exc()
         del last_metadata
 
@@ -520,7 +523,7 @@ class HDFWriter(threading.Thread):
             self._hdf_file.flush()
             self._hdf_file.close()
             if DEBUG:
-                print "FILE",file_name," closed",1000.*(time.time() - tt0)        
+                print("FILE",file_name," closed",1000.*(time.time() - tt0))
             self._hdf_file = None
             self.report += file_name.split("/")[-1] + " Closed.\n"
             #if at least one reached the last bunch:
@@ -531,12 +534,12 @@ class HDFWriter(threading.Thread):
                             if value[1] != self.last_bunch ]
                 #
                 if DEBUG:
-                    print self.key,"TIMEOUT",self.last_bunch,not_finished_l
+                    print(self.key,"TIMEOUT",self.last_bunch,not_finished_l)
             self._daq_list.clear()
             self.dataserver.hdf_finished(self.key,file_name,self.report)
             #self.dataserver.task_queue.put(['hdf_finished',self.key,file_name,self.report])
         except:
-            print traceback.format_exc()
+            traceback.print_exc()
 
     def _save_hdf(self, daq_key, bn_in, bn_fin, data_in):
         """
@@ -594,13 +597,11 @@ class HDFWriter(threading.Thread):
             self._hdf_file = h5py.File(f_name,'w')
             #self._hdf_file = h5py.File(f_name,'w',libver='latest')
             self.last_savetime = time.time()
-	    try:
-            	self._hdf_file.attrs['timestamp'] = str(time.ctime())
-            	self.dataserver.notify_queue.put(['update_report',f_name.split("/")[-1] + " Opened.\n"])
-	    except:
-	    	traceback.print_exc()
-	
-
+            try:
+                self._hdf_file.attrs['timestamp'] = str(time.ctime())
+                self.dataserver.notify_queue.put(['update_report',f_name.split("/")[-1] + " Opened.\n"])
+            except:
+                traceback.print_exc()
         #check if its data set was already configured.
         if daq_key not in self.dsets:
             tokens = daq_key.split("/")
@@ -615,7 +616,6 @@ class HDFWriter(threading.Thread):
                     #print "****", groupname,"NOT CREATED"
                     # Probably the grooup already exists... does not matter
                     pass
-
             #
             try:
                 if (groupname != ""):
@@ -641,9 +641,9 @@ class HDFWriter(threading.Thread):
             except ValueError:
                 print('file %s data_set %s already exist'%(self._hdf_file.filename, daq_key))
                 self.report += "Error: dataset " + daq_key + " already exist." + "\n"
-            except Exception, ex:
-                print traceback.format_exc()
-                print('HDFW %s file %s data_set %s creation error %s' %(self.key, self._hdf_file.filename, daq_key,str(ex)))
+            except:
+                traceback.print_exc()
+                print('HDFW %s file %s data_set %s creation error %s' %(self.key, self._hdf_file.filename, daq_key,traceback.format_exc()))
                 self.report += "Error: dataset " + daq_key + " creation error." + "\n"
 
         try:
@@ -652,21 +652,21 @@ class HDFWriter(threading.Thread):
             try:
                 slicc = slice(bn_in - self.first_bunch, bn_fin - self.first_bunch + 1)
                 self.dsets_ptr[daq_key][slicc] = data_in
-            except Exception, ex:
-                print traceback.format_exc()
-                print daq_key,slicc,self.first_bunch,bn_in,bn_fin,self._hdf_file.filename
+            except:
+                traceback.print_exc()
+                print(daq_key,slicc,self.first_bunch,bn_in,bn_fin,self._hdf_file.filename)
                 self.report += "Error: dataset " + daq_key + " write error." + "\n"
             #check if the file is finished.
             if self._file_concluded():
                 if DEBUG:
                     print('HDFW %s file concluded' % self.key)
                 self._close_data_acquisition()
-        except ValueError, ex:
+        except ValueError as ex:
             self.report += "Error: dataset " + daq_key + " h5py.get[dataset] failed." + "\n"
             #Bug#3, found that sometimes it is not able to get the correct dataset
             print("HDFW %s dataset %s h5py.get[dataset] failed %s" %
                 (self.key, daq_key, str(ex)))
-        except AssertionError, ex:
+        except AssertionError as ex:
             #Bug#3
             self.report += "Error: dataset " + daq_key + " assertion error." + "\n"
             print("HDFW %s dataset %s assertion error" %
-- 
GitLab