diff --git a/docs/usage.rst b/docs/usage.rst
index f630e7cc3178108747ddb6fc96025b807d4ef87b..0e89575f242c132ec7edf319298fe8ed623e0417 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -1,3 +1,13 @@
+How does this work?
+===================
+
+Data is stored in so-called chunks. A chunk is either immutable, or its last page can be
+actively appended to.
+
+When data is requested, the relevant chunk is loaded into memory. It will not be unloaded
+automatically; to release unused chunks you must periodically call
+:meth:`~tempsdb.series.TimeSeries.close_chunks`.
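+
+For example, a minimal housekeeping loop might look like this (the database path, the series
+name and the sleep interval are purely illustrative):
+
+.. code-block:: python
+
+    import time
+
+    from tempsdb.database import Database
+
+    db = Database('/path/to/db')             # illustrative path
+    series = db.get_series('temperature')    # illustrative series name
+
+    while True:
+        # close chunks that are no longer referenced by any reader
+        series.close_chunks()
+        time.sleep(30)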
+
 Usage
 =====
 
@@ -23,3 +33,4 @@ You retrieve their data via Iterators:
 Appending data is done via :meth:`~tempsdb.series.TimeSeries.append`. Since time series are
 allocated in entire pages, your files will be padded to a full page in size. This makes writes
 quite fast, as in 99.9% of cases it is just a memory operation.
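+
+A minimal sketch of appending, assuming the series was created with ``block_size=4`` and that
+each entry has to be exactly ``block_size`` bytes long:
+
+.. code-block:: python
+
+    # the timestamp is a plain integer whose meaning (e.g. seconds since the epoch)
+    # is up to you; data is a bytes object of the configured block size
+    series.append(1600000000, b'\x00\x01\x02\x03')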
+
diff --git a/tempsdb/database.pxd b/tempsdb/database.pxd
index 57240a2447a4f1d2d61c8cf1d0d6867c9e143216..d87d2df4c298b55ad8fc310797b8242780744a22 100644
--- a/tempsdb/database.pxd
+++ b/tempsdb/database.pxd
@@ -3,12 +3,12 @@ from .series cimport TimeSeries
 
 cdef class Database:
     cdef:
-        str path
+        readonly str path
         bint closed
         object lock
         object mpm
 
-    cpdef void close(self)
+    cpdef int close(self) except -1
     cpdef TimeSeries get_series(self, str name)
     cpdef void register_memory_pressure_manager(self, object mpm)
     cpdef TimeSeries create_series(self, str name, int block_size,
diff --git a/tempsdb/database.pyx b/tempsdb/database.pyx
index bee86189612bec100a3cf47c7f61f3ad5a32d84a..8a72b905a8b7303110006ac8f380629ac3516205 100644
--- a/tempsdb/database.pyx
+++ b/tempsdb/database.pyx
@@ -10,6 +10,8 @@ cdef class Database:
     A basic TempsDB object.
 
     :param path: path to the directory with the database
+
+    :ivar path: path to the directory with the database (str)
     """
     def __init__(self, path: str):
         self.path = path
@@ -65,6 +67,7 @@ cdef class Database:
         :type page_size: int
         :return: new series
         :rtype: TimeSeries
+        :raises AlreadyExists: a series with this name already exists
         """
         if os.path.isdir(os.path.join(self.path, name)):
             raise AlreadyExists('Series already exists')
@@ -86,12 +89,13 @@ cdef class Database:
         self.mpm = mpm
         cdef TimeSeries series
         for series in self.open_series.values():
-            series.register_memory_pressure_manager(mpm)
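+            # a series may have been closed in the meantime; skip those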
+            if not series.closed:
+                series.register_memory_pressure_manager(mpm)
 
     def __del__(self):
         self.close()
 
-    cpdef void close(self):
+    cpdef int close(self) except -1:
         """
         Close this TempsDB database
         """
diff --git a/tempsdb/series.pyx b/tempsdb/series.pyx
index f7a350e8992602c3d028cc3e48c55c5af49c2033..41a9bb7a55416b7383c856b062e19c598fcda7d5 100644
--- a/tempsdb/series.pyx
+++ b/tempsdb/series.pyx
@@ -282,7 +282,7 @@ cdef class TimeSeries:
 
     cpdef int close_chunks(self) except -1:
         """
-        Close all superficially opened chunks
+        Close all superfluously opened chunks.
         """
         if self.last_chunk is None:
             return 0
@@ -295,12 +295,14 @@ cdef class TimeSeries:
 
         with self.open_lock:
             for chunk_name in chunks:
-                if chunk_name != last_chunk_name:
+                if chunk_name == last_chunk_name:
                     continue
-                elif not self.refs_chunks[chunk_name]:
+                elif not self.refs_chunks.get(chunk_name, 0):
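+                    # nothing references this chunk any more, so it is safe to close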
                     self.open_chunks[chunk_name].close()
-                    del self.open_chunks[chunk_name]
-                    del self.refs_chunks[chunk_name]
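+                    # the chunk may not have a reference-count entry at all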
+                    try:
+                        del self.refs_chunks[chunk_name]
+                    except KeyError:
+                        pass
         return 0
 
     cpdef int append(self, unsigned long long timestamp, bytes data) except -1: