From f4c5aed4180321c868e23d3bf5acb11114d9dae2 Mon Sep 17 00:00:00 2001
From: hofmockel <dreagonfly@gmx.de>
Date: Sat, 31 May 2014 17:59:29 +0200
Subject: [PATCH] Make the 'compact_range' call available in python

(cherry picked from commit c6e4014136aa3d81ecd4288150b3eefca9849b0b)
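
A brief usage sketch of the new binding (the database path and keys are
illustrative; it assumes a DB opened through the existing rocksdb.DB API):

    import rocksdb

    db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True))
    db.put(b'a', b'1')
    db.put(b'b', b'2')

    # Compact only the key range [b'a', b'b'].
    db.compact_range(begin=b'a', end=b'b')

    # With begin=None and end=None the entire database is compacted.
    db.compact_range()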
---
 docs/api/database.rst    | 32 ++++++++++++++++++++++++++++++++
 rocksdb/_rocksdb.pyx     | 28 ++++++++++++++++++++++++++++
 rocksdb/db.pxd           |  4 ++--
 rocksdb/tests/test_db.py |  7 +++++++
 4 files changed, 69 insertions(+), 2 deletions(-)

diff --git a/docs/api/database.rst b/docs/api/database.rst
index 3eac695..8b3f2ca 100644
--- a/docs/api/database.rst
+++ b/docs/api/database.rst
@@ -237,6 +237,38 @@ Database object
 
         ``largest_seqno``
             largest seqno in file
+
+    .. py:method:: compact_range(begin=None, end=None, reduce_level=False, target_level=-1)
+
+        Compact the underlying storage for the key range [begin,end].
+        The actual compaction interval might be a superset of [begin, end].
+        In particular, deleted and overwritten versions are discarded,
+        and the data is rearranged to reduce the cost of operations
+        needed to access the data.
+
+        This operation should typically only be invoked by users who understand
+        the underlying implementation.
+
+        ``begin == None`` is treated as a key before all keys in the database.
+        ``end == None`` is treated as a key after all keys in the database.
+        Therefore the following call will compact the entire database: ``db.compact_range()``.
+
+        Note that after the entire database is compacted, all data is pushed
+        down to the last level containing any data. If the total data size
+        after compaction is reduced, that level might not be appropriate for
+        hosting all the files. In this case, the client can set ``reduce_level``
+        to ``True`` to move the files back to the minimum level capable of holding
+        the data set, or to a given level (specified by a non-negative ``target_level``).
+
+        :param bytes begin: Key at which to start compaction.
+                            If ``None``, start at the beginning of the database.
+        :param bytes end: Key at which to end compaction.
+                          If ``None``, end at the last key of the database.
+        :param bool reduce_level:  If ``True``, allow RocksDB to move the data to
+                                   another level if the current one is not big enough.
+                                   If ``False``, you may end up with a larger level
+                                   than configured.
+        :param int target_level: Level to which the compacted range is pushed.
         
     .. py:attribute:: options
 
diff --git a/rocksdb/_rocksdb.pyx b/rocksdb/_rocksdb.pyx
index 97427f7..31cfac2 100644
--- a/rocksdb/_rocksdb.pyx
+++ b/rocksdb/_rocksdb.pyx
@@ -1533,6 +1533,34 @@ cdef class DB(object):
 
         return ret
 
+    def compact_range(self, begin=None, end=None, reduce_level=False, target_level=-1):
+
+        cdef Status st
+        cdef Slice begin_val
+        cdef Slice end_val
+
+        cdef Slice* begin_ptr
+        cdef Slice* end_ptr
+
+        begin_ptr = NULL
+        end_ptr = NULL
+
+        if begin is not None:
+            begin_val = bytes_to_slice(begin)
+            begin_ptr = cython.address(begin_val)
+
+        if end is not None:
+            end_val = bytes_to_slice(end)
+            end_ptr = cython.address(end_val)
+
+
+        st = self.db.CompactRange(
+            begin_ptr,
+            end_ptr,
+            reduce_level,
+            target_level)
+        check_status(st)
+
     @staticmethod
     def __parse_read_opts(
         verify_checksums=False,
diff --git a/rocksdb/db.pxd b/rocksdb/db.pxd
index 4f2ef01..a077dbd 100644
--- a/rocksdb/db.pxd
+++ b/rocksdb/db.pxd
@@ -91,10 +91,10 @@ cdef extern from "rocksdb/db.h" namespace "rocksdb":
             int,
             uint64_t*) nogil except+
 
-        void CompactRange(
+        Status CompactRange(
             const Slice*,
             const Slice*,
-            bool,
+            cpp_bool,
             int) nogil except+
 
         int NumberLevels() nogil except+
diff --git a/rocksdb/tests/test_db.py b/rocksdb/tests/test_db.py
index d54c446..3433924 100644
--- a/rocksdb/tests/test_db.py
+++ b/rocksdb/tests/test_db.py
@@ -189,6 +189,13 @@ class TestDB(unittest.TestCase, TestHelper):
         self.assertIsNotNone(self.db.get_property(b'rocksdb.num-files-at-level0'))
         self.assertIsNone(self.db.get_property(b'does not exsits'))
 
+    def test_compact_range(self):
+        for x in range(10000):
+            x = int_to_bytes(x)
+            self.db.put(x, x)
+
+        self.db.compact_range()
+
 
 class AssocCounter(rocksdb.interfaces.AssociativeMergeOperator):
     def merge(self, key, existing_value, value):
-- 
GitLab