diff --git a/docs/api/options.rst b/docs/api/options.rst
index 3b23e87198d1c4e1482626c6e0b93770d5273d91..dc37d55b41f62bd022ffb9af9802f3a7f96cd7fd 100644
--- a/docs/api/options.rst
+++ b/docs/api/options.rst
@@ -109,45 +109,6 @@ Options object
         | *Type:* ``int``
         | *Default:* ``5000``
 
-    .. py:attribute:: block_cache
-
-        Control over blocks (user data is stored in a set of blocks, and
-        a block is the unit of reading from disk).
-
-        If not ``None`` use the specified cache for blocks.
-        If ``None``, rocksdb will automatically create and use an 8MB internal cache.
-
-        | *Type:* Instace of :py:class:`rocksdb.LRUCache`
-        | *Default:* ``None``
-
-    .. py:attribute:: block_cache_compressed
-
-        If not ``None`` use the specified cache for compressed blocks.
-        If ``None``, rocksdb will not use a compressed block cache.
-
-        | *Type:* Instace of :py:class:`rocksdb.LRUCache`
-        | *Default:* ``None``
-
-    .. py:attribute:: block_size
-
-        Approximate size of user data packed per block.  Note that the
-        block size specified here corresponds to uncompressed data.  The
-        actual size of the unit read from disk may be smaller if
-        compression is enabled.  This parameter can be changed dynamically.
- 
-        | *Type:* ``int``
-        | *Default:* ``4096``
-
-
-    .. py:attribute:: block_restart_interval
-
-        Number of keys between restart points for delta encoding of keys.
-        This parameter can be changed dynamically.  Most clients should
-        leave this parameter alone.
- 
-        | *Type:* ``int``
-        | *Default:* ``16``
-
     .. py:attribute:: compression
 
         Compress blocks using the specified compression algorithm.
@@ -156,15 +117,6 @@ Options object
         | *Type:* Member of :py:class:`rocksdb.CompressionType`
         | *Default:* :py:attr:`rocksdb.CompressionType.snappy_compression`
 
-    .. py:attribute:: whole_key_filtering
-
-        If ``True``, place whole keys in the filter (not just prefixes).
-        This must generally be true for gets to be efficient.
-
-        | *Type:* ``bool``
-        | *Default:* ``True``
-
-
     .. py:attribute:: num_levels
 
         Number of levels for this database
@@ -315,15 +267,6 @@ Options object
         | *Type:* ``bool``
         | *Default:* ``False``
 
-    .. py:attribute:: db_stats_log_interval
-
-        This number controls how often a new scribe log about
-        db deploy stats is written out.
-        -1 indicates no logging at all.
-
-        | *Type:* ``int``
-        | *Default:* ``1800``
-
     .. py:attribute:: db_log_dir
 
         This specifies the info LOG dir.
@@ -346,16 +289,6 @@ Options object
         | *Type:* ``unicode``
         | *Default:* ``""``
 
-    .. py:attribute:: disable_seek_compaction
-
-        Disable compaction triggered by seek.
-        With bloomfilter and fast storage, a miss on one level
-        is very cheap if the file handle is cached in table cache
-        (which is true if max_open_files is large).
-
-        | *Type:* ``bool``
-        | *Default:* ``True``
-
     .. py:attribute:: delete_obsolete_files_period_micros
 
         The periodicity when obsolete files get deleted. The default
@@ -454,15 +387,6 @@ Options object
         | *Type:* ``int``
         | *Default:* ``(2**64) - 1``
 
-    .. py:attribute:: no_block_cache
-
-        Disable block cache. If this is set to true,
-        then no block cache should be used, and the block_cache should
-        point to ``None``
-
-        | *Type:* ``bool``
-        | *Default:* ``False``
-
     .. py:attribute:: table_cache_numshardbits
 
         Number of shards used for table cache.
@@ -579,17 +503,6 @@ Options object
         | *Type:* ``int``
         | *Default:* ``3600``
 
-    .. py:attribute:: block_size_deviation
-
-        This is used to close a block before it reaches the configured
-        'block_size'. If the percentage of free space in the current block is less
-        than this specified number and adding a new record to the block will
-        exceed the configured block size, then this block will be closed and the
-        new record will be written to the next block.
-
-        | *Type:* ``int``
-        | *Default:* ``10``
-
     .. py:attribute:: advise_random_on_open
 
         If set true, will hint the underlying file system that the file
@@ -797,15 +710,6 @@ Options object
         
         *Default:* ``None``
 
-    .. py:attribute:: filter_policy
-
-        If not ``None`` use the specified filter policy to reduce disk reads.
-        A python filter policy must implement the
-        :py:class:`rocksdb.interfaces.FilterPolicy` interface.
-        Recommendes is a instance of :py:class:`rocksdb.BloomFilterPolicy`
-
-        *Default:* ``None``
-
     .. py:attribute:: prefix_extractor
 
         If not ``None``, use the specified function to determine the
@@ -902,7 +806,8 @@ https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats
 
     Wraps BlockBasedTableFactory of RocksDB.
 
-    .. py:method:: __init__(index_type='binary_search', hash_index_allow_collision=True, checksum='crc32')
+    .. py:method:: __init__(index_type='binary_search', hash_index_allow_collision=True, checksum='crc32', block_cache=None, block_cache_compressed=None, filter_policy=None, no_block_cache=False, block_size=None, block_size_deviation=None, block_restart_interval=None, whole_key_filtering=None)
+
 
     :param string index_type:
         * ``binary_search`` a space efficient index block that is optimized
@@ -922,6 +827,56 @@ https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats
         even though they have different checksum type.
         Can be either ``crc32`` or ``xxhash``.
 
+    :param block_cache:
+        Control over blocks (user data is stored in a set of blocks, and
+        a block is the unit of reading from disk).
+
+        If ``None``, rocksdb will automatically create and use an 8MB internal cache.
+        If not ``None``, use the specified cache for blocks. In that case it
+        must be an instance of :py:class:`rocksdb.LRUCache`.
+
+    :param block_cache_compressed:
+        If ``None``, rocksdb will not use a compressed block cache.
+        If not ``None``, use the specified cache for compressed blocks. In that
+        case it must be an instance of :py:class:`rocksdb.LRUCache`.
+
+    :param filter_policy:
+        If not ``None``, use the specified filter policy to reduce disk reads.
+        A Python filter policy must implement the
+        :py:class:`rocksdb.interfaces.FilterPolicy` interface.
+        An instance of :py:class:`rocksdb.BloomFilterPolicy` is recommended.
+
+    :param bool no_block_cache:
+        Disable the block cache. If this is set to ``True``, no block cache
+        will be used and ``block_cache`` should be set to ``None``.
+
+    :param int block_size:
+        If set to ``None``, the rocksdb default of ``4096`` is used.
+        Approximate size of user data packed per block.  Note that the
+        block size specified here corresponds to uncompressed data.  The
+        actual size of the unit read from disk may be smaller if
+        compression is enabled.  This parameter can be changed dynamically.
+
+    :param int block_size_deviation:
+        If set to ``None``, the rocksdb default of ``10`` is used.
+        This is used to close a block before it reaches the configured
+        ``block_size``. If the percentage of free space in the current block is less
+        than this specified number and adding a new record to the block will
+        exceed the configured block size, then this block will be closed and the
+        new record will be written to the next block.
+
+    :param int block_restart_interval:
+        If set to ``None``, the rocksdb default of ``16`` is used.
+        Number of keys between restart points for delta encoding of keys.
+        This parameter can be changed dynamically.  Most clients should
+        leave this parameter alone.
+
+    :param bool whole_key_filtering:
+        If set to ``None``, the rocksdb default of ``True`` is used.
+        If ``True``, place whole keys in the filter (not just prefixes).
+        This must generally be true for gets to be efficient.
+
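+    The parameters documented above are combined when the factory is
+    constructed and the factory is then assigned to
+    :py:attr:`rocksdb.Options.table_factory`. A minimal sketch; the cache
+    sizes and bloom bits below are illustrative values, not recommendations::
+
+        import rocksdb
+
+        # Block options now live on the table factory, not on Options.
+        factory = rocksdb.BlockBasedTableFactory(
+            filter_policy=rocksdb.BloomFilterPolicy(10),
+            block_cache=rocksdb.LRUCache(2 * (1024 ** 3)),
+            block_cache_compressed=rocksdb.LRUCache(500 * (1024 ** 2)),
+            block_size=4096)
+
+        opts = rocksdb.Options()
+        opts.create_if_missing = True
+        opts.table_factory = factory
+        db = rocksdb.DB("test.db", opts)
+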
 .. py:class:: rocksdb.PlainTableFactory
 
     Plain Table with prefix-only seek. It wraps rocksdb PlainTableFactory.
diff --git a/docs/changelog.rst b/docs/changelog.rst
index d6a552fcf39492a4323bcafc55ddb888317872c1..398f19fdcc9ae07c23e55af9ead643a58bfcf1fa 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -35,7 +35,20 @@ For DB.iterkeys and DB.iteritems ``itertools.takewhile`` is a possible solution.
 **SST Table Builders:**
 
 * Removed ``NewTotalOrderPlainTableFactory``, because rocksdb drops it too.
+
+**Changed Options:**
+
+In newer versions of rocksdb several options were moved or removed;
+a short migration sketch follows the list below.
+
 * Rename ``bloom_bits_per_prefix`` of :py:class:`rocksdb.PlainTableFactory` to ``bloom_bits_per_key``
+* Removed ``Options.db_stats_log_interval``.
+* Removed ``Options.disable_seek_compaction``.
+* Moved ``Options.no_block_cache`` to ``BlockBasedTableFactory``.
+* Moved ``Options.block_size`` to ``BlockBasedTableFactory``.
+* Moved ``Options.block_size_deviation`` to ``BlockBasedTableFactory``.
+* Moved ``Options.block_restart_interval`` to ``BlockBasedTableFactory``.
+* Moved ``Options.whole_key_filtering`` to ``BlockBasedTableFactory``.
+
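+A minimal migration sketch for the moved options (the values shown are only
+illustrative)::
+
+    opts = rocksdb.Options()
+
+    # Before: block options were set directly on Options, e.g.
+    #   opts.block_size = 4096
+    #   opts.whole_key_filtering = True
+
+    # Now: pass them to the table factory instead.
+    opts.table_factory = rocksdb.BlockBasedTableFactory(
+        block_size=4096,
+        whole_key_filtering=True)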
 
 New:
 ^^^^
@@ -44,7 +57,6 @@ New:
 * Add more option to :py:class:`rocksdb.PlainTableFactory`
 
 
-
 Version 0.2
 -----------
 
diff --git a/docs/tutorial/index.rst b/docs/tutorial/index.rst
index 9527542e983d20deda80fa76ccc3ec584ded2750..01598760f742345768322c5b97d124a42e278350 100644
--- a/docs/tutorial/index.rst
+++ b/docs/tutorial/index.rst
@@ -20,9 +20,11 @@ A more production ready open can look like this ::
     opts.write_buffer_size = 67108864
     opts.max_write_buffer_number = 3
     opts.target_file_size_base = 67108864
-    opts.filter_policy = rocksdb.BloomFilterPolicy(10)
-    opts.block_cache = rocksdb.LRUCache(2 * (1024 ** 3))
-    opts.block_cache_compressed = rocksdb.LRUCache(500 * (1024 ** 2))
+
+    opts.table_factory = rocksdb.BlockBasedTableFactory(
+        filter_policy=rocksdb.BloomFilterPolicy(10),
+        block_cache=rocksdb.LRUCache(2 * (1024 ** 3)),
+        block_cache_compressed=rocksdb.LRUCache(500 * (1024 ** 2)))
 
     db = rocksdb.DB("test.db", opts)