diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8f496387405f44a0edb573639efba0d71dda12e6..eb54a8be4a9cfcdcccc5d7170d5368ffa0fd39c5 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,7 +7,7 @@ jobs:
     environment: Env1
     strategy:
       matrix:
-        python-version: [ "3.7", "3.8", "3.9", "3.10", "3.11", "3.12" ]
+        python-version: [ "3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "pypy3.9" ]
     steps:
       - uses: actions/checkout@main
       - uses: actions/setup-python@main
diff --git a/.gitignore b/.gitignore
index c3176ae640c8bad9067a8456d3e85627f3db4d19..4241751493487b4e939ada7a3e541a74493b2a0d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -166,14 +166,13 @@ var
 sdist
 develop-eggs
 .installed.cfg
-
+test_files/
 # Installer logs
 pip-log.txt
 
 # Unit test / coverage reports
 .coverage
 .tox
-
 #Translations
 *.mo
 
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 393cba875b39464976830e746f347d4d25de2701..5ce3c4d777ffacf08ea0c08f463e97faea76bf71 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -3,6 +3,8 @@ build:
   os: ubuntu-22.04
   tools:
     python: "3.9"
-
+python:
+  install:
+    - psutil
 sphinx:
   configuration: docs/conf.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f58623ce61b01d92a284ce94835706c92368b672..5ffe064b8860d17b4855ec53632178508f5d1de0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,7 @@
-# v2.25.1
+# v2.25.4
 
-* added parameters for EmptyContextManager
+* added JSONAbleDataObject
+* improved typing for reraise_as
+* added weak_refs to SingletonWithRegardsTo
+* added jump_to_directory
+* fixed get_size() to raise RuntimeError on PyPy instead of hanging
diff --git a/docs/coding/ctxt_managers.rst b/docs/coding/ctxt_managers.rst
index 2bcd52fa3c498cbd2fa967c375ee92f876962d00..ab613eed02f40db57236ab02c091b82b55e29822 100644
--- a/docs/coding/ctxt_managers.rst
+++ b/docs/coding/ctxt_managers.rst
@@ -3,3 +3,5 @@ Context managers
 
 .. autoclass:: satella.coding.EmptyContextManager
     :members:
+
+.. autofunction:: satella.coding.wrap_callable_in_context_manager
diff --git a/docs/coding/transforms.rst b/docs/coding/transforms.rst
index 4f44c5eef1958dbb3897dd6df09574e251f19553..fa0ad0448a9ad993435c20d2457c06bba003b2fe 100644
--- a/docs/coding/transforms.rst
+++ b/docs/coding/transforms.rst
@@ -19,6 +19,8 @@ Rudimentary data transforms and algorithms
 
 .. autofunction:: satella.coding.transforms.b64encode
 
+.. autofunction:: satella.coding.transforms.list_values_to_indices
+
 pad_to_multiple_of_length
 -------------------------
 
diff --git a/docs/files.rst b/docs/files.rst
index 7f2b1b01b59dc72cd52a9a21f03a9e1f7c26f7ab..718dc9220820f313a32584fec1f57eed2c4fc97a 100644
--- a/docs/files.rst
+++ b/docs/files.rst
@@ -9,6 +9,14 @@ A file-like object that will dispose of your content.
 .. autoclass:: satella.files.DevNullFilelikeObject
     :members:
 
+
+jump_to_directory
+-----------------
+
+.. autoclass:: satella.files.jump_to_directory
+    :members:
+
+
 safe_listdir
 ------------
 
diff --git a/docs/json.rst b/docs/json.rst
index 3ef1e4858bc32a7c52bbd905ddf4ffc6102b2e82..937b50dab7755fc01c7d3d86080213324013e599 100644
--- a/docs/json.rst
+++ b/docs/json.rst
@@ -8,6 +8,9 @@ with
 .. autoclass:: satella.json.JSONAble
     :members:
 
+.. autoclass:: satella.json.JSONAbleDataObject
+    :members:
+
 Then you can convert structures made out of standard serializable Python JSON objects, such as dicts
 and lists, and also JSONAble objects, by this all
 
diff --git a/satella/__init__.py b/satella/__init__.py
index 6ec890752e9e56077799501480d3ad597933d09c..b0f93f54dca729afce51939170ad8d7a63e2c0b3 100644
--- a/satella/__init__.py
+++ b/satella/__init__.py
@@ -1 +1 @@
-__version__ = '2.25.1'
+__version__ = '2.25.4'
diff --git a/satella/coding/__init__.py b/satella/coding/__init__.py
index d29657213076dc82348d5f221f296bea18004dd0..6226b1c031661e6e117212f4cfea214291cc5cec 100644
--- a/satella/coding/__init__.py
+++ b/satella/coding/__init__.py
@@ -4,7 +4,7 @@ Just useful objects to make your coding nicer every day
 
 from .algos import merge_dicts
 from .concurrent import Monitor, RMonitor
-from .ctxt_managers import EmptyContextManager
+from .ctxt_managers import EmptyContextManager, wrap_callable_in_context_manager
 from .decorators import precondition, short_none, has_keys, \
     wraps, chain_functions, postcondition, queue_get, auto_adapt_to_methods, \
     attach_arguments, for_argument
@@ -26,7 +26,7 @@ from .recast_exceptions import rethrow_as, silence_excs, catch_exception, log_ex
 
 __all__ = [
     'EmptyContextManager', 'Context', 'length',
-    'assert_equal', 'InequalityReason', 'Inequal',
+    'assert_equal', 'InequalityReason', 'Inequal', 'wrap_callable_in_context_manager',
     'Closeable', 'contains', 'enum_value',
     'expect_exception',
     'overload', 'class_or_instancemethod', 'TypeSignature',
diff --git a/satella/coding/ctxt_managers.py b/satella/coding/ctxt_managers.py
index 7dd1123b718adb432a1e672dd15f0dabdac95f03..d9928ba6b11601e1b60d1a0f43f3ce6fd0e4c1bf 100644
--- a/satella/coding/ctxt_managers.py
+++ b/satella/coding/ctxt_managers.py
@@ -1,3 +1,6 @@
+from satella.coding.decorators import wraps
+
+
 class EmptyContextManager:
     """
     A context manager that does nothing. Only to support conditional change of context managers,
@@ -21,3 +24,25 @@ class EmptyContextManager:
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         return False
+
+
+def wrap_callable_in_context_manager(clbl, ctxt_mgr, *my_args, **my_kwargs):
+    """
+    Wrap a callable in a context manager.
+
+    Roughly equivalent to:
+
+    >>> def inner(*args, **kwargs):
+    >>>     with ctxt_mgr(*my_args, **my_kwargs):
+    >>>         return clbl(*args, **kwargs)
+    >>> return inner
+
+    To be used as:
+
+    >>> clbl = wrap_callable_in_context_manager(lambda y: 5, tracing.start_new_span, 'New span')
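+
+    :param clbl: callable to wrap
+    :param ctxt_mgr: context manager class (or factory) to instantiate around every call
+    :param my_args: positional arguments to pass to ctxt_mgr
+    :param my_kwargs: keyword arguments to pass to ctxt_mgr
+    :return: a callable wrapping clbl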
+    """
+    @wraps(clbl)
+    def inner(*args, **kwargs):
+        with ctxt_mgr(*my_args, **my_kwargs):
+            return clbl(*args, **kwargs)
+    return inner
diff --git a/satella/coding/structures/dictionaries/cache_dict.py b/satella/coding/structures/dictionaries/cache_dict.py
index 5985e81227c4b20f9fa202ebb5ddf637739c1152..08bb4c13fbc7342790ff55cb0a3fdc6a5f1561b2 100644
--- a/satella/coding/structures/dictionaries/cache_dict.py
+++ b/satella/coding/structures/dictionaries/cache_dict.py
@@ -3,9 +3,11 @@ import time
 import typing as tp
 from concurrent.futures import ThreadPoolExecutor, Executor, Future
 
+from satella.coding.decorators.decorators import short_none
 from satella.coding.recast_exceptions import silence_excs
 from satella.coding.structures.lru import LRU
 from satella.coding.typing import K, V, NoArgCallable
+from satella.time import parse_time_string
 
 logger = logging.getLogger(__name__)
 
@@ -29,15 +31,17 @@ class CacheDict(tp.Mapping[K, V]):
 
     :param stale_interval: time in seconds after which an entry will be stale, ie.
         it will be served from cache, but a task will be launched in background to
-        refresh it
+        refresh it. Note that this parameter also accepts time-like strings, e.g. "23m".
     :param expiration_interval: time in seconds after which an entry will be ejected
-        from dict, and further calls to get it will block until the entry is available
+        from the dict, and further calls to get it will block until the entry is available.
+        Note that this parameter also accepts time-like strings, e.g. "23m".
     :param value_getter: a callable that accepts a key, and returns a value for given entry.
         If value_getter raises KeyError, then given entry will be evicted from the cache
     :param value_getter_executor: an executor to execute the value_getter function in background.
         If None is passed, a ThreadPoolExecutor will be used with max_workers of 4.
     :param cache_failures_interval: if any other than None is defined, this is the timeout
         for which failed lookups will be cached. By default they won't be cached at all.
+        Note that this parameter also accepts time-like strings, e.g. "23m".
     :param time_getter: a routine used to get current time in seconds
     :param default_value_factory: if given, this is the callable that will return values
         that will be given to user instead of throwing KeyError. If not given (default),
@@ -80,17 +84,17 @@ class CacheDict(tp.Mapping[K, V]):
 
         return False
 
-    def __init__(self, stale_interval: float, expiration_interval: float,
+    def __init__(self, stale_interval: tp.Union[float, int, str], expiration_interval: tp.Union[float, int, str],
                  value_getter: tp.Callable[[K], V],
                  value_getter_executor: tp.Optional[Executor] = None,
-                 cache_failures_interval: tp.Optional[float] = None,
+                 cache_failures_interval: tp.Optional[tp.Union[float, int, str]] = None,
                  time_getter: NoArgCallable[float] = time.monotonic,
                  default_value_factory: tp.Optional[NoArgCallable[V]] = None):
-        assert stale_interval <= expiration_interval, 'Stale interval may not be larger ' \
-                                                      'than expiration interval!'
-        self.stale_interval = stale_interval
+        self.stale_interval = parse_time_string(stale_interval)
+        self.expiration_interval = parse_time_string(expiration_interval)
+        assert self.stale_interval <= self.expiration_interval, 'Stale interval may not be larger ' \
+                                                                'than expiration interval!'
         self.default_value_factory = default_value_factory
-        self.expiration_interval = expiration_interval
         self.value_getter = value_getter
         if value_getter_executor is None:
             value_getter_executor = ThreadPoolExecutor(max_workers=4)
@@ -99,7 +103,7 @@ class CacheDict(tp.Mapping[K, V]):
         self.timestamp_data = {}  # type: tp.Dict[K, float]
         self.cache_missed = set()  # type: tp.Set[K]
         self.cache_failures = cache_failures_interval is not None
-        self.cache_failures_interval = cache_failures_interval
+        self.cache_failures_interval = short_none(parse_time_string)(cache_failures_interval)
         self.time_getter = time_getter
 
     def get_value_block(self, key: K) -> V:
diff --git a/satella/coding/structures/singleton.py b/satella/coding/structures/singleton.py
index 3dc9e83169f44234ac86e7e1ee41708856a7fa92..1744c96995394bc47706604733f7f9b2501a78d2 100644
--- a/satella/coding/structures/singleton.py
+++ b/satella/coding/structures/singleton.py
@@ -1,4 +1,5 @@
 import typing as tp
+import weakref
 
 from satella.coding.decorators.decorators import wraps
 
@@ -36,7 +37,7 @@ def Singleton(cls):
 
 
 # noinspection PyPep8Naming
-def SingletonWithRegardsTo(num_args: int):
+def SingletonWithRegardsTo(num_args: int, weak_refs: bool = False):
     """
     Make a memoized singletion depending on the arguments.
 
@@ -55,6 +56,13 @@ def SingletonWithRegardsTo(num_args: int):
     >>> c = MyClass('dev1')
     >>> assert a is c
     >>> assert b is not c
+
+    :param num_args: number of arguments to consume
+    :param weak_refs: if True, instances will be kept in a weak-value dictionary, so the registry cleans up
+                      after itself once the instances are no longer referenced elsewhere.
+
+    .. warning:: If weak_refs is False and the number of distinct argument values is potentially unbounded,
+                 memory usage may grow without limit, since the registry keeps every instance alive.
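+
+    A minimal sketch of the weak-reference behaviour (assuming no other references to the instance remain):
+
+    >>> @SingletonWithRegardsTo(num_args=1, weak_refs=True)
+    >>> class MyClass:
+    >>>     ...
+    >>> a = MyClass('dev1')
+    >>> del a   # the registry entry for 'dev1' may now be garbage-collected
+    >>> b = MyClass('dev1')     # will typically construct a fresh instance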
     """
 
     def inner(cls):
@@ -65,7 +73,10 @@ def SingletonWithRegardsTo(num_args: int):
         def singleton_new(cls, *args, **kw):
             it = cls.__dict__.get('__it__')
             if it is None:
-                it = cls.__it__ = {}
+                if weak_refs:
+                    it = cls.__it__ = weakref.WeakValueDictionary()
+                else:
+                    it = cls.__it__ = {}
 
             key = args[:num_args]
             if key in it:
diff --git a/satella/coding/transforms/__init__.py b/satella/coding/transforms/__init__.py
index ec6192641883e9031477b2c3a97de9eddbfa67ea..b87b5a050c37f8cae343ba340b024e4cf3de989c 100644
--- a/satella/coding/transforms/__init__.py
+++ b/satella/coding/transforms/__init__.py
@@ -12,11 +12,12 @@ from .merger import merge_series
 from .percentile import percentile
 from .predicates import is_subset
 from .words import hashables_to_int
+from .misc import list_values_to_indices
 
 __all__ = ['stringify', 'split_shuffle_and_join', 'one_tuple', 'none_if_false',
            'merge_series', 'pad_to_multiple_of_length', 'clip', 'hashables_to_int',
            'jsonify', 'intify', 'percentile', 'b64encode', 'linear_interpolate',
-           'merge_list', 'is_subset', 'unpack_dict']
+           'merge_list', 'is_subset', 'unpack_dict', 'list_values_to_indices']
 
 from satella.coding.typing import T, NoArgCallable, Appendable, Number, Predicate, K, V
 
diff --git a/satella/coding/transforms/misc.py b/satella/coding/transforms/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..b29213f0131d4c6ed715634d54baf2936fda6bc0
--- /dev/null
+++ b/satella/coding/transforms/misc.py
@@ -0,0 +1,24 @@
+import typing as tp
+
+from satella.coding.typing import V
+
+
+def list_values_to_indices(lst: tp.List[V]) -> tp.Dict[V, int]:
+    """
+    Transform a list of entries into a dict mapping where given entry can be found.
+
+    Example:
+
+    >>> a = ['abc', 'def', 'ghi']
+    >>> b = list_values_to_indices(a)
+    >>> assert b == {'abc': 0, 'def': 1, 'ghi': 2}
+
+    :param lst: list to process. Take care for the list to be composed of unique entries.
+    :raises ValueError: item was found more than once
+    """
+    result = {}
+    for idx, val in enumerate(lst):
+        if val in result:
+            raise ValueError(f'Entry {val} found more than once!')
+        result[val] = idx
+    return result
diff --git a/satella/coding/typing.py b/satella/coding/typing.py
index 8b79d2cf82a7d3a8df245bb2f41105bf8551febf..9f86680a8ab2604b2553c222106f1fcc7e35ed36 100644
--- a/satella/coding/typing.py
+++ b/satella/coding/typing.py
@@ -17,7 +17,7 @@ NoArgCallable = tp.Callable[[], T]
 Predicate = tp.Callable[[T], bool]
 
 ExceptionClassType = tp.Type[Exception]
-ExceptionList = tp.Union[ExceptionClassType, tp.Tuple[ExceptionClassType]]
+ExceptionList = tp.Union[ExceptionClassType, tp.Tuple[ExceptionClassType, ...]]
 
 
 class ClassComparable(metaclass=ABCMeta):
diff --git a/satella/files.py b/satella/files.py
index 2fb00052dfeab4fc9881ab6fd202a396e2cb66cb..eca97481c85c737a621aa51f15e2a2131c2a1e55 100644
--- a/satella/files.py
+++ b/satella/files.py
@@ -10,7 +10,8 @@ import typing as tp
 
 __all__ = ['read_re_sub_and_write', 'find_files', 'split', 'read_in_file', 'write_to_file',
            'write_out_file_if_different', 'make_noncolliding_name', 'try_unlink',
-           'DevNullFilelikeObject', 'read_lines', 'AutoflushFile']
+           'DevNullFilelikeObject', 'read_lines', 'AutoflushFile',
+           'jump_to_directory']
 
 from satella.coding import wraps
 from satella.coding.recast_exceptions import silence_excs, reraise_as
@@ -37,6 +38,35 @@ def value_error_on_closed_file(getter):
 closed_devnull = value_error_on_closed_file(lambda y: y.is_closed)
 
 
+class jump_to_directory(object):
+    """
+    This will temporarily change the current working directory. Note, however, that it does not protect you
+    against the user deliberately changing the working directory from within the block.
+
+    Non-existing directories will be created.
+
+    :ivar path: (str) target path
+    :ivar prev_path: (str) working directory that was current before this context manager was entered.
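+
+    A minimal usage sketch ('/tmp/workdir' is an example path; it will be created if it does not exist):
+
+    >>> with jump_to_directory('/tmp/workdir'):
+    >>>     ...              # os.getcwd() now points at /tmp/workdir
+    >>> # the previous working directory has been restored at this point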
+    """
+
+    __slots__ = 'path', 'prev_path'
+
+    def __init__(self, path: str, mode: int = 0o777):
+        self.path = path
+        self.prev_path = None
+        os.makedirs(self.path, mode=mode, exist_ok=True)
+
+    def __enter__(self):
+        self.prev_path = os.getcwd()
+        os.chdir(self.path)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        assert self.prev_path is not None
+        os.chdir(self.prev_path)
+        return False
+
+
 class DevNullFilelikeObject(io.FileIO):
     """
     A /dev/null filelike object. For multiple uses.
diff --git a/satella/instrumentation/memory/get_object_size.py b/satella/instrumentation/memory/get_object_size.py
index ffdeabb52771d6f2cee535bc15fb9447dfa6aa0c..67c604bd312ee9546e51777eb2b256f46d0bef4e 100644
--- a/satella/instrumentation/memory/get_object_size.py
+++ b/satella/instrumentation/memory/get_object_size.py
@@ -1,3 +1,4 @@
+import platform
 import sys
 
 
@@ -11,10 +12,9 @@ def get_size(obj, seen=None) -> int:
     :return: size in bytes of the object and all of it's subcomponents
     :raises RuntimeError: when ran on PyPy
     """
-    try:
-        size = sys.getsizeof(obj)
-    except TypeError:
-        raise RuntimeError('Running on PyPy?')
+    if platform.python_implementation() != 'CPython':
+        raise RuntimeError('Runnable only on CPython')
+    size = sys.getsizeof(obj)
     if seen is None:
         seen = set()
     obj_id = id(obj)
diff --git a/satella/instrumentation/metrics/__init__.py b/satella/instrumentation/metrics/__init__.py
index 7c782e9229ca8f18fd09ed3cc856b45c50434503..64553ac043da39e2b49142f9141fc347fa1acd08 100644
--- a/satella/instrumentation/metrics/__init__.py
+++ b/satella/instrumentation/metrics/__init__.py
@@ -31,6 +31,33 @@ def adjust_metric_level_for_root(metric_level: tp.Optional[MetricLevel],
 ALLOWED_CHARACTERS = string.ascii_uppercase + string.digits + ':' + '_' + '.'
 
 
+def _process_metric(name, name_index, metric_level, metric_level_to_set_for_children, name_part, root_metric,
+                    metric_name, metric_type, kwargs):
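+    """Helper for getMetric(): return the metric registered under the given name prefix, creating it if necessary."""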
+    tentative_name = '.'.join(name[:name_index])
+    if tentative_name not in metrics:
+        if tentative_name == '':
+            metric = Metric('',
+                            None,
+                            adjust_metric_level_for_root(metric_level,
+                                                         metric_level_to_set_for_children),
+                            **kwargs)
+            metric.level = MetricLevel.RUNTIME
+            root_metric = metric
+        elif metric_name == tentative_name:
+            metric = METRIC_NAMES_TO_CLASSES[metric_type](name_part, root_metric,
+                                                          metric_level, **kwargs)
+        else:
+            metric = Metric(name_part, root_metric, metric_level_to_set_for_children,
+                            **kwargs)
+        metrics[tentative_name] = metric
+        if metric != root_metric:  # prevent infinite recursion errors
+            root_metric.append_child(metric)
+    else:
+        metric = metrics[tentative_name]
+    root_metric = metric
+    return metric, root_metric
+
+
 # noinspection PyPep8Naming
 def getMetric(metric_name: str = '',
               metric_type: str = 'base',
@@ -65,28 +92,8 @@ def getMetric(metric_name: str = '',
                                           existing_type=metrics[metric_name].CLASS_NAME)
 
         for name_index, name_part in itertools.chain(((0, ''),), enumerate(name, start=1)):
-            tentative_name = '.'.join(name[:name_index])
-            if tentative_name not in metrics:
-                if tentative_name == '':
-                    metric = Metric('',
-                                    None,
-                                    adjust_metric_level_for_root(metric_level,
-                                                                 metric_level_to_set_for_children),
-                                    **kwargs)
-                    metric.level = MetricLevel.RUNTIME
-                    root_metric = metric
-                elif metric_name == tentative_name:
-                    metric = METRIC_NAMES_TO_CLASSES[metric_type](name_part, root_metric,
-                                                                  metric_level, **kwargs)
-                else:
-                    metric = Metric(name_part, root_metric, metric_level_to_set_for_children,
-                                    **kwargs)
-                metrics[tentative_name] = metric
-                if metric != root_metric:  # prevent infinite recursion errors
-                    root_metric.append_child(metric)
-            else:
-                metric = metrics[tentative_name]
-            root_metric = metric
+            metric, root_metric = _process_metric(name, name_index, metric_level, metric_level_to_set_for_children,
+                                                  name_part, root_metric, metric_name, metric_type, kwargs)
 
         if metric_level is not None:
             metric.level = metric_level
diff --git a/satella/json.py b/satella/json.py
index 073412df2b619a4fc6c6fe7f07935d9f438a5009..ac1286b211a1f8e481c19fffcbbe484add59d2cb 100644
--- a/satella/json.py
+++ b/satella/json.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import enum
 import json
 import typing as tp
@@ -8,7 +10,8 @@ from satella.coding.typing import NoneType
 from satella.files import write_out_file_if_different
 
 __all__ = ['JSONEncoder', 'JSONAble', 'json_encode', 'read_json_from_file',
-           'write_json_to_file', 'write_json_to_file_if_different']
+           'write_json_to_file', 'write_json_to_file_if_different', 'JSONAbleDataObject']
+
 
 Jsonable = tp.TypeVar('Jsonable', list, dict, str, int, float, None)
 
@@ -119,3 +122,57 @@ def read_json_from_file(path: str) -> JSONAble:
             except json.decoder.JSONDecodeError as e:
                 raise ValueError(str(e))
     return v
+
+
+class JSONAbleDataObject:
+    """
+    A data class that supports conversion of its instances to JSON.
+
+    Define like this:
+
+    >>> class CultureContext(JSONAbleDataObject):
+    >>>     language: str
+    >>>     timezone: str
+    >>>     units: str = 'metric'
+
+    Note that type annotations are mandatory and default values are supported. Being data value objects, instances
+    support equality comparison and hashing.
+
+    And use like this:
+
+    >>> a = CultureContext(language='pl', timezone='Europe/Warsaw')
+    >>> assert a.to_json() == {'language': 'pl', 'timezone': 'Europe/Warsaw', 'units': 'metric'}
+    >>> assert CultureContext.from_json(a.to_json()) == a
+    """
+
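+    # Equality and hashing are computed over all annotated fields.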
+    def __eq__(self, other) -> bool:
+        return all(getattr(self, annotation) == getattr(other, annotation) for annotation in self.__class__.__annotations__.keys())
+
+    def __hash__(self) -> int:
+        hash_ = 0
+        for annotation in self.__class__.__annotations__.keys():
+            hash_ ^= hash(getattr(self, annotation))
+        return hash_
+
+    def __init__(self, **kwargs):
+        """
+        :raises ValueError: a value was not provided for a field that has no default
+        """
+        for annotation in self.__class__.__annotations__.keys():
+            try:
+                annot_val = kwargs.pop(annotation)
+                setattr(self, annotation, annot_val)
+            except KeyError:
+                if not hasattr(self, annotation):
+                    raise ValueError(f'Argument {annotation} not provided!')
+
+    def to_json(self) -> tp.Dict:
+        """Convert self to JSONable value"""
+        result = {}
+        for annotation in self.__class__.__annotations__.keys():
+            result[annotation] = getattr(self, annotation)
+        return result
+
+    @classmethod
+    def from_json(cls, jsonable) -> JSONAbleDataObject:
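+        """Construct an instance from a dict, such as one produced by to_json()."""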
+        return cls(**jsonable)
diff --git a/tests/test_coding/test_concurrent.py b/tests/test_coding/test_concurrent.py
index c2278655065904901b099ac0c0f359557f7c8404..51f9bd29675d68c47d2ec74b27176502da0950f2 100644
--- a/tests/test_coding/test_concurrent.py
+++ b/tests/test_coding/test_concurrent.py
@@ -656,7 +656,7 @@ class TestConcurrent(unittest.TestCase):
         mt.start().join()
         mt.terminate()
 
-    @unittest.skipUnless(sys.implementation.name == 'cpython', 'Does not work on PyPy :(')
+    @unittest.skipUnless(sys.implementation.name != 'cpython', 'Does not work on PyPy :(')
     def test_condition(self):
         dct = {'a': False}
         slf = self
diff --git a/tests/test_coding/test_decorators.py b/tests/test_coding/test_decorators.py
index cb69d32b2961425980f2388564036d61a5d0d729..38ff234a19dfe471f622d2beab8f4797d1523d75 100644
--- a/tests/test_coding/test_decorators.py
+++ b/tests/test_coding/test_decorators.py
@@ -5,7 +5,7 @@ from socket import socket
 
 import time
 from satella.coding import wraps, chain_functions, postcondition, \
-    log_exceptions, queue_get, precondition, short_none
+    log_exceptions, queue_get, precondition, short_none, wrap_callable_in_context_manager
 from satella.coding.decorators import auto_adapt_to_methods, attach_arguments, \
     execute_before, loop_while, memoize, copy_arguments, replace_argument_if, \
     retry, return_as_list, default_return, transform_result, transform_arguments, \
@@ -18,6 +18,23 @@ logger = logging.getLogger(__name__)
 
 
 class TestDecorators(unittest.TestCase):
+    def test_wrap_ctxt_mgr(self):
+        a = None
+        class CtxtMgr:
+            def __init__(self, value):
+                nonlocal a
+                a = value
+
+            def __enter__(self):
+                return self
+
+            def __exit__(self, exc_type, exc_val, exc_tb):
+                return False
+
+        clbl = lambda y: y
+        clbl = wrap_callable_in_context_manager(clbl, CtxtMgr, 5)
+        clbl(3)
+        self.assertEqual(a, 5)
 
     def test_cached_property(self):
         class Example:
diff --git a/tests/test_coding/test_singleton.py b/tests/test_coding/test_singleton.py
index d4079f1e8d7cebb36d5acc832aef29769343f305..91ea5188dc60653b36d3957f5fbe6819d37078dc 100644
--- a/tests/test_coding/test_singleton.py
+++ b/tests/test_coding/test_singleton.py
@@ -1,3 +1,4 @@
+import gc
 import queue
 import unittest
 
@@ -57,3 +58,24 @@ class TestSingleton(unittest.TestCase):
         self.assertEqual(set(get_instances_for_singleton(MyClass)), {('a',), ('b',)})
         delete_singleton_for(MyClass, 'a')
         self.assertEqual(set(get_instances_for_singleton(MyClass)), {('b',)})
+
+    def test_singleton_with_regards_to_weak_refs(self):
+        instantiations = 0
+        @SingletonWithRegardsTo(num_args=1, weak_refs=True)
+        class MyClass:
+            def __init__(self, device_id: str):
+                nonlocal instantiations
+                self.device_id = device_id
+                instantiations += 1
+
+        a = MyClass('a')
+        b = MyClass('b')
+        c = MyClass('a')
+        self.assertEqual(instantiations, 2)
+        del a
+        a = MyClass('a')
+        self.assertEqual(instantiations, 2)
+        del b
+        gc.collect()
+        b = MyClass('b')
+        self.assertEqual(instantiations, 3)
diff --git a/tests/test_coding/test_structures.py b/tests/test_coding/test_structures.py
index 101778349bf5a59fee25b14f849e8da263e494e2..40526c1326d1a2b3464ae9102d106b3613601dce 100644
--- a/tests/test_coding/test_structures.py
+++ b/tests/test_coding/test_structures.py
@@ -446,7 +446,7 @@ class TestStructures(unittest.TestCase):
                 return self.value
 
         cg = TestCacheGetter()
-        cd = CacheDict(1, 2, cg)
+        cd = CacheDict('1s', '2s', cg)
         now = time.monotonic()
         self.assertEqual(cd[2], 2)
         self.assertEqual(list(cd), [2])
@@ -866,6 +866,15 @@ class TestStructures(unittest.TestCase):
 
 class TestDictObject(unittest.TestCase):
 
+    def test_constructor2(self):
+        a = {'a': 4, 'b': {'c': 1, 'd': 2}}
+        a = apply_dict_object(a)
+        self.assertEqual(a.a, 4)
+        self.assertEqual(a.b.c, 1)
+        self.assertEqual(a.b.d, 2)
+        self.assertEqual(a['a'], 4)
+        self.assertEqual(a['b'], {'c': 1, 'd': 2})
+
     def test_constructor(self):
         a = DictObject({'a': 5})
         b = apply_dict_object(a)
diff --git a/tests/test_coding/test_transforms.py b/tests/test_coding/test_transforms.py
index 7f49e282106f35be20ca225700d489f81d6c6285..ddcabc28849da335bd7c0d43d10dc19e9a507916 100644
--- a/tests/test_coding/test_transforms.py
+++ b/tests/test_coding/test_transforms.py
@@ -6,11 +6,18 @@ import base64
 from satella.coding.predicates import x
 from satella.coding.transforms import stringify, split_shuffle_and_join, one_tuple, \
     merge_series, pad_to_multiple_of_length, clip, b64encode, linear_interpolate, \
-    hashables_to_int, none_if_false, merge_list, is_subset, unpack_dict
+    hashables_to_int, none_if_false, merge_list, is_subset, unpack_dict, list_values_to_indices
 
 
 class TestTransforms(unittest.TestCase):
 
+    def test_list_values_to_indices(self):
+        a = ['abc', 'def', 'ghi']
+        b = list_values_to_indices(a)
+        self.assertEqual(b, {'abc': 0, 'def': 1, 'ghi': 2})
+        c = ['abc', 'abc', 'def']
+        self.assertRaises(ValueError, list_values_to_indices, c)
+
     def test_unpack_dict(self):
         a, b, c = unpack_dict({1: 2, 2: 3, 4: 5}, 1, 2, 4)
         self.assertTrue(a == 2 and b == 3 and c == 5)
diff --git a/tests/test_files.py b/tests/test_files.py
index 7bc2a6416729bbe39ca47fd19f9824213f6cf7da..6206d6f02b99fa2f53df3c1688915be45869fff6 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -6,7 +6,7 @@ import unittest
 import shutil
 from satella.files import read_re_sub_and_write, find_files, split, read_in_file, write_to_file, \
     write_out_file_if_different, make_noncolliding_name, try_unlink, DevNullFilelikeObject, \
-    read_lines, AutoflushFile
+    read_lines, AutoflushFile, jump_to_directory
 
 
 def putfile(path: str) -> None:
@@ -16,6 +16,11 @@ def putfile(path: str) -> None:
 
 class TestFiles(unittest.TestCase):
 
+    def test_jump_to_directory(self):
+        with jump_to_directory('test_d/path'):
+            path = os.getcwd()
+            self.assertTrue(path.endswith('path'))
+
     def test_read_nonexistent_file(self):
         self.assertRaises(FileNotFoundError, lambda: read_in_file('moot'))
 
diff --git a/tests/test_instrumentation/test_memory.py b/tests/test_instrumentation/test_memory.py
index c072e8ce1ab8cd7d53f81d48e02186a75abcaea6..be51355ec22ea06f6910586088be62d5fa17b83c 100644
--- a/tests/test_instrumentation/test_memory.py
+++ b/tests/test_instrumentation/test_memory.py
@@ -22,7 +22,6 @@ class OnDemandCondition(CustomCondition):
 
 
 class TestMemory(unittest.TestCase):
-    @unittest.skipIf(platform.python_implementation(), 'This will not work on PyPy')
     def test_get_size_dict(self):
         a = {'aba': 'aba'}
 
@@ -30,8 +29,12 @@ class TestMemory(unittest.TestCase):
             def __init__(self):
                 self.aba = 'aba'
 
-        self.assertGreater(get_size(a), 6)
-        self.assertGreater(get_size(Aba()), 6)
+        if platform.python_implementation() == 'PyPy':
+            self.assertRaises(RuntimeError, get_size, a)
+            self.assertRaises(RuntimeError, get_size, Aba())
+        else:
+            self.assertGreater(get_size(a), 6)
+            self.assertGreater(get_size(Aba()), 6)
 
     @unittest.skipIf(sys.platform == 'win32', 'testable only on unices')
     def test_install_dump_on(self):
diff --git a/tests/test_json.py b/tests/test_json.py
index 0294141df6c8a73042ec52505593e3060578b282..334dffce80615cb03634985834b79a7d36e52a99 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -4,11 +4,22 @@ import typing as tp
 import unittest
 
 from satella.json import JSONAble, json_encode, read_json_from_file, write_json_to_file, \
-    write_json_to_file_if_different, JSONEncoder
+    write_json_to_file_if_different, JSONEncoder, JSONAbleDataObject
 
 
 class TestJson(unittest.TestCase):
 
+    def test_jsonable_data_object(self):
+        class CultureContext(JSONAbleDataObject):
+            units: str = 'metric'
+            timezone: str
+            language: str
+
+        a = CultureContext(language='pl', timezone='Europe/Warsaw')
+        self.assertEqual(a.to_json(), {'language': 'pl', 'timezone': 'Europe/Warsaw', 'units': 'metric'})
+        self.assertEqual(CultureContext.from_json(a.to_json()), a)
+        self.assertEqual(hash(CultureContext.from_json(a.to_json())), hash(a))
+
     def test_json_encoder_enums(self):
         enc = JSONEncoder()