From cd5cd188c51bfb768b333f110c2f69ed57c990bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Ma=C5=9Blanka?= <piotr.maslanka@henrietta.com.pl> Date: Fri, 6 Dec 2019 17:00:20 +0100 Subject: [PATCH] Feature v2.1/configuration (#31) * v2.1 * travis - don't use python 2.7 * fix not to use six * NoReturn removed * doc fix * ported configuration section from metapod * can load config schema from a dict now * .convert() -> .__call__ --- .travis.yml | 2 - CHANGELOG.md | 6 + LICENSE | 4 +- README.md | 2 +- requirements.txt | 3 - satella/__init__.py | 3 +- satella/coding/__init__.py | 14 +- satella/coding/algos.py | 3 - satella/coding/concurrent/callablegroup.py | 10 +- satella/coding/concurrent/locked_dataset.py | 6 +- satella/coding/decorators.py | 107 ++++++-- satella/coding/fun_static.py | 2 +- satella/coding/structures/singleton.py | 73 ++--- satella/coding/structures/structures.py | 48 +--- satella/coding/typecheck/__init__.py | 25 -- satella/coding/typecheck/argparse.py | 234 ---------------- satella/coding/typecheck/basics.py | 37 --- satella/coding/typecheck/corechk.py | 78 ------ satella/coding/typecheck/decorators.py | 253 ------------------ satella/configuration/__init__.py | 9 + satella/configuration/schema/__init__.py | 8 + satella/configuration/schema/descriptors.py | 245 +++++++++++++++++ satella/configuration/sources/__init__.py | 14 + satella/configuration/sources/base.py | 15 ++ satella/configuration/sources/derivative.py | 65 +++++ satella/configuration/sources/envvars.py | 43 +++ satella/configuration/sources/file.py | 128 +++++++++ satella/configuration/sources/format.py | 95 +++++++ satella/configuration/sources/from_dict.py | 78 ++++++ satella/exceptions.py | 17 ++ satella/instrumentation/__init__.py | 4 +- satella/instrumentation/trace_back.py | 102 +++---- satella/posix/__init__.py | 2 +- satella/posix/daemon.py | 15 +- satella/posix/signals.py | 8 +- setup.cfg | 8 - setup.py | 5 +- tests/test_coding/test_concurrent.py | 1 - tests/test_coding/test_debug.py | 196 +------------- tests/test_coding/test_fun_static.py | 4 +- tests/test_coding/test_monitor.py | 2 +- tests/test_coding/test_rethrow.py | 1 - tests/test_coding/test_structures.py | 3 +- tests/test_coding/test_treat_result_with.py | 17 -- tests/test_configuration/__init__.py | 2 + .../test_load_source_from_dict.py | 40 +++ tests/test_configuration/test_schema.py | 101 +++++++ .../test_sources/__init__.py | 3 + .../test_sources/test_envvars.py | 12 + .../test_sources/test_file.py | 55 ++++ .../test_sources/test_json.py | 33 +++ .../test_sources/test_optional.py | 21 ++ .../test_configuration/test_sources/utils.py | 47 ++++ tests/test_instrumentation/test_trace_back.py | 5 + 54 files changed, 1255 insertions(+), 1059 deletions(-) delete mode 100644 satella/coding/typecheck/__init__.py delete mode 100644 satella/coding/typecheck/argparse.py delete mode 100644 satella/coding/typecheck/basics.py delete mode 100644 satella/coding/typecheck/corechk.py delete mode 100644 satella/coding/typecheck/decorators.py create mode 100644 satella/configuration/__init__.py create mode 100644 satella/configuration/schema/__init__.py create mode 100644 satella/configuration/schema/descriptors.py create mode 100644 satella/configuration/sources/__init__.py create mode 100644 satella/configuration/sources/base.py create mode 100644 satella/configuration/sources/derivative.py create mode 100644 satella/configuration/sources/envvars.py create mode 100644 satella/configuration/sources/file.py create mode 100644 
satella/configuration/sources/format.py create mode 100644 satella/configuration/sources/from_dict.py delete mode 100644 tests/test_coding/test_treat_result_with.py create mode 100644 tests/test_configuration/__init__.py create mode 100644 tests/test_configuration/test_load_source_from_dict.py create mode 100644 tests/test_configuration/test_schema.py create mode 100644 tests/test_configuration/test_sources/__init__.py create mode 100644 tests/test_configuration/test_sources/test_envvars.py create mode 100644 tests/test_configuration/test_sources/test_file.py create mode 100644 tests/test_configuration/test_sources/test_json.py create mode 100644 tests/test_configuration/test_sources/test_optional.py create mode 100644 tests/test_configuration/test_sources/utils.py diff --git a/.travis.yml b/.travis.yml index 8bdeb12f..4cefbdb1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,11 +1,9 @@ language: python python: - - "2.7" - "3.5" - "3.6" - "3.7-dev" - "pypy3.5" - - "pypy" cache: pip install: - pip install -r requirements.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index 30d1cf43..06edc670 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# v2.1 + +* removed support for Python 2.7 +* most of the typecheck module was cut +* implemented tracking standard stack frames, not only the exception ones + ## v2.0.24 * added [LockedDataset](/satella/coding/concurrent/locked_dataset.py) diff --git a/LICENSE b/LICENSE index 1ff5ac18..cb166672 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2013-2017 Piotr Maślanka +Copyright (c) 2013-2019 Piotr Maślanka All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -9,4 +9,4 @@ Redistribution and use in source and binary forms, with or without modification, 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md index 34292350..c61e58d3 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ satella _satella.posix_ in unavailable on non-POSIX systems, but the rest should work OK. -Satella is a Python library for writing server applications, especially those dealing with mundane but useful things. +Satella is a Python 3.5+ library for writing server applications, especially those dealing with mundane but useful things. See [LICENSE](LICENSE) for text of the license. diff --git a/requirements.txt b/requirements.txt index e65b5374..e69de29b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +0,0 @@ -six -monotonic -typing diff --git a/satella/__init__.py b/satella/__init__.py index f059db8f..4520c3dc 100644 --- a/satella/__init__.py +++ b/satella/__init__.py @@ -1,2 +1,3 @@ # coding=UTF-8 -__version__ = '2.0.25a2' +__version__ = '2.1.1rc2' + diff --git a/satella/coding/__init__.py b/satella/coding/__init__.py index f4b70080..c8b7ad46 100644 --- a/satella/coding/__init__.py +++ b/satella/coding/__init__.py @@ -7,27 +7,17 @@ from __future__ import print_function, absolute_import, division from .algos import merge_dicts from .concurrent import Monitor, RMonitor, CallableGroup, LockedDataset from .recast_exceptions import rethrow_as, silence_excs -from .structures import TimeBasedHeap, Heap, typednamedtuple, OmniHashableMixin from .structures import TimeBasedHeap, Heap, typednamedtuple, OmniHashableMixin, Singleton -from .typecheck import typed, Callable, Sequence, \ - TypeVar, Mapping, Iterable, Any, Optional, CallSignature, \ - Number, coerce, Set, Dict, List, Tuple, checked_coerce, for_argument, \ - precondition, PreconditionError -from .decorators import treat_result_with +from .decorators import precondition, for_argument from .fun_static import static_var __all__ = [ 'typednamedtuple', 'OmniHashableMixin' 'TimeBasedHeap', 'Heap', 'CallableGroup', 'Monitor', 'RMonitor', 'CallableGroup', 'LockedDataset', 'merge_dicts', - 'typed', 'NewType', 'Callable', 'Sequence', 'coerce' - 'TypeVar', 'Mapping', 'Iterable', 'Union', 'Any', - 'Optional', - 'CallSignature', 'Number', - 'Set', 'Dict', 'List', 'Tuple', 'checked_coerce', 'for_argument', + 'for_argument', 'precondition', 'PreconditionError', 'rethrow_as', 'silence_excs', 'Singleton', - 'treat_result_with', 'static_var' ] diff --git a/satella/coding/algos.py b/satella/coding/algos.py index 8a42c61e..c94c28f4 100644 --- a/satella/coding/algos.py +++ b/satella/coding/algos.py @@ -1,8 +1,6 @@ # coding=UTF-8 from __future__ import print_function, absolute_import, division -from .typecheck import typed - def _merge(v1, v2): if isinstance(v1, dict) and isinstance(v2, dict): @@ -20,6 +18,5 @@ def _merge(v1, v2): return v2 -@typed(dict, dict, returns=dict) def merge_dicts(first, second): return _merge(first, second) diff --git a/satella/coding/concurrent/callablegroup.py b/satella/coding/concurrent/callablegroup.py index fcb55b14..7fe875b9 100644 --- a/satella/coding/concurrent/callablegroup.py +++ 
b/satella/coding/concurrent/callablegroup.py @@ -2,8 +2,7 @@ from __future__ import print_function, absolute_import, division import logging - -from ..typecheck import typed, Callable +import typing as tp logger = logging.getLogger(__name__) @@ -31,7 +30,7 @@ class CallableGroup(object): # todo not threadsafe with oneshots - def __init__(self, gather=False, swallow_exceptions=False): + def __init__(self, gather: bool = True, swallow_exceptions: bool = False): """ :param gather: if True, results from all callables will be gathered into a list and returned from __call__ @@ -43,13 +42,12 @@ class CallableGroup(object): self.gather = gather self.swallow_exceptions = swallow_exceptions - @typed(None, Callable, bool) - def add(self, callable, oneshot=False): + def add(self, callable_: tp.Callable, oneshot: bool = False): """ :param callable: callable :param oneshot: if True, callable will be unregistered after single call """ - self.callables.append((callable, oneshot)) + self.callables.append((callable_, oneshot)) def __call__(self, *args, **kwargs): """ diff --git a/satella/coding/concurrent/locked_dataset.py b/satella/coding/concurrent/locked_dataset.py index f0a3d81e..0d348fa3 100644 --- a/satella/coding/concurrent/locked_dataset.py +++ b/satella/coding/concurrent/locked_dataset.py @@ -3,7 +3,6 @@ import typing import threading import inspect import functools -import six from ...exceptions import ResourceLocked, ResourceNotLocked @@ -73,10 +72,7 @@ class LockedDataset(object): return super(LockedDataset, self).__setattr__(key, value) def __call__(self, blocking=True, timeout=-1): - if six.PY2: - get_internal(self).args = blocking, - else: - get_internal(self).args = blocking, timeout + get_internal(self).args = blocking, timeout return self def __enter__(self): diff --git a/satella/coding/decorators.py b/satella/coding/decorators.py index 529d04b2..1f196e07 100644 --- a/satella/coding/decorators.py +++ b/satella/coding/decorators.py @@ -1,30 +1,103 @@ import logging import typing import functools +import itertools import warnings + logger = logging.getLogger(__name__) -__all__ = ['treat_result_with'] +from ..exceptions import PreconditionError + +__all__ = ['treat_result_with', 'precondition', 'for_argument'] + + +_NOP = lambda x: x +_TRUE = lambda x: True + +def precondition(*t_ops): + """ + Check that a precondition happens for given parameter. + Only positional arguments are supported. + + You can do it like this: + + @precondition(lambda x: x == 1) + def return_two(x): + return x*2 + + or + + @precondition('x == 1') + def return_two(x): + .. + + If None is passed then argument will be always assumed to be True. + You can use all standard locals in precondition. + + You function call will return a PreconditionError (subclass of + ValueError) if a precondition fails + """ + + tn_ops = [] + + for t_op in t_ops: + if t_op is None: + precond = _TRUE + elif isinstance(t_op, str): + q = dict(globals()) + exec('_precond = lambda x: ' + t_op, q) + precond = q['_precond'] + else: + precond = t_op + + tn_ops.append(precond) + + from satella.coding.recast_exceptions import rethrow_as + + def outer(fun): + @functools.wraps(fun) + def inner(*args, **kwargs): + assert len(args) >= len(tn_ops), 'More preconditions than positional arguments!' 
+ with rethrow_as(TypeError, PreconditionError): + for arg, precond in itertools.zip_longest(args, tn_ops, fillvalue=_TRUE): + print(arg, precond, precond.__doc__) + if not precond(arg): + raise PreconditionError( + 'Argument of value %s failed precondition check' % (arg,)) + return fun(*args, **kwargs) + return inner -def treat_result_with(callable): + return outer + + +def for_argument(*t_ops, **t_kwops): """ - Before this function returns, process it's result with callable + Calls a callable for each of the arguments. + + returns is a special keyword, a callable to process the result through - @treat_result_with(callable) - def fun(*args, **kwargs): - ... + Use like: - is equivalent to: - def fun(*args, **kwargs): - ... + @for_argument(int, str, typed=bool, returns=int) + def check(val1, val2, typed='True'): + if typed: + return val1 + int(val2) - fun = lambda *args, **kwargs: callable(fun(*args, **kwargs)) """ - warnings.warn('Use for_argument(returns=x) instead', DeprecationWarning) - def inner(f): - @functools.wraps(f) - def inner2(*args, **kwargs): - return callable(f(*args, **kwargs)) - return inner2 - return inner + t_ops = [_NOP if op == 'self' else op for op in t_ops] + returns = t_kwops.pop('returns', _NOP) + + def outer(fun): + @functools.wraps(fun) + def inner(*args, **kwargs): + # add extra 'None' argument if unbound method + assert len(args) >= len(t_ops) + a = fun(*((_NOP if op is None else op)(arg) for arg, op in + itertools.zip_longest(args, t_ops, fillvalue=None)), + **{k: t_kwops.get(k, _NOP)(v) for k, v in kwargs.items()}) + return returns(a) + + return inner + + return outer diff --git a/satella/coding/fun_static.py b/satella/coding/fun_static.py index 9563c010..f00dc602 100644 --- a/satella/coding/fun_static.py +++ b/satella/coding/fun_static.py @@ -15,7 +15,7 @@ def static_var(var_name, value): def count(): count.counter += 1 - or (syntax valid only on Python 3) + or: class MyClass: @static_var('counter', 2) diff --git a/satella/coding/structures/singleton.py b/satella/coding/structures/singleton.py index dd4de72a..99225b85 100644 --- a/satella/coding/structures/singleton.py +++ b/satella/coding/structures/singleton.py @@ -3,68 +3,37 @@ from __future__ import print_function, absolute_import, division import functools -import six __all__ = [ 'Singleton', ] -if six.PY3: +# Taken from https://wiki.python.org/moin/PythonDecoratorLibrary +def Singleton(cls): + """ + Make a singleton out of decorated class. - # Taken from https://wiki.python.org/moin/PythonDecoratorLibrary - def singleton(cls): - """ - Make a singleton out of decorated class. + Usage: - Usage: + @Singleton + class MyClass(object): + ... + """ - @Singleton - class MyClass(object): - ... - """ + cls.__new_old__ = cls.__new__ - cls.__new_old__ = cls.__new__ - - @functools.wraps(cls.__new__) - def singleton_new(cls, *args, **kw): - it = cls.__dict__.get('__it__') - if it is not None: - return it - - cls.__it__ = it = cls.__new_old__(cls, *args, **kw) - it.__init_old__(*args, **kw) + @functools.wraps(cls.__new__) + def singleton_new(cls, *args, **kw): + it = cls.__dict__.get('__it__') + if it is not None: return it - cls.__new__ = singleton_new - cls.__init_old__ = cls.__init__ - cls.__init__ = object.__init__ - - return cls -else: - class _SingletonWrapper: - """ - A singleton wrapper class. Its instances would be created - for each decorated class. 
- """ - - def __init__(self, cls): - self.__wrapped__ = cls - self._instance = None - - def __call__(self, *args, **kwargs): - """Returns a single instance of decorated class""" - if self._instance is None: - self._instance = self.__wrapped__(*args, **kwargs) - return self._instance - + cls.__it__ = it = cls.__new_old__(cls, *args, **kw) + it.__init_old__(*args, **kw) + return it - # taken from https://pypi.python.org/pypi/singleton-decorator/1.0.0 - def singleton(cls): - """ - A singleton decorator. Returns a wrapper objects. A call on that object - returns a single instance object of decorated class. Use the __wrapped__ - attribute to access decorated class directly in unit tests. - """ - return _SingletonWrapper(cls) + cls.__new__ = singleton_new + cls.__init_old__ = cls.__init__ + cls.__init__ = object.__init__ -Singleton = singleton + return cls diff --git a/satella/coding/structures/structures.py b/satella/coding/structures/structures.py index 6266d2c5..c8840d15 100644 --- a/satella/coding/structures/structures.py +++ b/satella/coding/structures/structures.py @@ -5,14 +5,10 @@ import copy import functools import heapq import logging +import typing as tp import operator import time -import six - -from satella.coding.typecheck import typed, Callable, Iterable, Optional, \ - Number - logger = logging.getLogger(__name__) __all__ = [ @@ -21,9 +17,6 @@ __all__ = [ 'OmniHashableMixin' ] -returns_bool = typed(returns=bool) -returns_iterable = typed(returns=Iterable) - class OmniHashableMixin(object): _HASH_FIELDS_TO_USE = [] @@ -68,8 +61,7 @@ class Heap(object): self.heap = list(from_list) heapq.heapify(self.heap) - @typed(object, Iterable) - def push_many(self, items): + def push_many(self, items: tp.Iterable): for item in items: self.push(item) @@ -101,16 +93,15 @@ class Heap(object): def __iter__(self): return self.heap.__iter__() - @typed(returns=object) - def pop(self): + def pop(self) -> tp.Any: """ Return smallest element of the heap. :raises IndexError: on empty heap """ return heapq.heappop(self.heap) - @typed('self', Optional(Callable), Optional(Callable)) - def filtermap(self, filter_fun=None, map_fun=None): + def filtermap(self, filter_fun: tp.Optional[tp.Callable] = None, + map_fun: tp.Optional[tp.Callable] = None): """ Get only items that return True when condition(item) is True. Apply a transform: item' = item(condition) on @@ -122,15 +113,13 @@ class Heap(object): self.heap = heap heapq.heapify(self.heap) - @returns_bool - def __bool__(self): + def __bool__(self) -> bool: """ Is this empty? """ return len(self.heap) > 0 - @returns_iterable - def iter_ascending(self): + def iter_ascending(self) -> tp.Iterator: """ Return an iterator returning all elements in this heap sorted ascending. State of the heap is not changed @@ -140,8 +129,7 @@ class Heap(object): while heap: yield heapq.heappop(heap) - @returns_iterable - def iter_descending(self): + def iter_descending(self) -> tp.Iterator: """ Return an iterator returning all elements in this heap sorted descending. 
State of the heap is not changed @@ -149,21 +137,16 @@ class Heap(object): """ return reversed(list(self.iter_ascending())) - @typed(returns=six.integer_types) - def __len__(self): + def __len__(self) -> int: return len(self.heap) def __str__(self): return '<satella.coding.Heap: %s elements>' % (len(self, )) - def __unicode__(self): - return six.text_type(str(self)) - def __repr__(self): return u'<satella.coding.Heap>' - @returns_bool - def __contains__(self, item): + def __contains__(self, item) -> bool: return item in self.heap @@ -182,7 +165,6 @@ class TimeBasedHeap(Heap): Use default_clock_source to pass a callable: * time.time - * monotonic.monotonic #notthreadsafe """ @@ -190,16 +172,14 @@ class TimeBasedHeap(Heap): def __repr__(self): return u'<satella.coding.TimeBasedHeap>' - @typed(returns=Iterable) - def items(self): + def items(self) -> tp.Iterable: """ Return an iterator, but WITHOUT timestamps (only items), in unspecified order """ return (ob for ts, ob in self.heap) - @typed('self', Optional(Callable((), Number))) - def __init__(self, default_clock_source=None): + def __init__(self, default_clock_source: tp.Callable[[], int] = None): """ Initialize an empty heap """ @@ -222,9 +202,7 @@ class TimeBasedHeap(Heap): assert timestamp is not None self.push((timestamp, item)) - @returns_iterable - @typed('self', Optional(Number)) - def pop_less_than(self, less=None): + def pop_less_than(self, less: tp.Optional[tp.Union[int, float]] = None) -> tp.Iterator: """ Return all elements less (sharp inequality) than particular value. diff --git a/satella/coding/typecheck/__init__.py b/satella/coding/typecheck/__init__.py deleted file mode 100644 index b1d6bc06..00000000 --- a/satella/coding/typecheck/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding=UTF-8 -from __future__ import print_function, absolute_import, division - -""" -Decorator for debug-time typechecking - -If you are simultaneously using @typed and @coerce, use them in following order: - - @coerce(...) - @typed(...) - def fun(..): - ... - -""" - -from .basics import * -from .argparse import * -from .decorators import * - -from .decorators import __all__ as __all_decorators__ -from .argparse import __all__ as __all_argparse__ -from .basics import __all__ as __all_basics__ - -__all__ = filter(lambda p: not p.startswith('_'), - __all_decorators__ + __all_argparse__ + __all_basics__) diff --git a/satella/coding/typecheck/argparse.py b/satella/coding/typecheck/argparse.py deleted file mode 100644 index 5f94c41f..00000000 --- a/satella/coding/typecheck/argparse.py +++ /dev/null @@ -1,234 +0,0 @@ -# coding=UTF-8 -from __future__ import print_function, absolute_import, division - -import inspect -import itertools -from collections import namedtuple -from copy import copy - -from .basics import * - -__all__ = [ - '_CSArgument', - 'CSArgument', - 'CSVarargsPlaceholder', - 'CSKwargsPlaceholder', - 'CSBadTypeError', 'CSMultipleValuesGivenError', 'CSNotGivenError', 'CSTypeError', - 'CallSignature' -] - -_CSArgument = namedtuple('_CSArgument', ('name', 'required', 'default_value')) - - -class CSArgument(_CSArgument): - def __str__(self): - p = ['Argument ' + self.name] - if not self.required: - p.append('optional with default %s' % (self.default_value,)) - return ' '.join(p) - - def __eq__(self, other): - """ - Is other argument same as this, in name/default value if present? 
- :param other: CSArgument - :return: bool - """ - return self.name == other.name and \ - self.required == other.required and \ - self.default_value == other.default_value - - -class CSVarargsPlaceholder(CSArgument): - pass - - -class CSKwargsPlaceholder(CSArgument): - pass - - -class TypeErrorReason(object): - pass - - -class CSTypeError(TypeError): - """ - A TypeError exception on steroids - """ - - def __str__(self): - return 'Problem with argument %s' % (self.arg.name,) - - def __init__(self, arg): - """ - :param arg: Argument definition - :type arg: CSArgument - """ - super(CSTypeError, self).__init__(str(self)) - self.arg = arg - - -class CSBadTypeError(CSTypeError): - def __init__(self, arg, expected, got): - super(CSBadTypeError, self).__init__(arg) - - self.expected = expected - self.got = got - - def __str__(self): - return 'Bad type given for arg %s, expected %s got %s' % ( - self.arg.name, self.expected, self.got) - - -class CSNotGivenError(CSTypeError): - def __str__(self): - return 'Value for argument %s not given' % (self.arg.name,) - - -class CSMultipleValuesGivenError(CSTypeError): - def __str__(self): - return 'Got multiple values for argument %s' % (self.arg.name,) - - -class CallSignature(object): - """ - Call signature of a callable. - - Properties: - - has_varargs (Bool) - if has varargs - - has_kwargs (Bool) - if has kwargs - - locals (Dict[str => CSArgument]) - list of locals this function call - will generate - - pos_args (List[CSArgument)] - list of positional arguments - - varargs_name ((str, None)) - name of varargs argument, or None if - not present - - kwargs_name ((str, None)) - name of kwargs argument, or None if - not present - """ - - def count_required_positionals(self): - c = 0 - for a in self.pos_args: - if a.required: - c += 1 - return c - - def __eq__(self, other): - """ - Compare if two call signatures are IDENTICAL - :param other: CallSignature - :return: bool - """ - - if any(a != b for a, b in zip(self.pos_args, other.pos_args)): - return False - - return self.has_kwargs == other.has_kwargs and \ - self.has_varargs == other.has_varargs - - def __init__(self, callable): - args, varargs, kwargs, defaults = inspect.getargspec(callable) - - defaults = defaults or () - # pad them - while len(defaults) < len(args): - defaults = [_NoDefault] + list(defaults) - - # process positionals - self.pos_args = [] - self.locals = {} - for arg, default in zip(args, defaults): - cs_arg = CSArgument(arg, - default is _NoDefault, - default) - self.pos_args.append(cs_arg) - self.locals[arg] = cs_arg - - self.varargs_name = varargs - if varargs is not None: - self.has_varargs = True - self.locals[self.varargs_name] = CSVarargsPlaceholder( - self.varargs_name, False, []) - else: - self.has_varargs = False - - self.kwargs_name = kwargs - if kwargs is not None: - self.has_kwargs = True - self.locals[self.kwargs_name] = CSKwargsPlaceholder( - self.kwargs_name, False, {}) - else: - self.has_kwargs = False - - def to_invocation(self, locals): - """ - Return an invocation to the function reconstructed from its locals - :param locals: as returned by .result() - :return: tuple of (args, kwargs) - """ - locals = copy(locals) - args = [] - - for arg in self.pos_args: - if arg.name in locals: - args.append(locals.pop(arg.name)) - elif not arg.required: - args.append(arg.default_value) - del locals[arg.name] - - return args, locals - - def result(self, *args, **kwargs): - """ - Simulate a function call, see what locals are defined - - Return a dict of (local_variable_name => it's 
value), - or TypeError - - :param args: function call parameters - :param kwargs: function call parameters - :return: dict - :raise CSTypeError: call would raise a TypeError - """ - assert len(args) >= self.count_required_positionals() - - locals = {} - - # positional - for arg, value in itertools.izip_longest(self.pos_args, - args[:len(self.pos_args)], - fillvalue=_NotGiven): - - if value is _NotGiven: - if arg.required: - raise CSNotGivenError(arg) - else: - value = arg.default_value - - locals[arg.name] = value - - # varargs - if self.has_varargs: - locals[self.varargs_name] = args[len(self.pos_args):] - - # kwargs - if self.has_kwargs: - locals[self.kwargs_name] = kwargs - - return locals - - def is_match_amount(self, *args, **kwargs): - """ - Would a function call with these arguments succeed, based solely on - number and "keywordnessity" or parameters? - """ - if len(args) > len(self.pos_args): - if not self.has_varargs: - return False # *args expected - - if len(args) < self.count_required_positionals(): - return False # Not enough posits - - if len(kwargs) > 0 and not self.has_kwargs: - return False # kwargs expected - - return True diff --git a/satella/coding/typecheck/basics.py b/satella/coding/typecheck/basics.py deleted file mode 100644 index 5eeac9d7..00000000 --- a/satella/coding/typecheck/basics.py +++ /dev/null @@ -1,37 +0,0 @@ -# coding=UTF-8 -from __future__ import print_function, absolute_import, division - -import numbers -import typing - -__all__ = [ - 'Callable', 'Sequence', 'Number', 'Mapping', 'Iterable', 'Any', - 'Optional', 'TypeVar', 'List', 'Tuple', 'Dict', 'Set', - '_NotGiven', '_NoDefault', '_NOP', '_TRUE' -] - -Callable = lambda *args: typing.Callable -Sequence = typing.Sequence -Number = numbers.Real -Mapping = typing.Mapping -Iterable = typing.Iterable -Any = typing.Any -Optional = lambda opt: opt + (None,) if isinstance(opt, tuple) else (opt, None) -TypeVar = typing.TypeVar -List = lambda *opt: list -Tuple = lambda *opt: tuple -Dict = lambda *p: dict -Set = lambda *p: set - - -# Internal tokens - only instances will be -class _NotGiven(object): - pass - - -class _NoDefault(object): - pass - - -_NOP = lambda x: x -_TRUE = lambda x: True diff --git a/satella/coding/typecheck/corechk.py b/satella/coding/typecheck/corechk.py deleted file mode 100644 index c78a7d77..00000000 --- a/satella/coding/typecheck/corechk.py +++ /dev/null @@ -1,78 +0,0 @@ -# coding=UTF-8 -from __future__ import print_function, absolute_import, division - -import six - -__all__ = [ - '_typeinfo_to_tuple_of_types', 'istype', '_do_if_not_type' -] - -from .basics import * - - -def _typeinfo_to_tuple_of_types(typeinfo, operator=type): - if typeinfo == 'self': - return None - elif typeinfo is None: - return operator(None), - elif typeinfo == int and six.PY2: - return six.integer_types - else: - if isinstance(typeinfo, (tuple, list)): - new_tup = [] - for elem in typeinfo: - new_tup.extend(_typeinfo_to_tuple_of_types(elem)) - return tuple(new_tup) - else: - return typeinfo, - - -def istype(var, type_): - ret_v = False - - if type_ is None or type_ == 'self': - ret_v = True - - elif type(type_) == tuple: - ret_v = any(istype(var, subtype) for subtype in type_) - - elif type_ in (Callable, Iterable, Sequence, Mapping): - if type_ == Callable: - ret_v = hasattr(var, '__call__') - elif type_ == Iterable: - ret_v = hasattr(var, '__iter__') - elif type_ == Sequence: - ret_v = hasattr(var, '__iter__') and hasattr(var, '__getattr__') and hasattr(var, - '__len__') - elif type_ == Mapping: - ret_v = 
hasattr(var, '__getitem__') - else: - try: - if isinstance(var, type_): - ret_v = True - except TypeError: # must be a typing.* annotation - ret_v = type(var) == type_ - - return ret_v - - -def _do_if_not_type(var, type_, fun='default'): - if type_ in ((type(None),),) and (fun == 'default'): - ret_v = None - - elif type_ in (None, (None,), 'self'): - ret_v = var - - elif not istype(var, type_): - - if fun == 'default': - ret_v = None if type_[0] == type(None) else type_[0](var) - else: - q = fun() - if isinstance(q, Exception): - raise q - ret_v = q - else: - ret_v = var - - return ret_v diff --git a/satella/coding/typecheck/decorators.py b/satella/coding/typecheck/decorators.py deleted file mode 100644 index 9f16660f..00000000 --- a/satella/coding/typecheck/decorators.py +++ /dev/null @@ -1,253 +0,0 @@ -# coding=UTF-8 -from __future__ import print_function, absolute_import, division -import functools -import warnings -import six - -from .corechk import * -from .basics import * -from ...exceptions import PreconditionError - -__all__ = [ - 'typed', - 'coerce', - 'checked_coerce', - 'for_argument', - 'PreconditionError', 'precondition' -] - - -def typed(*t_args, **t_kwargs): - """ - Check for given types before a function is called. Raise TypeError if something goes wrong - - Use like: - - @typed(int, six.text_type) - def display(times, text): - ... - - You can also check for return type with kw argument of "returns", ie. - - @typed(int, int, returns=int) - def sum(a, b): - return a+b - - Or - @typed('self', a, b): - def method(self, a, b): - .. - - If you specify extra argument - mandatory=True - type will always be - checked, regardless if debug mode is enabled - - Same rules apply. - - int will automatically include long for checking (Python 3 compatibility) - If you want to check for None, type (None, ) - None for an argument means "do no checking", (None, ) means "type must be - NoneType". 
You can pass tuples or lists to match for multiple types - """ - - t_args = [(_typeinfo_to_tuple_of_types(x) if x is not None else None) - for x in t_args] - - t_retarg = t_kwargs.get('returns', None) - is_mandatory = t_kwargs.get('mandatory', False) - - if t_retarg is not None: - t_retarg = _typeinfo_to_tuple_of_types(t_retarg) - - def outer(fun): - if (not __debug__) and (not is_mandatory): - return fun - - @functools.wraps(fun) - def inner(*args, **kwargs): - # add extra 'None' argument if unbound method - for argument, typedescr in zip(args, t_args): - if not istype(argument, typedescr): - raise TypeError('Got %s, expected %s' % ( - type(argument), typedescr)) - - rt = fun(*args, **kwargs) - - if not istype(rt, t_retarg): - raise TypeError('Returned %s, expected %s' % ( - type(rt), t_retarg)) - - return rt - - return inner - - return outer - - -def coerce(*t_args, **t_kwargs): - """#todo banana banana banana""" - warnings.warn('Using coerce is considered harmful', DeprecationWarning) - - t_args = [(_typeinfo_to_tuple_of_types(x, operator=_NOP)) - for x in t_args] - - def argify(args): - return [_do_if_not_type(argument, typedescr) \ - for argument, typedescr in six.moves.zip_longest(args, t_args)] - - t_retarg = t_kwargs.get('returns', None) - - t_retarg = _typeinfo_to_tuple_of_types(t_retarg, operator=_NOP) - - def outer(fun): - @functools.wraps(fun) - def inner(*args, **kwargs): - # add extra 'None' argument if unbound method - - new_args = argify(args) - - rt = fun(*new_args, **kwargs) - return _do_if_not_type(rt, t_retarg) - - return inner - - return outer - - -def checked_coerce(*t_args, **t_kwargs): - """#todo banana banana banana""" - warnings.warn('Using checked_coerce is considered harmful', DeprecationWarning) - - def ptc(item, pt=list): - if item is None: - return None, None - elif isinstance(item, pt): - if len(item) == 2: - return item[0], item[1] - return item, None - - def sselector(q, z, operator=None, pt=list): - s = ptc(q, pt=pt)[z] - if s is None and operator is None: - return None - return _typeinfo_to_tuple_of_types(s, operator=operator) - - t_args_t = [sselector(x, 0) for x in t_args] - t_args_c = [sselector(x, 1, operator=_NOP) for x in t_args] - - def argify(args): - return [_do_if_not_type(argument, typedescr) \ - for argument, typedescr in six.moves.zip_longest(args, t_args_c)] - - t_retarg = t_kwargs.get('returns', None) - t_retarg_t = sselector(t_retarg, 0, pt=tuple) - t_retarg_c = sselector(t_retarg, 1, operator=_NOP, pt=tuple) - - def outer(fun): - @functools.wraps(fun) - def inner(*args, **kwargs): - # add extra 'None' argument if unbound method - - for argument, typedescr in zip(args, t_args_t): - if not istype(argument, typedescr): - raise TypeError('Got %s, expected %s' % ( - type(argument), typedescr)) - - rt = fun(*argify(args), **kwargs) - if not istype(rt, t_retarg_t): - raise TypeError('Returned %s, expected %s' % ( - type(rt), t_retarg_t)) - - return _do_if_not_type(rt, t_retarg_c) - - return inner - - return outer - - -def precondition(*t_ops): - """ - Check that a precondition happens for given parameter. - Only positional arguments are supported. - - You can do it like this: - - @precondition(lambda x: x == 1) - def return_two(x): - return x*2 - - or - - @precondition('x == 1') - def return_two(x): - .. - - If None is passed then argument will be always assumed to be True. - You can use all standard locals in precondition. 
- - You function call will return a PreconditionError (subclass of - ValueError) if a precondition fails - """ - - tn_ops = [] - - for t_op in t_ops: - if t_op is None: - precond = _TRUE - elif isinstance(t_op, six.string_types): - q = dict(globals()) - exec('_precond = lambda x: ' + t_op, q) - precond = q['_precond'] - else: - precond = t_op - - tn_ops.append(precond) - - from satella.coding.recast_exceptions import rethrow_as - - def outer(fun): - @functools.wraps(fun) - def inner(*args, **kwargs): - assert len(args) >= len(tn_ops), 'More preconditions than positional arguments!' - with rethrow_as(TypeError, PreconditionError): - for arg, precond in six.moves.zip_longest(args, tn_ops, fillvalue=_TRUE): - print(arg, precond, precond.__doc__) - if not precond(arg): - raise PreconditionError( - 'Argument of value %s failed precondition check' % (arg,)) - return fun(*args, **kwargs) - - return inner - - return outer - - -def for_argument(*t_ops, **t_kwops): - """ - Calls a callable for each of the arguments. - - returns is a special keyword, a callable to process the result through - - Use like: - - @for_argument(int, str, typed=bool, returns=int) - def check(val1, val2, typed='True'): - if typed: - return val1 + int(val2) - - """ - t_ops = [_NOP if op == 'self' else op for op in t_ops] - returns = t_kwops.pop('returns', _NOP) - - def outer(fun): - @functools.wraps(fun) - def inner(*args, **kwargs): - # add extra 'None' argument if unbound method - assert len(args) >= len(t_ops) - a = fun(*((_NOP if op is None else op)(arg) for arg, op in - six.moves.zip_longest(args, t_ops, fillvalue=None)), - **{k: t_kwops.get(k, _NOP)(v) for k, v in kwargs.items()}) - return returns(a) - - return inner - - return outer diff --git a/satella/configuration/__init__.py b/satella/configuration/__init__.py new file mode 100644 index 00000000..1b9f6af6 --- /dev/null +++ b/satella/configuration/__init__.py @@ -0,0 +1,9 @@ +from . import sources + +from .sources import load_source_from_list, load_source_from_dict + +__all__ = [ + 'load_source_from_list', + 'load_source_from_dict', + 'sources' +] diff --git a/satella/configuration/schema/__init__.py b/satella/configuration/schema/__init__.py new file mode 100644 index 00000000..f59ff435 --- /dev/null +++ b/satella/configuration/schema/__init__.py @@ -0,0 +1,8 @@ +""" +A schema to validate your config-dict! +""" + +from . 
import descriptors +from .descriptors import * + +__all__ = descriptors.__all__ diff --git a/satella/configuration/schema/descriptors.py b/satella/configuration/schema/descriptors.py new file mode 100644 index 00000000..d21ad8ab --- /dev/null +++ b/satella/configuration/schema/descriptors.py @@ -0,0 +1,245 @@ +import copy +import typing as tp +import re + +from satella.coding import rethrow_as, CallableGroup, for_argument +from ...exceptions import ConfigurationValidationError, ConfigurationSchemaError + +__all__ = [ + 'Descriptor', + 'Integer', 'Float', 'String', + 'IPv4', + 'List', 'Dict', 'create_key', + 'must_be_type', + 'must_be_one_of', + 'CheckerCondition', + 'ConfigDictValue', + 'descriptor_from_dict', +] + +ConfigDictValue = tp.Optional[tp.Union[int, float, str, dict, list, bool]] +CheckerConditionType = tp.Callable[[ConfigDictValue], bool] +ObjectMakerType = tp.Callable[ + [ConfigDictValue], tp.Any] # might raise ConfigurationSchemaError as well + +_NOP = lambda v: v + + +class CheckerCondition(object): + PRE_CHECKER = 0 + POST_CHECKER = 1 + + def __init__(self, conditon: CheckerConditionType, description: str = u'', + is_pre_checker: bool = True): + self.condition = conditon + self.description = description + self.is_pre_checker = is_pre_checker + + def __call__(self, value): + if not self.condition(value): + raise ConfigurationValidationError(self.description, value) + + +def must_be_type(*cls_or_tuple): + return CheckerCondition(conditon=lambda v: isinstance(v, cls_or_tuple), + description='not one of types %s' % (cls_or_tuple,)) + + +def must_be_one_of(*items): + return CheckerCondition(conditon=lambda v: v in items, + description='not in set %s' % (items,), + is_pre_checker=False) + + +class Descriptor(object): + BASIC_MAKER = _NOP # of ObjectMakerType + MY_EXCEPTIONS = [TypeError, ValueError] # a list of Exception classes + CHECKERS = [] # a list of CheckerCondition + + def __init__(self): + self.pre_checkers = CallableGroup() + self.post_checkers = CallableGroup() + self.name = None + self.optional = None + self.default = None + + for checker in self.__class__.CHECKERS: + self.add_checker(checker) + + def __call__(self, value: ConfigDictValue) -> tp.Any: + """ + raises ConfigurationSchemaError: on invalid schema + """ + self.pre_checkers(value) + + try: + value = self.BASIC_MAKER(value) + except cls.MY_EXCEPTIONS: + raise ConfigurationValidationError('could not pass to maker', value) + + self.post_checkers(value) + + return value + + def add_checker(self, checker: CheckerCondition): + if checker.is_pre_checker: + self.pre_checkers.add(checker) + else: + self.post_checkers.add(checker) + + +class Integer(Descriptor): + BASIC_MAKER = int + + +class Float(Descriptor): + BASIC_MAKER = float + + +class String(Descriptor): + BASIC_MAKER = str + + +class Regexp(String): + REGEXP = r'.*' + + def __init__(self): + super(Regexp, self).__init__() + if isinstance(self.REGEXP, str): + self.REGEXP = re.compile(self.REGEXP) + + def __call__(self, value: ConfigDictValue) -> str: + value = super(Regexp, self).__call__(value) + + match = self.REGEXP.match(value) + if not match: + raise ConfigurationValidationError('value does not match %s' % (self.REGEXP.pattern, ), value) + + return match.group(0) + + +class IPv4(Regexp): + REGEXP = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' + + +class List(Descriptor): + CHECKERS = [must_be_type(list, tuple)] + BASIC_MAKER = list + MY_EXCEPTIONS = [] + + def __init__(self, type_desciptor: tp.Optional[Descriptor] = None): + super(List, self).__init__() + 
self.type_descriptor = type_desciptor or Descriptor() + + def __call__(self, value: ConfigDictValue) -> tp.List: + value = super(List, self).__call__(value) + + return [self.type_descriptor(p) for p in value] + + +DictDescriptorKey = tp.NewType('DictDescriptorKey', Descriptor) + + +def create_key(descriptor: Descriptor, name: str, optional: bool = False, + default=None) -> DictDescriptorKey: + descriptor.name = name + descriptor.optional = optional + descriptor.default = default + + return descriptor + + +class Dict(Descriptor): + BASIC_MAKER = dict + MY_EXCEPTIONS = [] + CHECKERS = [must_be_type(dict)] + + UnknownKeyHandlerType = tp.Callable[[str, ConfigDictValue], tp.Any] + + def __init__(self, keys: tp.List[DictDescriptorKey], + unknown_key_mapper: UnknownKeyHandlerType = lambda str, + data: data): + super(Dict, self).__init__() + self.keys = {item.name: item for item in keys} # tp.Dict[str, DictDescriptorKey] + self.unknown_key_mapper = unknown_key_mapper # Dict.UnknownKeyHandlerType + + def __call__(self, value: ConfigDictValue) -> dict: + value = copy.copy(value) + assert isinstance(value, dict) + value = super(Dict, self).__call__(value) + assert isinstance(value, dict) + + output = {} + + for key, key_descriptor in self.keys.items(): + try: + v = value.pop(key) + except KeyError: + if key_descriptor.optional: + output[key] = key_descriptor.default + else: + raise ConfigurationValidationError('required key %s not found' % (key, )) + else: + output[key] = key_descriptor(v) + + for k, v in value.items(): + output[k] = self.unknown_key_mapper(k, v) + + return output + + +BASE_LOOKUP_TABLE = {'int': Integer, 'float': Float, 'str': String, 'ipv4': IPv4, 'list': List, 'dict': Dict} + + +def _get_descriptor_for(key: str, value: tp.Any) -> Descriptor: + if value == '': + return Descriptor() + elif isinstance(value, str): + if value in ('int', 'float', 'str', 'ipv4'): + return create_key(BASE_LOOKUP_TABLE[value](), + key, False, None) + elif isinstance(value, dict): + if 'type' not in value: + return create_key(descriptor_from_dict(value), + key, False, None) + else: + args = () + type = value['type'] + if type == 'list': + of = _get_descriptor_for('', value.get('of', '')) + args = (of, ) + optional = value.get('optional', False) + default = value.get('default', None) + descriptor = BASE_LOOKUP_TABLE[type](*args) + return create_key(descriptor, key, optional, default) + else: + raise ConfigurationSchemaError('invalid schema, unrecognized config object %s' % (value, )) + + +def descriptor_from_dict(dct: dict) -> Descriptor: + """ + Giving a Python dictionary-defined schema of the configuration, return a Descriptor-based one + + :param dct: something like + { + "a": "int", + "b": "str", + "c": { + "type": "int" + "optional": True, + "default": 5 + }, + "d": { + "a": "int", + "b": "str" + } + } + :return: a Descriptor-based schema + """ + fields = [] + + for key, value in dct.items(): + descriptor = _get_descriptor_for(key, value) + fields.append(descriptor) + + return Dict(fields) diff --git a/satella/configuration/sources/__init__.py b/satella/configuration/sources/__init__.py new file mode 100644 index 00000000..55041835 --- /dev/null +++ b/satella/configuration/sources/__init__.py @@ -0,0 +1,14 @@ +from . import base +from . import derivative +from . import envvars +from . import file +from . import format +from . 
import from_dict +from .base import * +from .derivative import * +from .envvars import * +from .file import * +from .format import * +from .from_dict import * + +__all__ = format.__all__ + envvars.__all__ + derivative.__all__ + file.__all__ + base.__all__ + from_dict.__all__ diff --git a/satella/configuration/sources/base.py b/satella/configuration/sources/base.py new file mode 100644 index 00000000..b64b99ee --- /dev/null +++ b/satella/configuration/sources/base.py @@ -0,0 +1,15 @@ +__all__ = [ + 'BaseSource' +] + + +class BaseSource(object): + """Base class for all configuration sources""" + + def provide(self) -> dict: + """ + Return your configuration, as a dict + + :raise ConfigurationError: on invalid configuration + """ + return {} diff --git a/satella/configuration/sources/derivative.py b/satella/configuration/sources/derivative.py new file mode 100644 index 00000000..b1f54872 --- /dev/null +++ b/satella/configuration/sources/derivative.py @@ -0,0 +1,65 @@ +from satella.coding import merge_dicts + +from satella.exceptions import ConfigurationError +from .base import BaseSource + +__all__ = [ + 'AlternativeSource', 'OptionalSource', 'MergingSource' +] + + +class AlternativeSource(BaseSource): + def __init__(self, *sources: BaseSource): + """ + If one fails, use the next + """ + self.sources = sources + + def provide(self) -> dict: + """ + :raises ConfigurationError: when backup fails too + """ + for source in self.sources: + try: + s = source.provide() + assert isinstance(s, dict), 'provide() returned a non-dict' + return s + except ConfigurationError: + pass + else: + raise ConfigurationError('all sources failed!') + + +class OptionalSource(AlternativeSource): + def __init__(self, source: BaseSource): + """ + This will substitute for empty dict if underlying config would fail. + + Apply this to your sources if you expect that they will fail. 
+ + Use as + + OptionalSource(SomeOtherSource1) + + """ + super(OptionalSource, self).__init__(source, BaseSource()) + + +class MergingSource(BaseSource): + """ + Source that merges configuration from a bunch of sources + """ + + def __init__(self, *sources: BaseSource): + self.sources = sources + + def provide(self) -> dict: + cfg = {} + + for source in self.sources: + p = source.provide() + assert isinstance(p, dict) + cfg = merge_dicts(cfg, p) + assert isinstance(cfg, dict) + + return cfg diff --git a/satella/configuration/sources/envvars.py b/satella/configuration/sources/envvars.py new file mode 100644 index 00000000..02ff9a9a --- /dev/null +++ b/satella/configuration/sources/envvars.py @@ -0,0 +1,43 @@ +import logging +import os +import sys + +from satella.coding import rethrow_as +from satella.exceptions import ConfigurationError +from .base import BaseSource +from .format import JSONSource + +logger = logging.getLogger(__name__) + +__all__ = [ + 'EnvVarsSource', 'EnvironmentSource', +] + + +class EnvironmentSource(BaseSource): + """ + This just returns a dictionary of { env_name => that env's value } + """ + + def __init__(self, env_name: str, cast_to=lambda v: v): + super(EnvironmentSource, self).__init__() + self.env_name = env_name + self.cast_to = cast_to + + @rethrow_as([ValueError, TypeError, KeyError], ConfigurationError) + def provide(self) -> dict: + v = self.cast_to(os.environ[self.env_name]) + + return {self.env_name: v} + + +class EnvVarsSource(JSONSource): + def __init__(self, env_name: str): + super(EnvVarsSource, self).__init__('', + encoding=sys.getfilesystemencoding()) + self.env_name = env_name + + @rethrow_as(KeyError, ConfigurationError) + def provide(self) -> dict: + self.root = os.environ[self.env_name] + return super(EnvVarsSource, self).provide() diff --git a/satella/configuration/sources/file.py b/satella/configuration/sources/file.py new file mode 100644 index 00000000..83ef90d6 --- /dev/null +++ b/satella/configuration/sources/file.py @@ -0,0 +1,128 @@ +import logging +import os +import typing as tb + +from satella.coding import rethrow_as +from .base import BaseSource +from .derivative import MergingSource +from .format import FORMAT_SOURCES, FormatSource +from ...exceptions import ConfigurationError + +logger = logging.getLogger(__name__) + +__all__ = ['FileSource', 'DirectorySource'] + +FormatSourceType = tb.Union[tb.Type[FormatSource], str] + + +class FileSource(BaseSource): + """ + Try to read a file and parse it with a known format. + """ + + def __init__(self, path: str, + encoding: str = 'utf-8', + interpret_as: tb.List[FormatSourceType] = FORMAT_SOURCES): + """ + :param interpret_as: names or classes of format sources to parse with + """ + super(FileSource, self).__init__() + from .. 
import sources + self.source_classes = [ # list of tb.Type[FormatSource] + (p if not isinstance(p, str) else getattr(sources, p)) for p in + interpret_as] + self.path = path + self.encoding = encoding + + @rethrow_as([IOError, OSError], ConfigurationError) + def provide(self) -> dict: + + with open(self.path, 'rb') as fin: + data = fin.read() + + for source_class in self.source_classes: + try: + s = source_class(data, encoding=self.encoding).provide() + if not isinstance(s, dict): + raise ConfigurationError() + return s + except ConfigurationError: + pass + else: + raise ConfigurationError('no reader could parse the file') + + +class DirectorySource(FileSource): + """ + Load all files from given directory and merge them + """ + + def __init__(self, path, encoding: str = 'utf-8', + interpret_as=FORMAT_SOURCES, + fname_filter: tb.Callable[[str], bool] = lambda fullpath: True, + scan_subdirectories: bool = True): + """ + :param filter: callable that tells whether to use this file (or subdirectory if scan_subdirectories is enabled) + """ + super(DirectorySource, self).__init__(path, encoding, interpret_as) + self.filter = lambda files: filter(fname_filter, files) # tb.Callable[[tb.List[str]], tb.List[str]] + self.scan_subdirectories = scan_subdirectories + + def get_sources_from_directory(self, directory: str) -> tb.List[FileSource]: + + sources = [] # list of FileSource + + try: + files = self.filter(os.path.join(directory, x) for x in os.listdir(directory)) + except OSError as e: + logger.warning( + 'OSError %s while accessing configuration directory %s, skipping files' % ( + e, directory)) + return [] + + for fname in files: + + fullname = os.path.join(directory, fname) + if os.path.isfile(fullname): + sources.append(FileSource(fullname, encoding=self.encoding, + interpret_as=self.source_classes)) + elif os.path.isdir(fullname) and self.scan_subdirectories: + sources.extend(self.get_sources_from_directory(fullname)) + else: + pass # FIFOs or sockets or something else + + return sources + + def provide(self) -> dict: + return MergingSource( + *self.get_sources_from_directory(self.path)).provide() + + +try: + import requests +except ModuleNotFoundError: + pass +else: + class HTTPJSONSource(BaseSource): + """Call somwhere, count on a 200-esque code and return a JSON!""" + + def __init__(self, url: str, method: str = 'GET', **kwargs): + """ + :param kwargs: these will be passed to requests.request(..) 
+ """ + super(HTTPJSONSource, self).__init__() + self.url = url + self.method = method + self.kwargs = kwargs + + @rethrow_as(requests.RequestException, ConfigurationError) + def provide(self) -> dict: + r = requests.request(self.method, self.url, **self.kwargs) + if r.status_code >= 400: + raise ConfigurationError( + 'Target responded with HTTP %s' % (r.status_code,)) + + return r.json() + + + __all__.append('HTTPJSONSource') diff --git a/satella/configuration/sources/format.py b/satella/configuration/sources/format.py new file mode 100644 index 00000000..5081cd8a --- /dev/null +++ b/satella/configuration/sources/format.py @@ -0,0 +1,95 @@ +import binascii +import codecs +import json +import typing as tb + +from satella.coding import rethrow_as +from satella.exceptions import ConfigurationError +from .base import BaseSource + +__all__ = [ + 'FormatSource', 'FORMAT_SOURCES' +] + +FORMAT_SOURCES = [] # sources capable of parsing a text format + + +def register_format_source(source): + source_name = source.__name__ + __all__.append(source_name) + FORMAT_SOURCES.append(source_name) + + return source + + +def _override_me(key): + raise NotImplementedError('override me') + + +class FormatSource(BaseSource): + TRANSFORM = _override_me + BASE_EXCEPTIONS = [TypeError, UnicodeDecodeError, ValueError, + binascii.Error, LookupError] + EXTRA_EXCEPTIONS = [] + + def __init__(self, root: tb.Union[bytes, str], encoding: str = 'utf-8'): + """ + :param root: content + :type root: if bytes, will be decoded with given encoding' + """ + super(FormatSource, self).__init__() + self.root = root + self.encoding = encoding + + def provide(self) -> dict: + cls = self.__class__ + + with rethrow_as(tuple(cls.BASE_EXCEPTIONS + cls.EXTRA_EXCEPTIONS), + ConfigurationError): + if isinstance(self.root, bytes): + self.root = codecs.decode(self.root, self.encoding) + if isinstance(self.root, bytes): # probably base64 encoded + self.root = self.root.decode('utf-8') + + ret_val = cls.TRANSFORM(self.root) + if not isinstance(ret_val, dict): + raise ConfigurationError( + 'provider was unable to generate a text volume') + else: + return ret_val + + +@register_format_source +class JSONSource(FormatSource): + """ + Loads JSON strings + """ + TRANSFORM = json.loads + EXTRA_EXCEPTIONS = [json.JSONDecodeError] + + +try: + import yaml +except ImportError: + pass +else: + @register_format_source + class YAMLSource(FormatSource): + """ + Loads YAML strings + """ + EXTRA_EXCEPTIONS = [yaml.YAMLError] + TRANSFORM = yaml.load + +try: + import toml +except ImportError: + pass +else: + @register_format_source + class TOMLSource(FormatSource): + """ + Loads TOML strings + """ + EXTRA_EXCEPTIONS = [toml.TomlDecodeError] + TRANSFORM = toml.loads diff --git a/satella/configuration/sources/from_dict.py b/satella/configuration/sources/from_dict.py new file mode 100644 index 00000000..839ec6cc --- /dev/null +++ b/satella/configuration/sources/from_dict.py @@ -0,0 +1,78 @@ +import copy + +from satella.coding import rethrow_as, for_argument +from satella.configuration import sources +from satella.configuration.sources.base import BaseSource +from satella.exceptions import ConfigurationError + +""" +If a dict has a field "type" then it will be treated specially: + + "binary" - it is a binary value of "value" to be encoded with "encoding" (default ascii) + "lambda" - it allows expressing the simplest filters there can be + name of a source class - it will be instantated with arguments "args". + rest keys will be kwargs. 
+ + Special key is "optional" to be bool - if so, the source will be decorated as optional + +See the unit test for more in-depth knowledge +""" + +__all__ = [ + 'load_source_from_dict', + 'load_source_from_list' +] + +EXTRA_TYPES = { + 'binary': lambda dct: dct['value'].encode(dct.get('encoding', 'ascii')), + 'lambda': lambda dct: eval('lambda x: ' + dct['operation'], globals(), + locals()) +} + + +@rethrow_as(Exception, ConfigurationError) +@for_argument(copy.copy) # since we are modyfing that dict +def load_source_from_dict(dct: dict) -> BaseSource: + """ + dct has a form of + + { + "type": "BaseSource", + "args": [] # optional + ... kwargs + } + + :raises ConfigurationError: upon failure to instantiate + """ + type_ = dct.pop('type') + args = dct.pop('args', []) + optional = dct.pop('optional', False) + + def argify(arg): + if isinstance(arg, dict) and 'type' in arg: + atype = arg['type'] + if atype in EXTRA_TYPES: + return EXTRA_TYPES[atype](arg) + elif atype in sources.__dict__: + return load_source_from_dict(arg) + else: + raise ValueError( + 'unrecognized argument type %s' % (arg['type'],)) + else: + return arg + + args = map(argify, args) + kwargs = {k: argify(v) for k, v in dct.items()} + + s = sources.__dict__[type_](*args, **kwargs) + + if optional: + s = sources.OptionalSource(s) + return s + + +def load_source_from_list(obj: list) -> BaseSource: + """ + Builds a MergingSource from dict-ed objects + """ + return sources.MergingSource(*map(load_source_from_dict, obj)) diff --git a/satella/exceptions.py b/satella/exceptions.py index 2f7ed217..d65ea681 100644 --- a/satella/exceptions.py +++ b/satella/exceptions.py @@ -25,3 +25,20 @@ class PreconditionError(BaseSatellaException, ValueError): """ A precondition was not met for the argument """ + + +class ConfigurationError(BaseSatellaException): + """A generic error during configuration""" + +class ConfigurationSchemaError(ConfigurationError): + """Schema mismatch to what was seen""" + + +class ConfigurationValidationError(ConfigurationSchemaError): + """A validator failed""" + def __init__(self, msg, value=None): + """ + :param value: value found + """ + super(ConfigurationValidationError, self).__init__(msg) + self.value = value \ No newline at end of file diff --git a/satella/instrumentation/__init__.py b/satella/instrumentation/__init__.py index fc5c73f2..0cf26a93 100644 --- a/satella/instrumentation/__init__.py +++ b/satella/instrumentation/__init__.py @@ -6,4 +6,6 @@ All things related to: """ from __future__ import print_function, absolute_import, division -from .trace_back import Traceback +from .trace_back import Traceback, StoredVariableValue, StackFrame, GenerationPolicy + +__all__ = ['Traceback', 'StoredVariableValue', 'StackFrame', 'GenerationPolicy'] diff --git a/satella/instrumentation/trace_back.py b/satella/instrumentation/trace_back.py index d3e8892e..9739ac97 100644 --- a/satella/instrumentation/trace_back.py +++ b/satella/instrumentation/trace_back.py @@ -23,8 +23,7 @@ import io import sys import traceback import zlib - -import six +import typing as tp try: import cPickle as pickle @@ -41,9 +40,10 @@ class GenerationPolicy(object): Override if need be, and pass the class (or instance) to Traceback """ - def __init__(self, enable_pickling=True, compress_at=128 * 1024, - repr_length_limit=128 * 1024, - compression_level=6): + def __init__(self, enable_pickling: bool = True, + compress_at: int = 128 * 1024, + repr_length_limit: int = 128 * 1024, + compression_level: int = 6): """ :param enable_pickling: bool, 
whether to enable pickling at all :param compress_at: pickles longer than this (bytes) will be compressed @@ -57,7 +57,7 @@ class GenerationPolicy(object): self.repr_length_limit = repr_length_limit self.compression_level = compression_level - def should_pickle(self, value): + def should_pickle(self, value: tp.Any) -> bool: """ Should this value be pickled? @@ -66,15 +66,15 @@ class GenerationPolicy(object): """ return self.enable_pickling - def should_compress(self, pickledata): + def should_compress(self, pickledata: bytes) -> bool: """ Should this pickle undergo compression? - :param pickledata: bytes, pickle value + :param pickledata: pickle value :return: bool """ return len(pickledata) > self.compress_at - def get_compression_level(self, pickledata): + def get_compression_level(self, pickledata: bytes) -> int: """ What compression level to use to pickle this? :param pickledata: bytes, pickle value @@ -83,7 +83,7 @@ class GenerationPolicy(object): """ return self.compression_level - def process_repr(self, r): + def process_repr(self, r: str) -> str: """ Process the string obtained from __repr__ing :param r: result of a __repr__ on value @@ -98,7 +98,7 @@ class GenerationPolicy(object): return r -class StoredVariable(object): +class StoredVariableValue: """ Class used to store a variable value. Picklable. @@ -111,13 +111,15 @@ class StoredVariable(object): None - nothing "pickle" - normal Python pickle "pickle/gzip" - Python pickle treated with zlib.compress - "failed" - could not pickle, pickle contains a text with - human-readable reason + "failed" - could not pickle, pickle contains a UTF-8 text with + human-readable exception reason + "failed/gzip" - compression failed, pickle contains a UTF-8 text with + human-readable exception reason """ __slots__ = ('repr', 'type_', 'pickle', 'pickle_type') - def __init__(self, value, policy): + def __init__(self, value: tp.Any, policy: tp.Optional[GenerationPolicy] = None): """ If value cannot be pickled, it's repr will be at least preserved @@ -126,9 +128,8 @@ class StoredVariable(object): """ self.repr = repr(value) self.type_ = repr(type(value)) - if six.PY2: - self.repr = six.text_type(self.repr, 'utf8') - self.type_ = six.text_type(self.type_, 'utf8') + + policy = policy or GenerationPolicy() self.repr = policy.process_repr(self.repr) @@ -141,7 +142,7 @@ class StoredVariable(object): self.pickle_type = 'pickle' except BaseException as e: # yes, they all can happen! - self.pickle = repr(e.args) + self.pickle = repr((e,)+e.args).encode('utf8') self.pickle_type = "failed" else: if policy.should_compress(self.pickle): @@ -151,8 +152,9 @@ class StoredVariable(object): policy.get_compression_level( self.pickle)) self.pickle_type = "pickle/gzip" - except zlib.error: - pass # ok, keep normal + except zlib.error as e: + self.pickle = ('failed to gzip, reason is %s' % (repr(e), )).encode('utf8') + self.pickle_type = "failed/gzip" def load_value(self): """ @@ -170,24 +172,24 @@ class StoredVariable(object): raise ValueError( 'Value has failed to be pickled, reason is %s' % (self.pickle,)) elif self.pickle_type == 'pickle/gzip': - pickle = zlib.decompress(self.pickle) + pickle_ = zlib.decompress(self.pickle) elif self.pickle_type == 'pickle': - pickle = self.pickle + pickle_ = self.pickle try: - return pickle.loads(pickle) + return pickle.loads(pickle_) except pickle.UnpicklingError: raise ValueError( 'object picklable, but cannot load in this environment') -class StackFrame(object): +class StackFrame: """ Class used to verily preserve stack frames. 
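Roughly how the policy and the value container above are meant to be combined; the thresholds are illustrative, and the imports assume the names re-exported from satella.instrumentation in this patch:

    from satella.instrumentation import GenerationPolicy, StoredVariableValue

    # illustrative thresholds: compress only past 1 MB, cap reprs at 256 chars
    policy = GenerationPolicy(compress_at=1024 * 1024, repr_length_limit=256)

    snapshot = StoredVariableValue(list(range(10)), policy)
    print(snapshot.repr, snapshot.type_)      # textual form survives even if pickling fails
    assert snapshot.pickle_type == 'pickle'   # small value: pickled, not compressed
    assert snapshot.load_value() == list(range(10))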
Picklable. """ __slots__ = ('locals', 'globals', 'name', 'filename', 'lineno') - def __init__(self, frame, policy): + def __init__(self, frame: "<class 'frame'>", policy: GenerationPolicy): """ :type frame: Python stack frame """ @@ -196,22 +198,28 @@ class StackFrame(object): self.lineno = frame.f_lineno self.locals = {} - for key, value in six.iteritems(frame.f_locals): - self.locals[key] = StoredVariable(value, policy) + for key, value in frame.f_locals.items(): + self.locals[key] = StoredVariableValue(value, policy) self.globals = {} - for key, value in six.iteritems(frame.f_globals): - self.globals[key] = StoredVariable(value, policy) + for key, value in frame.f_globals.items(): + self.globals[key] = StoredVariableValue(value, policy) class Traceback(object): - """Class used to preserve exceptions. Picklable.""" + """ + Class used to preserve exceptions and chains of stack frames. + Picklable. + """ __slots__ = ('formatted_traceback', 'frames') - def __init__(self, policy=GenerationPolicy): + def __init__(self, starting_frame=None, policy=GenerationPolicy): """ To be invoked while processing an exception is in progress + :param starting_frame: frame to start tracking the traceback from. + Must be either None, in which case an exception must be in progress and will be taken + else must be an instance of <class 'frame'>. :param policy: policy for traceback generation :raise ValueError: there is no traceback to get info from! Is any exception in process? @@ -224,30 +232,32 @@ class Traceback(object): self.frames = [] - if tb is None: - raise ValueError('No traceback') - else: + if starting_frame is None: + if tb is None: + raise ValueError('No traceback') while tb.tb_next: tb = tb.tb_next - f = tb.tb_frame - while f: - self.frames.append(StackFrame(f, value_pickling_policy)) - f = f.f_back + else: + f = starting_frame + + while f: + self.frames.append(StackFrame(f, value_pickling_policy)) + f = f.f_back - self.formatted_traceback = six.text_type(traceback.format_exc()) + self.formatted_traceback = str(traceback.format_exc()) - def pickle_to(self, stream): + def pickle_to(self, stream: tp.BinaryIO): """Pickle self to target stream""" pickle.dump(self, stream, pickle.HIGHEST_PROTOCOL) - def pickle(self): + def pickle(self) -> bytes: """Returns this instance, pickled""" bio = io.BytesIO() self.pickle_to(bio) return bio.getvalue() - def pretty_format(self): + def pretty_format(self) -> str: """ Return a multi-line, pretty-printed representation of all exception data. 
@@ -257,7 +267,7 @@ class Traceback(object): self.pretty_print(bio) return bio.getvalue() - def pretty_print(self, output=sys.stderr): + def pretty_print(self, output: tp.TextIO = sys.stderr): """ Pretty-print the exception :param output: a file-like object in text mode @@ -269,8 +279,8 @@ class Traceback(object): for frame in self.frames: output.write(u'** %s at %s:%s\n' % ( frame.name, frame.filename, frame.lineno)) - for name, value in six.iteritems(frame.locals): + for name, value in frame.locals.items(): try: output.write(u'*** %s: %s\n' % (name, value.repr)) - except: - output.write(u'*** %s: repr unavailable\n' % name) + except BaseException as e: + output.write(u'*** %s: repr unavailable (due to locally raised %s)\n' % (name, repr(e))) diff --git a/satella/posix/__init__.py b/satella/posix/__init__.py index cf9b50c6..183ac7e0 100644 --- a/satella/posix/__init__.py +++ b/satella/posix/__init__.py @@ -32,7 +32,7 @@ def is_running_as_root(): return os.geteuid() == 0 -def suicide(kill_entire_pg=True): +def suicide(kill_entire_pg: bool = True): """ Kill self. diff --git a/satella/posix/daemon.py b/satella/posix/daemon.py index 391e2b75..a2cc89a5 100644 --- a/satella/posix/daemon.py +++ b/satella/posix/daemon.py @@ -4,8 +4,7 @@ from __future__ import print_function, absolute_import, division import logging import os import sys - -import six +import typing as tp try: import pwd @@ -19,17 +18,15 @@ except ImportError: pwd = grp = L() -from satella.coding import typed, Callable - logger = logging.getLogger(__name__) DEVNULL = '/dev/null' -@typed(Callable, bool, (None, int), (None, int)) -def daemonize(exit_via=sys.exit, - redirect_std_to_devnull=True, - uid=None, gid=None): +def daemonize(exit_via: tp.Callable = sys.exit, + redirect_std_to_devnull: bool = True, + uid: tp.Optional[int] = None, + gid: tp.Optional[int] = None): """ Make this process into a daemon. @@ -66,7 +63,7 @@ def daemonize(exit_via=sys.exit, def _parse_ug(no, module, fieldname, osfun): if no is not None: - if isinstance(no, six.string_types): + if isinstance(no, str): no = getattr(module.getpwnam(no), fieldname) osfun(no) diff --git a/satella/posix/signals.py b/satella/posix/signals.py index 3a21ba7f..875a3a88 100644 --- a/satella/posix/signals.py +++ b/satella/posix/signals.py @@ -6,21 +6,21 @@ from __future__ import print_function, absolute_import, division import logging import signal -import sys import time -from satella.coding import typed +import typing as tp logger = logging.getLogger(__name__) end = False + def __sighandler(a, b): global end end = True -@typed((list, None)) -def hang_until_sig(extra_signals=None): + +def hang_until_sig(extra_signals: tp.Optional[tp.List] = None): """Will hang until this process receives SIGTERM or SIGINT. 
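The two POSIX helpers touched above are typically composed in a service's entry point; a POSIX-only sketch using the module paths shown in the diff headers (how they are wired into a real service is an assumption, not something this patch prescribes):

    from satella.posix.daemon import daemonize
    from satella.posix.signals import hang_until_sig

    def main():
        daemonize()         # detach from the terminal, std streams to /dev/null
        hang_until_sig()    # park the main thread until SIGTERM or SIGINT

    if __name__ == '__main__':
        main()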
If you pass extra signal IDs (signal.SIG*) with extra_signals, then also on those signals this call will release.""" diff --git a/setup.cfg b/setup.cfg index 8ed4ee18..61afc771 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,7 +8,6 @@ url = https://github.com/piotrmaslanka/satella classifier = Programming Language :: Python - Programming Language :: Python :: 2.7 Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 @@ -30,10 +29,3 @@ universal=1 verbosity=3 with-coverage=1 detailed-errors=1 - -[isort] -add_imports = - from __future__ import absolute_import - from __future__ import division - from __future__ import print_function - from __future__ import unicode_literals diff --git a/setup.py b/setup.py index c19c79fe..ca783931 100644 --- a/setup.py +++ b/setup.py @@ -6,13 +6,10 @@ setup(keywords=['ha', 'high availability', 'scalable', 'scalability', 'server'], packages=find_packages(include=['satella', 'satella.*']), version=__version__, install_requires=[ - "six", - "monotonic", - "typing", ], tests_require=[ "nose", "mock", "coverage" ], test_suite='nose.collector', - python_requires='>=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*' + python_requires='!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*' ) diff --git a/tests/test_coding/test_concurrent.py b/tests/test_coding/test_concurrent.py index 672a5eba..d48f24c4 100644 --- a/tests/test_coding/test_concurrent.py +++ b/tests/test_coding/test_concurrent.py @@ -1,6 +1,5 @@ # coding=UTF-8 from __future__ import print_function, absolute_import, division -import six import unittest from satella.coding import CallableGroup diff --git a/tests/test_coding/test_debug.py b/tests/test_coding/test_debug.py index 5111cb7d..bbf2dcef 100644 --- a/tests/test_coding/test_debug.py +++ b/tests/test_coding/test_debug.py @@ -3,11 +3,8 @@ from __future__ import print_function, absolute_import, division import unittest -import six - -from satella.coding import typed, CallSignature, Number, coerce, Optional, \ - List, Dict, Tuple, Set, Callable, checked_coerce, for_argument, \ - precondition, PreconditionError +from satella.coding import for_argument, precondition +from satella.exceptions import PreconditionError class TestTypecheck(unittest.TestCase): @@ -20,38 +17,6 @@ class TestTypecheck(unittest.TestCase): self.assertRaises(PreconditionError, lambda: return_double([1], 2, 5)) return_double([1], 1, 'dupa') - def test_cls(self): - # if we don't care about apps - class Lol(object): - @typed(returns=int) - def zomg(self, a): - return a - - @typed('self', List(int), Dict(str, int), Tuple(int), Set(int), - Callable(int, None)) - def lel(self, lst, dct, tpl, st, cbl): - pass - - Lol().zomg(2) - self.assertRaises(TypeError, lambda: Lol().zomg('a')) - Lol().lel([], {}, (), set([1]), lambda a: None) - - def test_che_co2(self): - @checked_coerce((int, None)) - def p(a): - return a - - p(5) - p(None) - self.assertRaises(TypeError, lambda: p(5.0)) - - def test_lambda(self): - @typed(Callable) - def q(p): - pass - - q(lambda: None) - def test_forarg_kwords(self): @for_argument(int, typed=bool) def check(v1, typed='True'): @@ -84,160 +49,3 @@ class TestTypecheck(unittest.TestCase): self.assertEqual(testa('5'), 5) self.assertEqual(testa(5), 5) self.assertIsInstance(testa('5'), int) - - def test_checked_coerce(self): - @checked_coerce([(str, int), int], returns=(int, float)) - def testa(a): - return a - - self.assertRaises(TypeError, lambda: testa(5.0)) - self.assertEqual(testa('5'), 5.0) - self.assertEqual(testa(5), 
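The surviving tests above exercise for_argument and precondition from satella.coding; a compact sketch of both, assuming precondition accepts one callable per positional argument (the exact decorator signature is not shown in this hunk):

    from satella.coding import for_argument, precondition
    from satella.exceptions import PreconditionError

    @for_argument(int)            # coerce the first positional argument
    def double(x):
        return x * 2

    assert double('21') == 42

    @precondition(lambda x: x > 0)
    def reciprocal(x):
        return 1 / x

    try:
        reciprocal(0)             # fails the check before the body runs
    except PreconditionError:
        pass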
5.0) - self.assertIsInstance(testa('5'), float) - - def test_cls_test(self): - class Lol(object): - # this should fail, since the first argument the decorator gets is "self", because decorators always get FUNCTION objects! - @typed(int, returns=int) - def zomg(self, a): - return a - - self.assertRaises(TypeError, lambda: Lol().zomg(2)) - - def test_ta(self): - @typed(int, int, returns=int) - def sum(a, b): - return a + b - - @typed(int, int, returns=str) - def sum2(a, b): - return a + b - - sum(1, 2) - self.assertRaises(TypeError, lambda: sum2(2, 3)) - - def test_tma(self): - def test(a, b, c, **d): - pass - - cs = CallSignature(test) - - self.assertTrue(cs.is_match_amount(1, 2, 3, wtf=4)) - self.assertFalse(cs.is_match_amount(1, 2, wtf=4)) - self.assertTrue(cs.is_match_amount(1, 2, 3)) - self.assertFalse(cs.is_match_amount(1, 2, 3, 4)) - - def test_t1(self): - @typed(int, float, six.text_type) - def testf(a_int, a_float, a_string): - pass - - self.assertRaises(TypeError, lambda: testf('lol', 15, b'test')) - self.assertRaises(TypeError, lambda: testf(12, 2.0, b'hey')) - testf(12, 2.0, u'hey') - - @typed((None, int, float)) - def testa(param): - pass - - self.assertRaises(TypeError, lambda: testa('hey')) - testa(2) - testa(2) - testa(2.0) - - @typed(None, int) - def testb(a, num): - pass - - testb(None, 100000000000000000000000000000000) - testb(u'hello', 1) - testb(object, 2) - - @typed((None,)) - def testc(p): - pass - - self.assertRaises(TypeError, lambda: testc(2)) - testc(None) - - def test_t2(self): - @typed(Optional(int)) - def testa(a=5): - pass - - self.assertRaises(TypeError, lambda: testa(2.0)) - testa(a=2.0) - self.assertRaises(TypeError, lambda: testa('yuyu')) - testa(a=None) - testa(a=6) - - def test_shorter_coerces(self): - @coerce(int, None, str) - def test(a, b, c, d, e): - return a, b, c, d, e - - a, b, c, d, e = test('1', 2, 3, 4, 5) - - self.assertEqual(a, 1) - self.assertEqual(b, 2) - self.assertEqual(c, '3') - self.assertEqual(d, 4) - self.assertEqual(e, 5) - - def test_coerce(self): - class Wtf(object): - @coerce('self', float, float) - def add(self, a, b): - return a + b - - self.assertEqual(Wtf().add('1', '2.5'), 3.5) - - def test_coerce_result(self): - @coerce(returns=str) - def add(a, b): - return a + b - - self.assertEqual(add(1, 2), '3') - - def test_self(self): - class Wtf(object): - @typed('self', Number, Number, returns=Number) - def add(self, a, b): - return a + b - - Wtf().add(1, 2.5) - - def test_T2(self): - @typed((int, None)) - def testa(a=5): - pass - - self.assertRaises(TypeError, lambda: testa(2.0)) - testa(a=2.0) - self.assertRaises(TypeError, lambda: testa('yuyu')) - testa(a=None) - testa(a=6) - - def test_T2a(self): - @typed(Optional(int)) - def testa(a=5): - pass - - self.assertRaises(TypeError, lambda: testa(2.0)) - testa(a=2.0) - self.assertRaises(TypeError, lambda: testa('yuyu')) - testa(a=None) - testa(a=6) - - def test_t3(self): - def a(b, c): - pass - - def b(b, c): - pass - - def c(b, c, **args): - pass - - self.assertEquals(CallSignature(a), CallSignature(b)) - self.assertNotEquals(CallSignature(a), CallSignature(c)) diff --git a/tests/test_coding/test_fun_static.py b/tests/test_coding/test_fun_static.py index 74c16cd3..c3bd9e7d 100644 --- a/tests/test_coding/test_fun_static.py +++ b/tests/test_coding/test_fun_static.py @@ -1,8 +1,7 @@ import unittest from threading import Thread from time import sleep -import six -from six.moves.queue import Queue +from queue import Queue from satella.coding import static_var @@ -18,7 +17,6 @@ class 
FunTestTest(unittest.TestCase): static_fun(3) self.assertEquals(static_fun.counter, 4) - @unittest.skipIf(six.PY2, 'Syntax unsupported on Python 2') def test_fun_static_method(self): class MyClass(object): @static_var("counter", 2) diff --git a/tests/test_coding/test_monitor.py b/tests/test_coding/test_monitor.py index c4cd2a86..ea8dbe18 100644 --- a/tests/test_coding/test_monitor.py +++ b/tests/test_coding/test_monitor.py @@ -2,7 +2,7 @@ import unittest from threading import Thread from time import sleep -from six.moves.queue import Queue +from queue import Queue from satella.coding import Monitor diff --git a/tests/test_coding/test_rethrow.py b/tests/test_coding/test_rethrow.py index daa5e464..6784e060 100644 --- a/tests/test_coding/test_rethrow.py +++ b/tests/test_coding/test_rethrow.py @@ -1,6 +1,5 @@ # coding=UTF-8 from __future__ import print_function, absolute_import, division -import six import unittest from satella.coding import rethrow_as, silence_excs diff --git a/tests/test_coding/test_structures.py b/tests/test_coding/test_structures.py index 0080ba55..bbf37a5c 100644 --- a/tests/test_coding/test_structures.py +++ b/tests/test_coding/test_structures.py @@ -5,7 +5,6 @@ import copy import unittest import mock -import six from satella.coding import TimeBasedHeap, Heap, CallableGroup, typednamedtuple, \ OmniHashableMixin @@ -86,7 +85,7 @@ class TestTimeBasedHeap(unittest.TestCase): def test_foobar(self): tbh = TimeBasedHeap() bytes(tbh) - six.text_type(tbh) + str(tbh) repr(tbh) copy.copy(tbh) copy.deepcopy(tbh) diff --git a/tests/test_coding/test_treat_result_with.py b/tests/test_coding/test_treat_result_with.py deleted file mode 100644 index 14c2f100..00000000 --- a/tests/test_coding/test_treat_result_with.py +++ /dev/null @@ -1,17 +0,0 @@ -import logging -import typing -import unittest - -from satella.coding import treat_result_with - -logger = logging.getLogger(__name__) - - -class TreatResultWithTest(unittest.TestCase): - def test_treat_result_with(self): - - @treat_result_with(int) - def mirror(v): - return v - - self.assertEquals(mirror('5'), 5) diff --git a/tests/test_configuration/__init__.py b/tests/test_configuration/__init__.py new file mode 100644 index 00000000..139597f9 --- /dev/null +++ b/tests/test_configuration/__init__.py @@ -0,0 +1,2 @@ + + diff --git a/tests/test_configuration/test_load_source_from_dict.py b/tests/test_configuration/test_load_source_from_dict.py new file mode 100644 index 00000000..b0a6f81d --- /dev/null +++ b/tests/test_configuration/test_load_source_from_dict.py @@ -0,0 +1,40 @@ +from satella.configuration import load_source_from_dict, load_source_from_list +from .test_sources.utils import SourceTestCase + +INNER_DATA = [ + { + 'type': 'JSONSource', + 'args': ['{"a": 5}'] + }, + { + 'type': 'JSONSource', + 'args': ['ivnadsgfgsfg'], + 'optional': True + }, + { + 'type': 'JSONSource', + 'args': ['{"b": 5}'] + }, + { + 'type': 'JSONSource', + 'encoding': "base64", + 'args': [ + { + 'type': 'binary', + 'value': 'eyJjIjogMjF9\n', + } + ] + } +] + +DICT_DATA = { + 'type': 'MergingSource', + 'args': INNER_DATA +} + + +class TestLoadSourceFromDict(SourceTestCase): + def test_lsf(self): + output = {'a': 5, 'b': 5, 'c': 21} + self.assertSourceHas(load_source_from_dict(DICT_DATA), output) + self.assertSourceHas(load_source_from_list(INNER_DATA), output) diff --git a/tests/test_configuration/test_schema.py b/tests/test_configuration/test_schema.py new file mode 100644 index 00000000..aff435db --- /dev/null +++ b/tests/test_configuration/test_schema.py @@ 
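The base64 entry in DICT_DATA above exercises two pieces at once: the 'binary' pseudo-type from from_dict.py (str to bytes via the declared encoding) and FormatSource's two-step decode (bytes to bytes via 'base64', then UTF-8 to str). The same behaviour in isolation, assuming JSONSource as defined earlier in this patch:

    import codecs
    from satella.configuration.sources import JSONSource

    blob = codecs.encode(b'{"c": 21}', 'base64')     # b'eyJjIjogMjF9\n'
    assert JSONSource(blob, encoding='base64').provide() == {'c': 21}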
-0,0 +1,101 @@ +import unittest +import tempfile +import os + +from satella.configuration.schema import * +from satella.configuration.sources import DirectorySource + + +class TestSchema(unittest.TestCase): + + def test_descriptor_from_schema(self): + schema = { + "key_s": "str", + "key_i": "int", + "key_f": "float", + "ip_addr": "ipv4", + "nested": { + "key_s": "str", + }, + "default_five": { + "type": "int", + "optional": True, + "default": 5 + }, + "list": { + "type": "list", + "of": "str" + } + } + + s = descriptor_from_dict(schema) + self.assertEquals(s({ + 'key_s': 'string', + 'key_i': '5', + 'key_f': '5.5', + 'ip_addr': '10.2.3.43', + 'nested': { + 'key_s': "string" + }, + 'list': [1, 2, 3] + }), { + 'key_s': 'string', + 'key_i': 5, + 'key_f': 5.5, + 'ip_addr': '10.2.3.43', + 'nested': { + 'key_s': "string" + }, + 'list': ['1', '2', '3'], + 'default_five': 5 + }) + + def test_schema(self): + D1 = { + 'key_s': 'value', + 'key_i': '5', + 'key_f': '5.2', + 'unknown_key': None, + 'ip_addr': '127.0.0.1' + } + + s = Dict([ + create_key(String(), 'key_s'), + create_key(Integer(), 'key_i'), + create_key(Float(), 'key_f'), + create_key(String(), 'key_not_present', optional=True, + default='hello world'), + create_key(IPv4(), 'ip_addr') + ], unknown_key_mapper=lambda key, value: str(value)) + + D2 = D1.copy() + D2.update(key_not_present='hello world', key_i=5, key_f=5.2, + unknown_key='None') + self.assertEqual(s(D1), D2) + + + def test_schema_x(self): + dir = tempfile.mkdtemp() + + with open(os.path.join(dir, 'smok5_config.json'), 'w') as f_out: + f_out.write("""{ + "logging": { + "logstash": { + "host": "192.168.10.11", + "port": 5959 + } + } +}""") + ds = DirectorySource(dir) + print(ds.get_sources_from_directory(dir)) + source = ds.provide() + + schema = Dict([ + create_key(Dict([ + create_key(Dict([ + create_key(String(), 'host'), + create_key(Integer(), 'port') + ]), 'logstash') + ]), 'logging') + ]) + source = schema(source) diff --git a/tests/test_configuration/test_sources/__init__.py b/tests/test_configuration/test_sources/__init__.py new file mode 100644 index 00000000..eea436a3 --- /dev/null +++ b/tests/test_configuration/test_sources/__init__.py @@ -0,0 +1,3 @@ +import logging + +logger = logging.getLogger(__name__) diff --git a/tests/test_configuration/test_sources/test_envvars.py b/tests/test_configuration/test_sources/test_envvars.py new file mode 100644 index 00000000..6794b73e --- /dev/null +++ b/tests/test_configuration/test_sources/test_envvars.py @@ -0,0 +1,12 @@ +from satella.configuration.sources import EnvVarsSource, OptionalSource +from .utils import SourceTestCase, mock_env + + +class TestEnvVarsSource(SourceTestCase): + @mock_env('satella', '{"a":2}') + def test_ok(self): + self.assertSourceHas(EnvVarsSource('satella'), {u"a": 2}) + + def test_none(self): + self.assertSourceEmpty(OptionalSource(EnvVarsSource('satella'))) + self.assertSourceFails(EnvVarsSource('satella')) diff --git a/tests/test_configuration/test_sources/test_file.py b/tests/test_configuration/test_sources/test_file.py new file mode 100644 index 00000000..9a738775 --- /dev/null +++ b/tests/test_configuration/test_sources/test_file.py @@ -0,0 +1,55 @@ +import os +import tempfile +import unittest + +from satella.coding import silence_excs +from satella.configuration.sources import FileSource, FORMAT_SOURCES, \ + DirectorySource +from .utils import SourceTestCase + + +class TestFileSource(SourceTestCase): + def setUp(self): + super(TestFileSource, self).setUp() + self.filename = tempfile.mktemp() + + 
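A compact restatement of what test_descriptor_from_schema below demonstrates, assuming descriptor_from_dict is star-exported from satella.configuration.schema; the schema keys and values are made up:

    from satella.configuration.schema import descriptor_from_dict

    schema = descriptor_from_dict({
        'host': 'str',
        'port': 'int',
        'timeout': {'type': 'float', 'optional': True, 'default': 30.0},
    })

    # string values are coerced to the declared types, defaults are filled in
    assert schema({'host': 'localhost', 'port': '5959'}) == {
        'host': 'localhost', 'port': 5959, 'timeout': 30.0,
    }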
@silence_excs(FileNotFoundError) + def tearDown(self): + os.unlink(self.filename) + + def check_with_data(self, data: bytes, value: dict): + with open(self.filename, 'wb') as fout: + fout.write(data) + + self.assertSourceHas(FileSource(self.filename), value) + + def test_json(self): + self.check_with_data(b'{"a": 2}', {"a": 2}) + + @unittest.skipIf('YAMLSource' not in FORMAT_SOURCES, + reason='No pyyaml installed') + def test_yaml(self): + self.check_with_data(b"\na: 5\n", {"a": 5}) + + @unittest.skipIf('TOMLSource' not in FORMAT_SOURCES, + reason='No TOML installed') + def test_toml(self): + self.check_with_data(b"a = 5", {"a": 5}) + + +class TestDirectorySource(SourceTestCase): + def test_directory_source(self): + with tempfile.TemporaryDirectory() as outdir: + self.filename = outdir + self.assertTrue(os.path.isdir(outdir)) + self.ds = DirectorySource(outdir, scan_subdirectories=True) + self.makefile('config.json', b'{"a": [2]}') + self.makefile('config2.json', b'{"a": [2]}') + os.mkdir(os.path.join(outdir, 'temp1')) + self.makefile(os.path.join(outdir, 'temp1', 'config3.json'), + b'{"a": [23]}') + self.assertSourceHas(self.ds, set((2, 2, 23)), postop=lambda x: set(x['a'])) + + def makefile(self, name: str, value: bytes): + with open(os.path.join(self.filename, name), 'wb') as fout: + fout.write(value) diff --git a/tests/test_configuration/test_sources/test_json.py b/tests/test_configuration/test_sources/test_json.py new file mode 100644 index 00000000..e490d59a --- /dev/null +++ b/tests/test_configuration/test_sources/test_json.py @@ -0,0 +1,33 @@ +import unittest + +from satella.configuration.sources import JSONSource, OptionalSource, \ + FORMAT_SOURCES +from .utils import SourceTestCase + + +class TestJSONSource(SourceTestCase): + @unittest.skipIf('TOMLSource' not in FORMAT_SOURCES, 'No TOML installed') + def test_toml(self): + from satella.configuration.sources import TOMLSource + self.assertSourceHas(TOMLSource(u"key = 5"), {"key": 5}) + self.assertSourceFails(TOMLSource(u'this isnt toml')) + + @unittest.skipIf('YAMLSource' not in FORMAT_SOURCES, 'No YAML installed') + def test_yaml(self): + from satella.configuration.sources import YAMLSource + self.assertSourceHas(YAMLSource(u"\na: 5\n"), {"a": 5}) + self.assertSourceFails( + YAMLSource(u'!3 #23= 43$ 434 #34 = this isnt yaml')) + + def test_ok(self): + self.assertSourceHas(JSONSource(u'{"a":2}'), {"a": 2}) + + def test_malform(self): + self.assertSourceFails(JSONSource(u'sdfsf>>::')) + self.assertSourceEmpty(OptionalSource(JSONSource(u'sdfsf>>::'))) + + def test_base64_bad(self): + self.assertSourceFails(JSONSource(b'bG9s\x02', encoding='base64')) + + def test_unicode_bad(self): + self.assertSourceFails(JSONSource(b'\x00')) diff --git a/tests/test_configuration/test_sources/test_optional.py b/tests/test_configuration/test_sources/test_optional.py new file mode 100644 index 00000000..c97c2a2a --- /dev/null +++ b/tests/test_configuration/test_sources/test_optional.py @@ -0,0 +1,21 @@ +from satella.configuration.sources import MergingSource, JSONSource, \ + AlternativeSource +from satella.exceptions import ConfigurationError +from .utils import SourceTestCase + + +class TestMergingSource(SourceTestCase): + def test_ok(self): + s = MergingSource( + JSONSource('{"a": [5]}'), + JSONSource('{"a": [6]}'), + ) + self.assertSourceHas(s, {"a": [5, 6]}) + + def test_empty(self): + self.assertSourceEmpty(MergingSource()) + + +class TestAlternativeSource(SourceTestCase): + def test_alternative(self): + 
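FileSource and DirectorySource, tested below, are the file-system counterparts of the format sources; a sketch with made-up paths, assuming each file parses with one of the registered FORMAT_SOURCES:

    from satella.configuration.sources import FileSource, DirectorySource

    # a single file, parsed by whichever registered format source accepts it
    single = FileSource('/etc/myapp/config.json').provide()

    # every parseable file under the directory, merged into one dict
    merged = DirectorySource('/etc/myapp/conf.d',
                             scan_subdirectories=True).provide()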
self.assertSourceFails(AlternativeSource(), ConfigurationError) diff --git a/tests/test_configuration/test_sources/utils.py b/tests/test_configuration/test_sources/utils.py new file mode 100644 index 00000000..83a36f57 --- /dev/null +++ b/tests/test_configuration/test_sources/utils.py @@ -0,0 +1,47 @@ +import functools +import os +import unittest +import typing as tp + +from satella.configuration.sources import BaseSource +from satella.exceptions import ConfigurationError + + +def mock_env(env: tp.Optional[str] = None, val: tp.Optional[str] = None) -> \ + tp.Callable[[tp.Callable], tp.Callable]: + """ + Set an env and then clear it out + :param env: + :param val: + :return: + """ + + def outer(fun: tp.Callable): + @functools.wraps(fun) + def inner(*args, **kwargs): + try: + if env is not None: + os.environ[env] = val + return fun(*args, **kwargs) + finally: + if env is not None: + del os.environ[env] + + return inner + + return outer + + +class SourceTestCase(unittest.TestCase): + def assertSourceFails(self, source: BaseSource, + fails_with=ConfigurationError): + self.assertRaises(fails_with, source.provide) + + def assertSourceEmpty(self, source: BaseSource): + self.assertEqual(self.provide(source), {}) + + def assertSourceHas(self, source: BaseSource, value: tp.Any, postop=lambda x: x): + self.assertEqual(postop(self.provide(source)), value) + + def provide(self, source: BaseSource) -> dict: + return source.provide() diff --git a/tests/test_instrumentation/test_trace_back.py b/tests/test_instrumentation/test_trace_back.py index 9967c815..62e576ad 100644 --- a/tests/test_instrumentation/test_trace_back.py +++ b/tests/test_instrumentation/test_trace_back.py @@ -3,6 +3,7 @@ from __future__ import print_function, absolute_import, division import pickle +import sys import unittest from satella.instrumentation import Traceback @@ -33,6 +34,10 @@ class TestTraceback(unittest.TestCase): a = tb.pickle() self.assertIsInstance(pickle.loads(a), Traceback) + def test_normal_stack_frames(self): + tb = Traceback(list(sys._current_frames().values())[0]) + tb.pretty_format() + def test_compression_happens(self): try: -- GitLab
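The new test_normal_stack_frames above relies on the starting_frame parameter added to Traceback in this patch; the same mechanism lets running code snapshot its own stack without any exception in progress, for example from a watchdog or diagnostics endpoint. A sketch under that assumption:

    import sys
    import threading

    from satella.instrumentation import Traceback

    def snapshot_current_thread() -> bytes:
        # topmost frame of this thread; no exception needs to be in progress
        frame = sys._current_frames()[threading.get_ident()]
        return Traceback(frame).pickle()

    print(len(snapshot_current_thread()), 'bytes of pickled stack')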