repo: string
pull_number: int64
instance_id: string
issue_numbers: sequence
base_commit: string
patch: string
test_patch: string
problem_statement: string
hints_text: string
created_at: timestamp[ns, tz=UTC]
version: float64
python-attrs/attrs
367
python-attrs__attrs-367
[ "361" ]
57817b2c0e9cf98a2d974e8e845e8f6a1a1be89a
diff --git a/src/attr/_make.py b/src/attr/_make.py --- a/src/attr/_make.py +++ b/src/attr/_make.py @@ -23,6 +23,8 @@ _init_converter_pat = "__attr_converter_{}" _init_factory_pat = "__attr_factory_{}" _tuple_property_pat = " {attr_name} = property(itemgetter({index}))" +_classvar_prefixes = ("typing.ClassVar", "t.ClassVar", "ClassVar") + _empty_metadata_singleton = metadata_proxy({}) @@ -232,10 +234,11 @@ def _is_class_var(annot): """ Check whether *annot* is a typing.ClassVar. - The implementation is gross but importing `typing` is slow and there are - discussions to remove it from the stdlib alltogether. + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. """ - return str(annot).startswith("typing.ClassVar") + return str(annot).startswith(_classvar_prefixes) def _get_annotations(cls):
diff --git a/tests/test_annotations.py b/tests/test_annotations.py --- a/tests/test_annotations.py +++ b/tests/test_annotations.py @@ -11,6 +11,7 @@ import attr +from attr._make import _classvar_prefixes from attr.exceptions import UnannotatedAttributeError @@ -204,13 +205,14 @@ class A: assert A.__init__.__annotations__ == {'return': None} @pytest.mark.parametrize("slots", [True, False]) - def test_annotations_strings(self, slots): + @pytest.mark.parametrize("classvar", _classvar_prefixes) + def test_annotations_strings(self, slots, classvar): """ String annotations are passed into __init__ as is. """ @attr.s(auto_attribs=True, slots=slots) class C: - cls_var: 'typing.ClassVar[int]' = 23 + cls_var: classvar + '[int]' = 23 a: 'int' x: 'typing.List[int]' = attr.Factory(list) y: 'int' = 2
Support ClassVar string annotations

The following doesn't work in Python 3.7:

```
from __future__ import annotations

import attr
from typing import ClassVar


@attr.dataclass
class A:
    x: ClassVar[int]


a = A()
```

Because `ClassVar[int]` will be `'ClassVar[int]'` in `A.__annotations__`, `return str(annot).startswith("typing.ClassVar")` returns `False`.

You can repro without 3.7 using this:

```
import attr
from typing import ClassVar


@attr.dataclass
class A:
    x: 'ClassVar[int]'


a = A()
```

Note: it's weird that I'm not giving it a value, but if I do, attrs will think it's a default value and the code won't error out.

Oh, and the following works fine:

```
import attr
import typing


@attr.dataclass
class A:
    x: 'typing.ClassVar[int]'


a = A()
```
Oh god, more string comparisons. 🙈

This is the last blocker for the next release. Any ideas how to solve this, save expanding the comparison to just ClassVar and calling it a day? I believe these are our only viable options:

1. `eval` the first part (up to the first `[`) of every string annotation.
   * If it raises, move on; if you get something, check whether it's a ClassVar.
   * Pros: Catches every possible invocation, e.g. `from typing import ClassVar as MyClassVar`.
   * Cons: I imagine this is prohibitively slow, but it could be tested.
2. Just check for `.startswith('ClassVar')` too and move on.
   * Pros: Fast.
   * Cons: This will miss `as` imports. And it will also ignore situations in which you didn't import ClassVar correctly or at all, e.g.

     ```
     @attr.s(auto_attrib=True)
     class a:
         a: 'ClassVar'
     ```

     (Though any good linter would catch that.)

Can I haz a non-terrible third option? :'(

The non-gross option is to import typing and use `get_type_hints()`. I thought you were already doing that for resolving regular fields, but apparently not. How about this: if user code *is* using annotations *and* that annotation is a string, *then* we import typing and use `get_type_hints()` like grown-ups? In all likelihood, if the code was already using annotations, it was importing typing somewhere anyway, so there's no danger of slowing anybody down. Plus, since user code is already using annotations, we know it's Python 3, so `typing` is there? Let me create a pull request to that effect.

OK, so I tried the above and it seems it's a nuclear option, since it forces all annotations to be evaluated. This is what I wanted to avoid with `from __future__ import annotations`. Making it more intelligent would be possible with `typing.ForwardRef` and its `_evaluate()` method, but those are only available on 3.7+ (PEP 560). I looked at what `dataclasses` do in this case and they also don't work with `from __future__ import annotations`, which I need to fix before Python 3.7 beta4. So my pull request is going with Euresti's Option 2. This is going to be enough for 99.9% of users.
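For context, a minimal sketch of the Option-2 check that the patch above adopts; `_classvar_prefixes` and `_is_class_var` mirror the patch, while the calls at the bottom are illustrative only:

```python
# Prefix tuple of known ClassVar spellings (taken from the patch above).
_classvar_prefixes = ("typing.ClassVar", "t.ClassVar", "ClassVar")


def _is_class_var(annot):
    # str() normalizes both real typing objects and string annotations,
    # so typing.ClassVar[int] and 'ClassVar[int]' are treated alike.
    # str.startswith() accepts a tuple of prefixes.
    return str(annot).startswith(_classvar_prefixes)


print(_is_class_var("ClassVar[int]"))         # True
print(_is_class_var("typing.ClassVar[int]"))  # True
print(_is_class_var("List[int]"))             # False
```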
2018-04-11T08:32:48Z
17.4
python-attrs/attrs
394
python-attrs__attrs-394
[ "387" ]
9c414702bd26c2793386250c4442d48864e3e0b9
diff --git a/src/attr/_make.py b/src/attr/_make.py --- a/src/attr/_make.py +++ b/src/attr/_make.py @@ -671,7 +671,7 @@ def attrs( :param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` methods that compare the class as if it were a tuple of its ``attrs`` attributes. But the attributes are *only* - compared, if the type of both classes is *identical*! + compared, if the types of both classes are *identical*! :param hash: If ``None`` (default), the ``__hash__`` method is generated according how *cmp* and *frozen* are set. @@ -747,6 +747,11 @@ def attrs( .. versionchanged:: 18.1.0 If *these* is passed, no attributes are deleted from the class body. .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. + .. deprecated:: 18.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a + :class:`DeprecationWarning` if the classes compared are subclasses of + each other. ``__eq`` and ``__ne__`` never tried to compared subclasses + to each other. """ def wrap(cls): @@ -885,6 +890,12 @@ def __ne__(self, other): return not result +WARNING_CMP_ISINSTANCE = ( + "Comparision of subclasses using __%s__ is deprecated and will be removed " + "in 2019." +) + + def _make_cmp(attrs): attrs = [a for a in attrs if a.cmp] @@ -938,6 +949,10 @@ def __lt__(self, other): Automatically created by attrs. """ if isinstance(other, self.__class__): + if other.__class__ is not self.__class__: + warnings.warn( + WARNING_CMP_ISINSTANCE % ("lt",), DeprecationWarning + ) return attrs_to_tuple(self) < attrs_to_tuple(other) else: return NotImplemented @@ -947,6 +962,10 @@ def __le__(self, other): Automatically created by attrs. """ if isinstance(other, self.__class__): + if other.__class__ is not self.__class__: + warnings.warn( + WARNING_CMP_ISINSTANCE % ("le",), DeprecationWarning + ) return attrs_to_tuple(self) <= attrs_to_tuple(other) else: return NotImplemented @@ -956,6 +975,10 @@ def __gt__(self, other): Automatically created by attrs. """ if isinstance(other, self.__class__): + if other.__class__ is not self.__class__: + warnings.warn( + WARNING_CMP_ISINSTANCE % ("gt",), DeprecationWarning + ) return attrs_to_tuple(self) > attrs_to_tuple(other) else: return NotImplemented @@ -965,6 +988,10 @@ def __ge__(self, other): Automatically created by attrs. """ if isinstance(other, self.__class__): + if other.__class__ is not self.__class__: + warnings.warn( + WARNING_CMP_ISINSTANCE % ("ge",), DeprecationWarning + ) return attrs_to_tuple(self) >= attrs_to_tuple(other) else: return NotImplemented
diff --git a/tests/test_make.py b/tests/test_make.py --- a/tests/test_make.py +++ b/tests/test_make.py @@ -1249,3 +1249,42 @@ class C(object): ) assert C() == copy.deepcopy(C()) + + +class TestMakeCmp: + """ + Tests for _make_cmp(). + """ + + @pytest.mark.parametrize( + "op", ["__%s__" % (op,) for op in ("lt", "le", "gt", "ge")] + ) + def test_subclasses_deprecated(self, recwarn, op): + """ + Calling comparison methods on subclasses raises a deprecation warning; + calling them on identical classes does not.. + """ + + @attr.s + class A(object): + a = attr.ib() + + @attr.s + class B(A): + pass + + getattr(A(42), op)(A(42)) + getattr(B(42), op)(B(42)) + + assert [] == recwarn.list + + getattr(A(42), op)(B(42)) + + w = recwarn.pop() + + assert [] == recwarn.list + assert isinstance(w.message, DeprecationWarning) + assert ( + "Comparision of subclasses using %s is deprecated and will be " + "removed in 2019." % (op,) + ) == w.message.args[0]
attrs._make should document why it is more restrictive with __eq__ than other comparisons

The type check in `__eq__` for generated classes is different from the one in the other comparison methods. For all other methods, `isinstance(other, self.__class__)` is used, which means subclasses will participate in the "happy" branch of the comparison. For `__eq__` though, [`other.__class__ is self.__class__`](https://github.com/python-attrs/attrs/blob/master/src/attr/_make.py#L867) is used, so a trivial subclass will *not* compare equal, leading to the quite confusing:

```
>>> import attr; Parent = attr.make_class("Parent", dict(foo=attr.ib())); Child = type("Child", (Parent,), {}); print (Parent(foo=1) == Parent(foo=1), Parent(foo=1) == Child(foo=1), Parent(foo=1) < Parent(foo=2), Child(foo=1) < Parent(foo=2))
(True, False, True, True)
```

This strikes me as a bug (the incongruity), and `__eq__` should use the same check; but even if it isn't a bug, it likely bears mentioning that there's a difference.

It even seems like dataclasses have done something [oddly similar](https://www.youtube.com/watch?v=T-TwcmT6Rcw#t=24m09s), maybe just straight copying the code here?

https://github.com/python-attrs/attrs/commit/d134ce45fc98323576a19f03e39669dce615c4e1 looks like it's the commit that originally made the change (though since then `__eq__` is now hand-generated).
Agreed; a reasonable developer would be surprised by the asymmetry described. (FWIW I am; though I try not to subclass, so I hadn't noticed this.)

Well yeah, same. I think `attrs` should even have an `attr.s(..., allow_subclassing=True|False)` and default it to false :P, but that's a different story.

Agree on all counts. And while it may be a bit off topic, I just wanted to echo a similar thought (experiment): that inheritance might be somewhat redeemed in a world where most types aren't open to extension by default. Not sure how deep attrs wants to dip into metaclasses.

I suggest we start with some understanding about why this difference exists at all; without that, it's not obvious to me that it's even intentional. (Though I want to assume that it is.)

Well yeah, this one even made it to LWN: https://lwn.net/Articles/740153/

The question is in what direction we want to go. I find it hard to come to terms with the idea that two objects can be equal if they have different types. So my gut feeling would be to add a warning if someone compares instances that have different types and remove it in a year. Bonus points for an option to configure the behavior for both?

> Well yeah, this one even made it to LWN: https://lwn.net/Articles/740153/

Fun :)

> So my gut feeling would be to add a warning if someone compares instances that have different types and remove it in a year.

+1

> Bonus points for an option to configure the behavior for both?

If I steal my own idea about `repr`, one could support this by having `attr.s(cmp=lambda self, other: isinstance(other, self.__class__))` (vs `cmp=lambda self, other: self.__class__ is other.__class__`, or `lambda self, other: False` / `True`), though I'm not inventive enough at the minute to think of any reason someone would want something other than those four options.

@hynek I think I'm hearing that the existing difference is, in fact, accidental? Anyway, I like the idea of warning/deprecating the `isinstance` behavior in the default comparators.

I like @Julian's idea of `cmp` accepting a callable, and have been thinking about that for a while… but I'd like to consider other uses than the type precondition. For example, I'd like to be able to change the compared data from the default of calling `attrs_to_tuple` on each object. In my case I've wanted to change the order of the attributes, or have one of them sorted in the other direction than the default ("descending"), and right now I have to implement my own comparators in order to do that. More generally, one might want `cmp` to accept a callable like Python 2's [`__cmp__`](https://docs.python.org/2/reference/datamodel.html?highlight=__cmp__#object.__cmp__) operator function…

If you want to swap out the whole implementation, wouldn't you just use `cmp=False`?

Well, it's a lot more… boilerplate to implement all of the `__foo__` methods than it is to provide an alternative to `attrs_to_tuple`. Probably I should fess up and show an example. I [did this as a mix-in](https://github.com/burningmantech/ranger-ims-server/blob/master/src/ims/model/_cmp.py#L31) such that all I need to do in classes that inherit from the mix-in is implement `_cmpValue`. [Here's a simple example](https://github.com/burningmantech/ranger-ims-server/blob/master/src/ims/model/_address.py#L105) in which all I do is change the order of the compared values because I want `description` to be factored in last.
[Here's another](https://github.com/burningmantech/ranger-ims-server/blob/master/src/ims/model/_entry.py#L62) where I just invert one of the values. This is a lot easier than implementing all of the dunders in each class.

Sorry if this is a tangent; it probably should be a separate ticket, but it affects this if we want to overload `cmp`, so I'm just trying to get ahead of that.

I'm not quite sure which way this thread is leaning at this point. `is` or `isinstance()`? If configurable, then which default?

@wsanchez ah I see, well, I guess if we're dragging tangents into things I am pretty strongly against the way the current implementation combines equality with ordering, which is at least partially relevant for what you're doing there. Possibly yet a third ticket (or maybe it exists already, I forget) though.

@altendky My read of @hynek's comment is that we'd prefer `is` as the default, we should deprecate the current use of `isinstance`, and configurability is a bonus. (And I agree with all that.)

@Julian yeah, I agree that combining the two is problematic. The reason I think it's relevant here is that I suspect any configuration option we pick here (if we do) could make fixing that problem harder.

I guess I don't follow how `is` would be expected behavior when inheriting. I get tending away from inheritance (more or less strongly), but trivially inheriting a class shouldn't make it not equal, should it? Wouldn't LSP encourage `isinstance()`? I'm split between thinking I'm missing something and thinking that a dislike of inheritance is going to result in breaking it for those that use it. Note: I don't think I've inherited from an attrs class yet, so I'm not arguing because of any direct effect I expect it to have on me.

@altendky It's a fair point, because it's really non-obvious and I don't know of a settled best practice here, but the `is` check is simply being conservative, because the parent class's implementation of, say, `__eq__` can't be sure that a subclass is equal any more than it can of a rando object. If the subclass adds a field (certainly not uncommon), that field… wait… yes?… that field is not factored into the equality test. (I hesitate because I'm looking at [`attrs_to_tuple`](https://github.com/python-attrs/attrs/blob/master/src/attr/_make.py#L902) and it's a closure that uses the same `attrs` for both `self` and `other`, but I'm not super familiar with that code.) The thing is that the default implementation provided by `attrs` doesn't know what a subclass considers equal, so returning `NotImplemented` is just saying "I dunno". It would be nice if one could explicitly declare a subclass as considering its instances equal to those of the parent class with equal field values, but I'm a little wary of an implementation that assumes this is true.

I need to get a better understanding of LSP. It seems to be part of 'proper' inheritance, but it also seems to hog-tie subclasses. Random thought: isinstance also means that if you compare a and b and b is a subclass of a, you get different results for a == b and b == a.

I'm not sure I'm following the last few comments now -- the two options though would have to be `is`, or `isinstance` *plus* a field length check, because a subclass that adds fields seems pretty clearly like it shouldn't be `==` according to anyone, so you can't use *just* `isinstance`. But in the latter case (`isinstance` + field length check) you get associativity and transitivity, so not sure I follow there, maybe I'm missing something.
And if I didn't mention it earlier, yeah, personally I'd lean towards that one, but since I am in the "don't use inheritance" camp I wouldn't argue heavily for it :P

I guess comparisons are a well-known exception to LSP? Or LSP is just considered not relevant anymore? I still need to study it...

No, your argument is good IMHO :)

Then even `isinstance(other, type(self))` is too restrictive because a trivial child will reject a parent with matching attribute values. Unless LSP is only about full substitution of all instances to the subtype and not about mixing parent and child instances. I'd think this would agree both with other languages, where a parent comparison method wouldn't even be aware of any new attributes on a child class, and with duck typing in Python.

> Then even isinstance(other, type(self)) is too restrictive because a trivial child will reject a parent with matching attribute values.

This is what I meant in https://github.com/python-attrs/attrs/issues/387#issuecomment-395295204 btw. I guess we'd have to check both ways, and that is kind of getting silly. The more I think about it, the more I'm convinced we should go the `is` route and only add more options if there are enough people complaining.

What about using `super()`? If that passes and `isinstance(other, type(self))`, then extra attributes of the subclass are compared. I think that would pass. I think that to satisfy LSP, any attributes added in a subclass would have to have defaults.

That won't work when the parent is on the left of the comparison -- it would call super, and not return `NotImplemented`, and then proceed to compare only a subset of the subclass's fields. Though again I must not be following, because this is the one option that I think *shouldn't* be on the table -- the only reasonable option that uses `isinstance` is returning `NotImplemented` for `not isinstance(other, self.__class__) or attr.fields(self) != attr.fields(other)`; just using the first part of the conditional there I think definitely produces nonsense for subclasses that add extra fields.

@Julian I'll try an example and see what I see. It does seem odd though that we are talking about inheritance and the one thing not to consider using is `super()`.

For what it's worth, there's also:

```python
@attr.s
class Rectangle(object):
    width: float
    height: float

    def area(self) ➜ float:
        return self.width * self.height


@attr.s
class RedRectangle(object):
    pass


@attr.s
class BlueRectangle(object):
    pass
```

In which case, one might `assert RedRectangle(1,2) != BlueRectangle(1,2)` and perhaps even `assert RedRectangle(1,2) != Rectangle(1,2)`. This is obviously contrived, but I don't think one can really just assume that subclasses with the same fields are equal. Also: I see the auto-correct typo above, but Python really should accept `➜` as equivalent to `->`.

Just my two cents here. The most accurate definition of LSP I've ever found is this one:

> Objects of subtypes should behave like those of supertypes if used via supertype methods.

For me it is essentially *the same* as duck typing. And the important point here is the "if used via supertype methods" part. This has nothing to do with which fields the subtypes have. So taking the _fields_ into account in the comparison would go in the wrong direction with respect to LSP... but does this really have to do with LSP at all? I don't think so, but I'm also not sure enough to make a statement.
What I'm sure of (and I hope anyone seriously following this thread is too) is that **Identity** is not a simple problem, but a really complex one, because it involves what **you** mean by **Identity**. It depends on what you give "relevance" to and what you "filter out" of the comparison.

So what to do in cases like this one? When in doubt, I tend to apply the Fail-soon principle: implement by default the most restrictive code, the one that will throw an error sooner than others (in this case, the `is` comparison), so one will hit the problem the first time "**it just doesn't work as I thought it worked**". And then let me implement a different comparison method on my own when I have a clearer idea of what _equal_ means **for me** (which is already possible with `attrs`). In case of using inheritance, you will have to do this just once at the base class, so it is not a big deal anyway.

@xgid I think I agree with almost all of that -- the only issue here is that there is no "failure" here -- Python doesn't throw errors when things don't implement equality, it coerces to a value no matter what. So you do need to carefully pick the better of the two options.

Ehm. *Your* EOL Python maybe. 🙃

*Edit* oh wait, you wrote equality... well, that one is already strict so that can't be changed anyway.

@hynek your shiny one too.

```
⊙  python3 -c '                                   Julian@Macnetic ●
quote> class Foo(object):
quote>     def __eq__(self, other):
quote>         return NotImplemented
quote> print(Foo() == Foo())'
False
```

The only thing it fixed there was the comparison operators, I believe. The equality ones still always coerce.

@Julian Sorry, I was not explicit enough regarding the `__eq__` implementation I'm proposing. I mean just:

```python
def __eq__(self, other):
    return id(self) == id(other)
```

When I said "_the one that will throw an error sooner than others_" I did not mean returning `NotImplemented` nor raising an exception. Just that the more restrictive implementation shown above will give you a comparison error in your code whenever you expect that something "equivalent"(1) to this will work:

```python
assert Foo(1,2) == Foo(1,2)
```

instead of succeeding. If it succeeds, you may not find the (logical) "bug" in your code until much, much later. It's a kind of "defensive programming", but just for those "corner cases where things may get dangerous if you are not aware of the real implications", like this one.

(1) By "equivalent" above I mean that this may be a _simple_ `if a == b` in your code for which you expect that the comparison will be successful.

**Edit**: Just now I see that I made a mess talking about "the `is` comparison" in my first comment. Don't know what I was thinking about... 😳

@xgid That's just the `object`-provided `==`, isn't it? So suggesting that is basically suggesting not implementing `__eq__` at all from an attrs perspective?

After chatting with @Julian and reading around a bit more, I think I see what I was missing. It seems that the basic issue is a contention between satisfying both mathematical properties and Liskov. The desire to use `==` as a way to access the equality check, which should satisfy mathematical constraints, brings in the inheritance factor and therefore Liskov. So, don't use `__eq__`... :] My guess is that isn't an option we are looking to consider here, but this does make me feel that any solution is going to be a compromise of some principle.
I haven't analyzed the situation carefully, but `OrderedDict` is a 'classic' example of this issue, and its solution tends towards the Liskov side over the math side of correctness.

There were suggestions that the reason `is`-type checking makes sense is that equality is a type of equivalence relation, and that those are defined on members of a set. An `__eq__` method on a class that can be inherited may be applied to things that we can't include in the set we are considering when writing the `__eq__`, because the subclasses haven't even been written yet. At least by considering only the same type for a `True` result you have restricted the set of things that might be equal. A bit of a helpful explanation, for me at least.

Perhaps there are some options here. Default to no `__eq__` method (IOW `id()` checking from `object`, iirc) and force people to... implement their own? Pick between a pre-existing set of options that satisfy either math or Liskov? It encourages them to check the docs and get a brief explanation of the dilemma, and at least they have made the decision. I'm really not sure what I think should happen anymore, but I at least better understand the issue with where I was going.

FWIW, I made a little implementation and set of tests while exploring my approach. https://repl.it/@altendky/LSP-and-eq-1

@altendky

> That's just the object provided == isn't it? So suggesting that is basically suggesting not implementing __eq__ at all from an attrs perspective?

Exactly! And for the exact same reasons you have explained better than me in your comment. The final goal is precisely that "_It encourages them to check the docs and get a brief explanation of the dilemma_" before they choose anything "meaningful" for them. If I can also easily "pick between a pre-existing set of options", that would be even better, of course! I was talking only about the **default implementation**.

> @xgid That's just the object provided == isn't it? So suggesting that is basically suggesting not implementing __eq__ at all from an attrs perspective?

Returning `NotImplemented` allows the other object to implement a comparator, so explicitly implementing an identity test in a class defeats the design of how Python implements these operators, no?

@wsanchez I'm not sure what your comment means in regard to that quote. That if someone were considering not implementing `__eq__`, they should instead implement it and `return NotImplemented`? By 'object provided ==' I was referring to the `__eq__` provided by the type `object` that we all inherit from.

What I mean is that the way `__eq__` works is that if you don't want to implement it, then don't add the method, or return `NotImplemented` when you don't know. If you implement it as simply `return id(self) == id(other)` (which is, I think, more simply written as `self is other`) then you are forcing an identity test to be used, instead of allowing such a test to be a fallback in the absence of, say, the other object implementing a comparator.

Ah, implementing `__eq__` as `self is other` is not the same as leaving it to `object`'s implementation, because the other object gets a chance in the latter case. I see the connection.

JFTR I've just noticed that the docs were always very clear on this matter: `But the attributes are only compared, if the type of both classes is identical!` So technically we could even get away without a deprecation period, but let's do it anyway.
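As a concrete illustration of the direction the thread settles on, here is a small, hedged demo; it assumes an attrs build that already contains the deprecation patch shown above (the 18.2-era change), so the exact warning behavior may differ on other versions:

```python
import warnings

import attr


@attr.s
class A(object):
    a = attr.ib()


@attr.s
class B(A):
    pass


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    A(1) < A(2)  # identical classes: compares normally, no warning
    A(1) < B(2)  # mixed classes: still compares, but warns (per the patch above)

print([type(w.message).__name__ for w in caught])  # ['DeprecationWarning']
print(A(1) == B(1))  # False -- equality was always strict about the exact class
```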
2018-06-16T05:59:11Z
18.1
python-attrs/attrs
383
python-attrs__attrs-383
[ "382" ]
8274c9fdbc1513c87272340f92cd51dfea27c2ab
diff --git a/src/attr/validators.py b/src/attr/validators.py
--- a/src/attr/validators.py
+++ b/src/attr/validators.py
@@ -135,7 +135,12 @@ class _InValidator(object):
     options = attrib()
 
     def __call__(self, inst, attr, value):
-        if value not in self.options:
+        try:
+            in_options = value in self.options
+        except TypeError as e:  # e.g. `1 in "abc"`
+            in_options = False
+
+        if not in_options:
             raise ValueError(
                 "'{name}' must be in {options!r} (got {value!r})"
                 .format(name=attr.name, options=self.options, value=value)
diff --git a/tests/test_validators.py b/tests/test_validators.py
--- a/tests/test_validators.py
+++ b/tests/test_validators.py
@@ -243,6 +243,19 @@ def test_fail(self):
             "'test' must be in [1, 2, 3] (got None)",
         ) == e.value.args
 
+    def test_fail_with_string(self):
+        """
+        Raise ValueError if the value is outside our options when the
+        options are specified as a string and the value is not a string.
+        """
+        v = in_("abc")
+        a = simple_attr("test")
+        with pytest.raises(ValueError) as e:
+            v(None, a, None)
+        assert (
+            "'test' must be in 'abc' (got None)",
+        ) == e.value.args
+
     def test_repr(self):
         """
         Returned validator has a useful `__repr__`.
Poor error message/type for validators.in_ with a string

```python
import attr

@attr.s
class C:
    s = attr.ib(validator=attr.validators.in_('abc'))

C(s=1)
# TypeError: 'in <string>' requires string as left operand, not int
```

`__contains__` behaves a little weirdly for strings, but I still think this error could be improved. Obvious options would be to catch exceptions and re-raise something with a clearer error message, and/or to deprecate use of strings as the collection here.

Found in attrs=18.1.0 while working on HypothesisWorks/hypothesis#954.
What is the actual problem here? I don't think it's fair to expect in_ to also type check? I think you want

```python
s = attr.ib(validator=[attr.validators.instance_of(str), attr.validators.in_('abc')])
```

?

I think the request is for something like:

```python
def __call__(self, inst, attr, value):
    if value not in self.options:
        try:
            raise ValueError(
                "'{name}' must be in {options!r} (got {value!r})"
                .format(name=attr.name, options=self.options, value=value)
            )
        except Exception as e:
            raise AssertionError(
                "in_ validator for attribute {attr} raised exception for value {value}: {error}"
                .format(attr=attr, value=value, error=e)
            )
```

Almost - the idea is that if `value in self.options` raises an exception instead of returning a bool, we should still raise a ValueError (not e.g. TypeError), and preferably one with a useful and consistent message. I'd write this as follows, and would be happy to open a PR if that would be helpful 😄

```python
def __call__(self, inst, attr, value):
    try:
        if value not in self.options:
            raise ValueError(
                "'{name}' must be in {options!r} (got {value!r})"
                .format(name=attr.name, options=self.options, value=value)
            )
    except Exception:
        raise ValueError(
            "'{name}' must be in {options!r} "
            "(got {value!r}, which caused an internal error)"
            .format(name=attr.name, options=self.options, value=value)
        )
```

The contract for validators isn't super clear in the docs, but I don't see anything that says that `ValueError` is the only acceptable sort of exception to raise. That probably should be clarified, but I would think `TypeError` is a valid exception to raise in the case where data of the wrong type is given, as in this example. I do agree that a better error message would be useful, but I'd definitely include the original exception text.

Uh yeah, there is absolutely no contract about what kind of exceptions are raised by validators, and I'd even argue that changing it could be backward incompatible. 🤔

I think it might make some sense for us to advise that certain exceptions are used, but I wouldn't go farther than that. Otherwise, every validator would have to wrap a try/except around its body, which could be a best practice, but seems like a lame thing to require. Which leaves us with: should we do better than the above error message?

I notice I wrapped the try/except in my example around the wrong code (oops). Lemme try again with a different suggestion:

```python
def __call__(self, inst, attr, value):
    try:
        in_options = value in self.options
    except TypeError as e:
        in_options = False

    if not in_options:
        raise ValueError(
            "'{name}' must be in {options!r} (got {value!r})"
            .format(name=attr.name, options=self.options, value=value)
        )
```

I don't think trying to catch all exceptions is necessarily more correct, but we can definitely say that a non-string object isn't in the provided bucket (that happens to be a string) of options. Since that bucket isn't required to contain objects of a homogeneous type, I think returning `False` is more appropriate (or consistent with, say, a `list`) than raising a `TypeError`.

I guess I would accept a PR in this case.
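For a quick before/after check of the behavior discussed above, a small usage sketch; it assumes a version of attrs that includes the `in_` patch shown above (post-18.1.0), otherwise the `TypeError` branch fires:

```python
import attr


@attr.s
class C(object):
    s = attr.ib(validator=attr.validators.in_("abc"))


try:
    C(s=1)
except ValueError as e:
    # Patched behavior: the failed membership test is reported uniformly.
    print(e)  # "'s' must be in 'abc' (got 1)"
except TypeError as e:
    # Unpatched behavior (e.g. attrs 18.1.0): str.__contains__'s TypeError leaks out.
    print(e)
```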
2018-05-23T13:16:10Z
18.1
python-attrs/attrs
286
python-attrs__attrs-286
[ "284" ]
7501cecf0f4313c3b2597d03ac0853cca1659065
diff --git a/src/attr/_compat.py b/src/attr/_compat.py --- a/src/attr/_compat.py +++ b/src/attr/_compat.py @@ -3,6 +3,7 @@ import platform import sys import types +import warnings PY2 = sys.version_info[0] == 2 @@ -85,11 +86,54 @@ def iteritems(d): def metadata_proxy(d): return types.MappingProxyType(dict(d)) -if PYPY: # pragma: no cover - def set_closure_cell(cell, value): - cell.__setstate__((value,)) + +def import_ctypes(): # pragma: nocover + """ + Moved into a function for testability. + """ + try: + import ctypes + return ctypes + except ImportError: + return None + + +if not PY2: + def just_warn(*args, **kw): + """ + We only warn on Python 3 because we are not aware of any concrete + consequences of not setting the cell on Python 2. + """ + warnings.warn( + "Missing ctypes. Some features like bare super() or accessing " + "__class__ will not work with slots classes.", + RuntimeWarning, + stacklevel=2, + ) else: - import ctypes - set_closure_cell = ctypes.pythonapi.PyCell_Set - set_closure_cell.argtypes = (ctypes.py_object, ctypes.py_object) - set_closure_cell.restype = ctypes.c_int + def just_warn(*args, **kw): # pragma: nocover + """ + We only warn on Python 3 because we are not aware of any concrete + consequences of not setting the cell on Python 2. + """ + + +def make_set_closure_cell(): + """ + Moved into a function for testability. + """ + if PYPY: # pragma: no cover + def set_closure_cell(cell, value): + cell.__setstate__((value,)) + else: + ctypes = import_ctypes() + if ctypes is not None: + set_closure_cell = ctypes.pythonapi.PyCell_Set + set_closure_cell.argtypes = (ctypes.py_object, ctypes.py_object) + set_closure_cell.restype = ctypes.c_int + else: + set_closure_cell = just_warn + return set_closure_cell + + +set_closure_cell = make_set_closure_cell()
diff --git a/tests/test_slots.py b/tests/test_slots.py --- a/tests/test_slots.py +++ b/tests/test_slots.py @@ -13,7 +13,7 @@ import attr -from attr._compat import PY2 +from attr._compat import PY2, PYPY, just_warn, make_set_closure_cell @attr.s @@ -325,76 +325,98 @@ class C2(C1Bare): @pytest.mark.skipif(PY2, reason="closure cell rewriting is PY3-only.") -def test_closure_cell_rewriting(): - """ - Slot classes support proper closure cell rewriting. - - This affects features like `__class__` and the no-arg super(). - """ - non_slot_instance = C1(x=1, y="test") - slot_instance = C1Slots(x=1, y="test") - - assert non_slot_instance.my_class() is C1 - assert slot_instance.my_class() is C1Slots - - # Just assert they return something, and not an exception. - assert non_slot_instance.my_super() - assert slot_instance.my_super() - - -@pytest.mark.skipif(PY2, reason="closure cell rewriting is PY3-only.") -def test_closure_cell_rewriting_inheritance(): - """ - Slot classes support proper closure cell rewriting when inheriting. - - This affects features like `__class__` and the no-arg super(). - """ - @attr.s - class C2(C1): - def my_subclass(self): - return __class__ # NOQA: F821 - - @attr.s - class C2Slots(C1Slots): - def my_subclass(self): - return __class__ # NOQA: F821 - - non_slot_instance = C2(x=1, y="test") - slot_instance = C2Slots(x=1, y="test") - - assert non_slot_instance.my_class() is C1 - assert slot_instance.my_class() is C1Slots - - # Just assert they return something, and not an exception. - assert non_slot_instance.my_super() - assert slot_instance.my_super() - - assert non_slot_instance.my_subclass() is C2 - assert slot_instance.my_subclass() is C2Slots - - -@pytest.mark.skipif(PY2, reason="closure cell rewriting is PY3-only.") -@pytest.mark.parametrize("slots", [True, False]) -def test_closure_cell_rewriting_cls_static(slots): - """ - Slot classes support proper closure cell rewriting for class- and static - methods. - """ - # Python can reuse closure cells, so we create new classes just for - # this test. - - @attr.s(slots=slots) - class C: - @classmethod - def clsmethod(cls): - return __class__ # noqa: F821 - - assert C.clsmethod() is C - - @attr.s(slots=slots) - class D: - @staticmethod - def statmethod(): - return __class__ # noqa: F821 - - assert D.statmethod() is D +class TestClosureCellRewriting(object): + def test_closure_cell_rewriting(self): + """ + Slot classes support proper closure cell rewriting. + + This affects features like `__class__` and the no-arg super(). + """ + non_slot_instance = C1(x=1, y="test") + slot_instance = C1Slots(x=1, y="test") + + assert non_slot_instance.my_class() is C1 + assert slot_instance.my_class() is C1Slots + + # Just assert they return something, and not an exception. + assert non_slot_instance.my_super() + assert slot_instance.my_super() + + def test_inheritance(self): + """ + Slot classes support proper closure cell rewriting when inheriting. + + This affects features like `__class__` and the no-arg super(). + """ + @attr.s + class C2(C1): + def my_subclass(self): + return __class__ # NOQA: F821 + + @attr.s + class C2Slots(C1Slots): + def my_subclass(self): + return __class__ # NOQA: F821 + + non_slot_instance = C2(x=1, y="test") + slot_instance = C2Slots(x=1, y="test") + + assert non_slot_instance.my_class() is C1 + assert slot_instance.my_class() is C1Slots + + # Just assert they return something, and not an exception. 
+ assert non_slot_instance.my_super() + assert slot_instance.my_super() + + assert non_slot_instance.my_subclass() is C2 + assert slot_instance.my_subclass() is C2Slots + + @pytest.mark.parametrize("slots", [True, False]) + def test_cls_static(self, slots): + """ + Slot classes support proper closure cell rewriting for class- and + static methods. + """ + # Python can reuse closure cells, so we create new classes just for + # this test. + + @attr.s(slots=slots) + class C: + @classmethod + def clsmethod(cls): + return __class__ # noqa: F821 + + assert C.clsmethod() is C + + @attr.s(slots=slots) + class D: + @staticmethod + def statmethod(): + return __class__ # noqa: F821 + + assert D.statmethod() is D + + @pytest.mark.skipif( + PYPY, + reason="ctypes are used only on CPython" + ) + def test_missing_ctypes(self, monkeypatch): + """ + Keeps working if ctypes is missing. + + A warning is emitted that points to the actual code. + """ + monkeypatch.setattr(attr._compat, "import_ctypes", lambda: None) + func = make_set_closure_cell() + + with pytest.warns(RuntimeWarning) as wr: + func() + + w = wr.pop() + assert __file__ == w.filename + assert ( + "Missing ctypes. Some features like bare super() or accessing " + "__class__ will not work with slots classes.", + ) == w.message.args + + assert just_warn is func
Allow using attrs without ctypes #226 introduced the use of `ctypes`. I use attrs in a Google App Engine application. The app runs in a sandbox which limits the use of certain modules. `ctypes` is one of those libraries. I can peg my app to use only attrs 17.2.0, but it would be nice to have a fail-gently approach where, if ctypes is unavailable, attrs keeps working, though the behavior allowed by #226 of course would not work.
Now, that’s unfortunate! Making the support unconditional is gonna be easy; the big question is how to document/communicate the behavior to the user. 🤔

Probably log a warning? Hopefully in a way that's visible.

Also, we won't need ctypes in CPython 3.7, right?
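For reference, a condensed sketch of the fail-gently approach the patch above takes; the real patch also handles Python 2 and PyPy, which this sketch omits:

```python
import warnings


def import_ctypes():
    # ctypes may be unavailable in sandboxes such as Google App Engine.
    try:
        import ctypes
        return ctypes
    except ImportError:
        return None


def make_set_closure_cell():
    ctypes = import_ctypes()
    if ctypes is None:
        def set_closure_cell(cell, value):
            # Degrade gracefully: attrs keeps working, but closure cell
            # rewriting (bare super(), __class__ in slots classes) does not.
            warnings.warn(
                "Missing ctypes. Some features like bare super() or "
                "accessing __class__ will not work with slots classes.",
                RuntimeWarning,
                stacklevel=2,
            )
    else:
        set_closure_cell = ctypes.pythonapi.PyCell_Set
        set_closure_cell.argtypes = (ctypes.py_object, ctypes.py_object)
        set_closure_cell.restype = ctypes.c_int
    return set_closure_cell


set_closure_cell = make_set_closure_cell()
```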
2017-11-09T10:33:37Z
17.3
python-attrs/attrs
292
python-attrs__attrs-292
[ "291" ]
a84a36d45f34a82a1bb3180a33d5842f7718cdef
diff --git a/src/attr/_make.py b/src/attr/_make.py --- a/src/attr/_make.py +++ b/src/attr/_make.py @@ -201,6 +201,22 @@ def _is_class_var(annot): return str(annot).startswith("typing.ClassVar") +def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + anns = getattr(cls, "__annotations__", None) + if anns is None: + return {} + + # Verify that the annotations aren't merely inherited. + for super_cls in cls.__mro__[1:]: + if anns is getattr(super_cls, "__annotations__", None): + return {} + + return anns + + def _transform_attrs(cls, these, auto_attribs): """ Transform all `_CountingAttr`s on a class into `Attribute`s. @@ -210,16 +226,15 @@ def _transform_attrs(cls, these, auto_attribs): Return an `_Attributes`. """ cd = cls.__dict__ - anns = getattr(cls, "__annotations__", {}) + anns = _get_annotations(cls) - if these is None and auto_attribs is False: + if these is not None: ca_list = sorted(( - (name, attr) - for name, attr - in cd.items() - if isinstance(attr, _CountingAttr) + (name, ca) + for name, ca + in iteritems(these) ), key=lambda e: e[1].counter) - elif these is None and auto_attribs is True: + elif auto_attribs is True: ca_names = { name for name, attr @@ -251,9 +266,10 @@ def _transform_attrs(cls, these, auto_attribs): ) else: ca_list = sorted(( - (name, ca) - for name, ca - in iteritems(these) + (name, attr) + for name, attr + in cd.items() + if isinstance(attr, _CountingAttr) ), key=lambda e: e[1].counter) non_super_attrs = [
diff --git a/tests/test_annotations.py b/tests/test_annotations.py --- a/tests/test_annotations.py +++ b/tests/test_annotations.py @@ -131,3 +131,26 @@ class C: assert ( "The following `attr.ib`s lack a type annotation: v, y.", ) == e.value.args + + @pytest.mark.parametrize("slots", [True, False]) + def test_auto_attribs_subclassing(self, slots): + """ + Attributes from super classes are inherited, it doesn't matter if the + subclass has annotations or not. + + Ref #291 + """ + @attr.s(slots=slots, auto_attribs=True) + class A: + a: int = 1 + + @attr.s(slots=slots, auto_attribs=True) + class B(A): + b: int = 2 + + @attr.s(slots=slots, auto_attribs=True) + class C(A): + pass + + assert "B(a=1, b=2)" == repr(B()) + assert "C(a=1)" == repr(C())
Type hint defaults don't work with inheritance

Consider

```
In [12]: @attr.s(auto_attribs=True)
    ...: class A:
    ...:     a: int = 10
    ...:

In [13]: @attr.s(auto_attribs=True)
    ...: class B(A):
    ...:     pass
    ...:

In [14]: A()
Out[14]: A(a=10)

In [15]: B()
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-15-1c5ecc61f85b> in <module>()
----> 1 B()

TypeError: __init__() missing 1 required positional argument: 'a'
```

This should work, given that the following works:

```
In [16]: @attr.s()
    ...: class A:
    ...:     a = attr.ib(default=10)
    ...:

In [17]: @attr.s()
    ...: class B(A):
    ...:     pass
    ...:

In [18]: A()
Out[18]: A(a=10)

In [19]: B()
Out[19]: B(a=10)
```
Ugh so it’s not inheritance, it’s inheritance + no new annotations. The problem is that `__annotations__` gets inherited:

```pycon
>>> import attr
>>> @attr.s(auto_attribs=True)
... class A:
...     a: int = 10
>>> @attr.s(auto_attribs=True)
... class B(A):
...     pass
>>> A.__annotations__
{'a': <class 'int'>}
>>> B.__annotations__
{'a': <class 'int'>}
>>> A.__annotations__ is B.__annotations__
True
>>> @attr.s(auto_attribs=True)
... class C(A):
...     c: float = 20.0
>>> C()
C(a=10, c=20.0)
```
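A minimal standalone sketch of the fix the patch above applies — treating annotations as absent when they are merely inherited; `_get_annotations` mirrors the patch, while the demo classes are illustrative (Python 3.6+):

```python
def _get_annotations(cls):
    """Return cls's own annotations, or {} if they are only inherited."""
    anns = getattr(cls, "__annotations__", None)
    if anns is None:
        return {}
    # If a base class owns the very same dict object, the annotations were
    # inherited rather than defined on cls itself.
    for super_cls in cls.__mro__[1:]:
        if anns is getattr(super_cls, "__annotations__", None):
            return {}
    return anns


class A:
    a: int = 10


class B(A):  # defines no annotations of its own
    pass


print(_get_annotations(A))  # {'a': <class 'int'>}
print(_get_annotations(B))  # {}
```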
2017-11-11T07:04:59Z
17.3
python-attrs/attrs
343
python-attrs__attrs-343
[ "300" ]
93eb1e4d21b5cc5c88da61e8182a42bb41cab557
diff --git a/src/attr/_compat.py b/src/attr/_compat.py --- a/src/attr/_compat.py +++ b/src/attr/_compat.py @@ -10,6 +10,13 @@ PYPY = platform.python_implementation() == "PyPy" +if PYPY or sys.version_info[:2] >= (3, 6): + ordered_dict = dict +else: + from collections import OrderedDict + ordered_dict = OrderedDict + + if PY2: from UserDict import IterableUserDict diff --git a/src/attr/_make.py b/src/attr/_make.py --- a/src/attr/_make.py +++ b/src/attr/_make.py @@ -8,7 +8,9 @@ from operator import itemgetter from . import _config -from ._compat import PY2, isclass, iteritems, metadata_proxy, set_closure_cell +from ._compat import ( + PY2, isclass, iteritems, metadata_proxy, ordered_dict, set_closure_cell +) from .exceptions import ( DefaultAlreadySetError, FrozenInstanceError, NotAnAttrsClassError, UnannotatedAttributeError @@ -233,6 +235,13 @@ def _get_annotations(cls): return anns +def _counter_getter(e): + """ + Key function for sorting to avoid re-creating a lambda for every class. + """ + return e[1].counter + + def _transform_attrs(cls, these, auto_attribs): """ Transform all `_CountingAttr`s on a class into `Attribute`s. @@ -245,11 +254,14 @@ def _transform_attrs(cls, these, auto_attribs): anns = _get_annotations(cls) if these is not None: - ca_list = sorted(( + ca_list = [ (name, ca) for name, ca in iteritems(these) - ), key=lambda e: e[1].counter) + ] + + if not isinstance(these, ordered_dict): + ca_list.sort(key=_counter_getter) elif auto_attribs is True: ca_names = { name @@ -593,6 +605,11 @@ def attrs(maybe_cls=None, these=None, repr_ns=None, If *these* is not ``None``, ``attrs`` will *not* search the class body for attributes and will *not* remove any attributes from it. + If *these* is an ordered dict (:class:`dict` on Python 3.6+, + :class:`collections.OrderedDict` otherwise), the order is deduced from + the order of the attributes inside *these*. Otherwise the order + of the definition of the attributes is used. + :type these: :class:`dict` of :class:`str` to :func:`attr.ib` :param str repr_ns: When using nested classes, there's no way in Python 2 @@ -681,6 +698,7 @@ def attrs(maybe_cls=None, these=None, repr_ns=None, .. versionadded:: 17.3.0 *auto_attribs* .. versionchanged:: 18.1.0 If *these* is passed, no attributes are deleted from the class body. + .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. """ def wrap(cls): if getattr(cls, "__class__", None) is None: @@ -1513,6 +1531,11 @@ def make_class(name, attrs, bases=(object,), **attributes_arguments): :param attrs: A list of names or a dictionary of mappings of names to attributes. + + If *attrs* is a list or an ordered dict (:class:`dict` on Python 3.6+, + :class:`collections.OrderedDict` otherwise), the order is deduced from + the order of the names or attributes inside *attrs*. Otherwise the + order of the definition of the attributes is used. :type attrs: :class:`list` or :class:`dict` :param tuple bases: Classes that the new class will subclass. @@ -1522,7 +1545,8 @@ def make_class(name, attrs, bases=(object,), **attributes_arguments): :return: A new class with *attrs*. :rtype: type - .. versionadded:: 17.1.0 *bases* + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. """ if isinstance(attrs, dict): cls_dict = attrs
diff --git a/tests/test_make.py b/tests/test_make.py --- a/tests/test_make.py +++ b/tests/test_make.py @@ -19,7 +19,7 @@ import attr from attr import _config -from attr._compat import PY2 +from attr._compat import PY2, ordered_dict from attr._make import ( Attribute, Factory, _AndValidator, _Attributes, _ClassBuilder, _CountingAttr, _transform_attrs, and_, fields, make_class, validate @@ -281,6 +281,20 @@ class C(object): assert 5 == C().x assert "C(x=5)" == repr(C()) + def test_these_ordered(self): + """ + If these is passed ordered attrs, their order respect instead of the + counter. + """ + b = attr.ib(default=2) + a = attr.ib(default=1) + + @attr.s(these=ordered_dict([("a", a), ("b", b)])) + class C(object): + pass + + assert "C(a=1, b=2)" == repr(C()) + def test_multiple_inheritance(self): """ Order of attributes doesn't get mixed up by multiple inheritance. @@ -610,6 +624,18 @@ def test_missing_sys_getframe(self, monkeypatch): assert 1 == len(C.__attrs_attrs__) + def test_make_class_ordered(self): + """ + If `make_class()` is passed ordered attrs, their order is respected + instead of the counter. + """ + b = attr.ib(default=2) + a = attr.ib(default=1) + + C = attr.make_class("C", ordered_dict([("a", a), ("b", b)])) + + assert "C(a=1, b=2)" == repr(C()) + class TestFields(object): """ @@ -686,13 +712,14 @@ def test_convert_factory_property(self, val, init): """ Property tests for attributes with convert, and a factory default. """ - C = make_class("C", { - "y": attr.ib(), - "x": attr.ib( + C = make_class("C", ordered_dict([ + ("y", attr.ib()), + ("x", attr.ib( init=init, default=Factory(lambda: val), - converter=lambda v: v + 1), - }) + converter=lambda v: v + 1 + )), + ])) c = C(2) assert c.x == val + 1
Allow overwriting order inferred from _CountingAttr.counter when passing ordered `these` attrs classes can be constructed dynamically using `attr.s(maybe_cls=A, these=some_dict)`. Usually `some_dict` will be a standard (unordered) python dictionary and the order of attributes has to be inferred from `_CountingAttr.counter`. What about allowing the user to overwrite the `_CountingAttr.counter` by passing an `OrderedDict` as argument to `these`?
I think @Julian would like this? Yep! Would love to see what this looks like.
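To show what the requested behavior looks like in use, a small sketch; it assumes a version of attrs that includes the ordering patch above (the 18.1-era change), where an ordered mapping passed as `these`/`attrs` overrides the `_CountingAttr` counters:

```python
from collections import OrderedDict

import attr

# Deliberately define the attrs in "reverse" order, so their counters alone
# would sort b before a.
b = attr.ib(default=2)
a = attr.ib(default=1)

C = attr.make_class("C", OrderedDict([("a", a), ("b", b)]))

print(C())  # C(a=1, b=2) -- order taken from the mapping, not the counters
```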
2018-02-05T09:39:46Z
17.4
python-attrs/attrs
277
python-attrs__attrs-277
[ "262" ]
2a50c4b93002a0f4f4355051759beae5e0324497
diff --git a/src/attr/__init__.py b/src/attr/__init__.py --- a/src/attr/__init__.py +++ b/src/attr/__init__.py @@ -1,5 +1,7 @@ from __future__ import absolute_import, division, print_function +from functools import partial + from ._funcs import ( asdict, assoc, @@ -43,6 +45,7 @@ s = attributes = attrs ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) __all__ = [ "Attribute", diff --git a/src/attr/_make.py b/src/attr/_make.py --- a/src/attr/_make.py +++ b/src/attr/_make.py @@ -18,6 +18,7 @@ DefaultAlreadySetError, FrozenInstanceError, NotAnAttrsClassError, + UnannotatedAttributeError, ) @@ -190,7 +191,17 @@ class MyClassAttributes(tuple): ]) -def _transform_attrs(cls, these): +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The implementation is gross but importing `typing` is slow and there are + discussions to remove it from the stdlib alltogether. + """ + return str(annot).startswith("typing.ClassVar") + + +def _transform_attrs(cls, these, auto_attribs): """ Transform all `_CountingAttr`s on a class into `Attribute`s. @@ -198,24 +209,58 @@ def _transform_attrs(cls, these): Return an `_Attributes`. """ - if these is None: - ca_list = [(name, attr) - for name, attr - in cls.__dict__.items() - if isinstance(attr, _CountingAttr)] + cd = cls.__dict__ + anns = getattr(cls, "__annotations__", {}) + + if these is None and auto_attribs is False: + ca_list = sorted(( + (name, attr) + for name, attr + in cd.items() + if isinstance(attr, _CountingAttr) + ), key=lambda e: e[1].counter) + elif these is None and auto_attribs is True: + ca_names = { + name + for name, attr + in cd.items() + if isinstance(attr, _CountingAttr) + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + if not isinstance(a, _CountingAttr): + if a is NOTHING: + a = attrib() + else: + a = attrib(default=a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if len(unannotated) > 0: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join(sorted( + unannotated, + key=lambda n: cd.get(n).counter + )) + "." 
+ ) else: - ca_list = [(name, ca) - for name, ca - in iteritems(these)] - ca_list = sorted(ca_list, key=lambda e: e[1].counter) - - ann = getattr(cls, "__annotations__", {}) + ca_list = sorted(( + (name, ca) + for name, ca + in iteritems(these) + ), key=lambda e: e[1].counter) non_super_attrs = [ Attribute.from_counting_attr( name=attr_name, ca=ca, - type=ann.get(attr_name), + type=anns.get(attr_name), ) for attr_name, ca in ca_list @@ -250,7 +295,7 @@ def _transform_attrs(cls, these): Attribute.from_counting_attr( name=attr_name, ca=ca, - type=ann.get(attr_name) + type=anns.get(attr_name) ) for attr_name, ca in ca_list @@ -296,8 +341,8 @@ class _ClassBuilder(object): "_frozen", "_has_post_init", ) - def __init__(self, cls, these, slots, frozen): - attrs, super_attrs = _transform_attrs(cls, these) + def __init__(self, cls, these, slots, frozen, auto_attribs): + attrs, super_attrs = _transform_attrs(cls, these, auto_attribs) self._cls = cls self._cls_dict = dict(cls.__dict__) if slots else {} @@ -460,7 +505,7 @@ def add_cmp(self): def attrs(maybe_cls=None, these=None, repr_ns=None, repr=True, cmp=True, hash=None, init=True, - slots=False, frozen=False, str=False): + slots=False, frozen=False, str=False, auto_attribs=False): r""" A class decorator that adds `dunder <https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the @@ -535,6 +580,23 @@ def attrs(maybe_cls=None, these=None, repr_ns=None, ``object.__setattr__(self, "attribute_name", value)``. .. _slots: https://docs.python.org/3/reference/datamodel.html#slots + :param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes + (Python 3.6 and later only) from the class body. + + In this case, you **must** annotate every field. If ``attrs`` + encounters a field that is set to an :func:`attr.ib` but lacks a type + annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is + raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't + want to set a type. + + If you assign a value to those attributes (e.g. ``x: int = 42``), that + value becomes the default value like if it were passed using + ``attr.ib(default=42)``. Passing an instance of :class:`Factory` also + works as expected. + + Attributes annotated as :class:`typing.ClassVar` are **ignored**. + + .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ .. versionadded:: 16.0.0 *slots* .. versionadded:: 16.1.0 *frozen* @@ -542,12 +604,13 @@ def attrs(maybe_cls=None, these=None, repr_ns=None, .. versionchanged:: 17.1.0 *hash* supports ``None`` as value which is also the default now. + .. versionadded:: 17.3.0 *auto_attribs* """ def wrap(cls): if getattr(cls, "__class__", None) is None: raise TypeError("attrs only works with new-style classes.") - builder = _ClassBuilder(cls, these, slots, frozen) + builder = _ClassBuilder(cls, these, slots, frozen, auto_attribs) if repr is True: builder.add_repr(repr_ns) diff --git a/src/attr/exceptions.py b/src/attr/exceptions.py --- a/src/attr/exceptions.py +++ b/src/attr/exceptions.py @@ -37,3 +37,12 @@ class DefaultAlreadySetError(RuntimeError): .. versionadded:: 17.1.0 """ + + +class UnannotatedAttributeError(RuntimeError): + """ + A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type + annotation. + + .. versionadded:: 17.3.0 + """
diff --git a/tests/test_annotations.py b/tests/test_annotations.py --- a/tests/test_annotations.py +++ b/tests/test_annotations.py @@ -4,12 +4,15 @@ Python 3.6+ only. """ +import types import typing import pytest import attr +from attr.exceptions import UnannotatedAttributeError + class TestAnnotations: """ @@ -65,3 +68,66 @@ class C: y: int assert 1 == len(attr.fields(C)) + + @pytest.mark.parametrize("slots", [True, False]) + def test_auto_attribs(self, slots): + """ + If *auto_attribs* is True, bare annotations are collected too. + Defaults work and class variables are ignored. + """ + @attr.s(auto_attribs=True, slots=slots) + class C: + cls_var: typing.ClassVar[int] = 23 + a: int + x: typing.List[int] = attr.Factory(list) + y: int = 2 + z: int = attr.ib(default=3) + foo: typing.Any = None + + i = C(42) + assert "C(a=42, x=[], y=2, z=3, foo=None)" == repr(i) + + attr_names = set(a.name for a in C.__attrs_attrs__) + assert "a" in attr_names # just double check that the set works + assert "cls_var" not in attr_names + + assert int == attr.fields(C).a.type + + assert attr.Factory(list) == attr.fields(C).x.default + assert typing.List[int] == attr.fields(C).x.type + + assert int == attr.fields(C).y.type + assert 2 == attr.fields(C).y.default + + assert int == attr.fields(C).z.type + + assert typing.Any == attr.fields(C).foo.type + + # Class body is clean. + if slots is False: + with pytest.raises(AttributeError): + C.y + + assert 2 == i.y + else: + assert isinstance(C.y, types.MemberDescriptorType) + + i.y = 23 + assert 23 == i.y + + @pytest.mark.parametrize("slots", [True, False]) + def test_auto_attribs_unannotated(self, slots): + """ + Unannotated `attr.ib`s raise an error. + """ + with pytest.raises(UnannotatedAttributeError) as e: + @attr.s(slots=slots, auto_attribs=True) + class C: + v = attr.ib() + x: int + y = attr.ib() + z: str + + assert ( + "The following `attr.ib`s lack a type annotation: v, y.", + ) == e.value.args diff --git a/tests/test_make.py b/tests/test_make.py --- a/tests/test_make.py +++ b/tests/test_make.py @@ -144,7 +144,7 @@ def test_no_modifications(self): Doesn't attach __attrs_attrs__ to the class anymore. """ C = make_tc() - _transform_attrs(C, None) + _transform_attrs(C, None, False) assert None is getattr(C, "__attrs_attrs__", None) @@ -153,7 +153,7 @@ def test_normal(self): Transforms every `_CountingAttr` and leaves others (a) be. """ C = make_tc() - attrs, _, = _transform_attrs(C, None) + attrs, _, = _transform_attrs(C, None, False) assert ["z", "y", "x"] == [a.name for a in attrs] @@ -165,14 +165,14 @@ def test_empty(self): class C(object): pass - assert _Attributes(((), [])) == _transform_attrs(C, None) + assert _Attributes(((), [])) == _transform_attrs(C, None, False) def test_transforms_to_attribute(self): """ All `_CountingAttr`s are transformed into `Attribute`s. """ C = make_tc() - attrs, super_attrs = _transform_attrs(C, None) + attrs, super_attrs = _transform_attrs(C, None, False) assert [] == super_attrs assert 3 == len(attrs) @@ -188,7 +188,7 @@ class C(object): y = attr.ib() with pytest.raises(ValueError) as e: - _transform_attrs(C, None) + _transform_attrs(C, None, False) assert ( "No mandatory attributes allowed after an attribute with a " "default value or factory. 
Attribute in question: Attribute" @@ -207,7 +207,7 @@ class Base(object): class C(Base): y = attr.ib() - attrs, super_attrs = _transform_attrs(C, {"x": attr.ib()}) + attrs, super_attrs = _transform_attrs(C, {"x": attr.ib()}, False) assert [] == super_attrs assert ( @@ -817,7 +817,7 @@ def test_repr(self): class C(object): pass - b = _ClassBuilder(C, None, True, True) + b = _ClassBuilder(C, None, True, True, False) assert "<_ClassBuilder(cls=C)>" == repr(b) @@ -828,7 +828,7 @@ def test_returns_self(self): class C(object): x = attr.ib() - b = _ClassBuilder(C, None, True, True) + b = _ClassBuilder(C, None, True, True, False) cls = b.add_cmp().add_hash().add_init().add_repr("ns").add_str() \ .build_class()
Add option to collect annotated fields Since I’m sick of hearing that “[PEP 557](https://www.python.org/dev/peps/pep-0557/) is like attrs, but using variable annotations for field declarations”, I’d like to have an option to collect annotated fields that have no attr.ib definition. ie. ```python @attr.s(collect_bare=True) class C: x: int ``` should be equivalent to: ```python @attr.s class C: x: int = attr.ib() ``` I’m open to better/shorter names. Volunteers? @chadrik maybe? :)
Hm I'd actually be ok with doing this by default? A turn off switch would be nice, just default collect_bare to true? Is this a compatibility issue? I’d love to but that is technically backward incompatible. Durn it. We could run it thru a deprecation cycle I guess? `s/collect_bare/automatic_attributes/` Maybe just `auto_attribs`? So I gave this a lackluster shot and I’ve run into a problem: ```python def test_auto_attribs(self): """ If *auto_attribs* is True, bare annotations are collected too. """ @attr.s(auto_attribs=True) class C: x: typing.List[int] b = attr.ib() y: int assert "C(x=1, b=2, y=3)" == repr(C(1, 2, 3)) ``` `x` and `y` are defined in `__annotations__` while `b` is defined in `__dict__`. It there a way to determine their order? *** To me it looks like all we can do is to allow either 100% annotations or 100% `attr.ib`s which only makes sense once we have something like this: ```python @attr.s(auto_attribs=True, check_types=True) class Point: x: float y: float ``` or am I missing something? (since we can’t attach any more meta data to the fields, this has to be something that happens in `@attr.s`) This kind of smells like a `record = attr.s(auto_attribs=True, check_types=True, frozen=True, slots=True)` which is certainly useful *** Or am I missing something?. Ah OK: we just have to enforce that all fields are annotated. That’s not that bad.
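For reference, a minimal sketch of the API this thread converges on, assuming the `auto_attribs` flag from the patch above (the `Point` class and its field names here are illustrative only):

```python
import typing

import attr


@attr.s(auto_attribs=True)
class Point:
    x: float                                     # mandatory, no default
    y: float = 0.0                               # a plain assignment becomes the default
    tags: typing.List[str] = attr.Factory(list)  # Factory defaults work as usual
    extra: typing.Any = None                     # use typing.Any if you don't care about the type


print(Point(1.0))  # Point(x=1.0, y=0.0, tags=[], extra=None)
```

Per the patch, an `attr.ib()` that lacks an annotation in such a class raises `attr.exceptions.UnannotatedAttributeError`.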
2017-10-27T08:03:24Z
17.2
python-attrs/attrs
229
python-attrs__attrs-229
[ "221" ]
37a421e559a6330b7ace242698b06b258b979e91
diff --git a/src/attr/_make.py b/src/attr/_make.py --- a/src/attr/_make.py +++ b/src/attr/_make.py @@ -181,11 +181,6 @@ def _transform_attrs(cls, these): If *these* is passed, use that and don't look for them on the class. """ - super_cls = [] - for c in reversed(cls.__mro__[1:-1]): - sub_attrs = getattr(c, "__attrs_attrs__", None) - if sub_attrs is not None: - super_cls.extend(a for a in sub_attrs if a not in super_cls) if these is None: ca_list = [(name, attr) for name, attr @@ -201,6 +196,17 @@ def _transform_attrs(cls, these): for attr_name, ca in sorted(ca_list, key=lambda e: e[1].counter) ] + + super_cls = [] + non_super_names = set(a.name for a in non_super_attrs) + for c in reversed(cls.__mro__[1:-1]): + sub_attrs = getattr(c, "__attrs_attrs__", None) + if sub_attrs is not None: + super_cls.extend( + a for a in sub_attrs + if a not in super_cls and a.name not in non_super_names + ) + attr_names = [a.name for a in super_cls + non_super_attrs] AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
diff --git a/tests/test_dark_magic.py b/tests/test_dark_magic.py --- a/tests/test_dark_magic.py +++ b/tests/test_dark_magic.py @@ -271,3 +271,15 @@ def compute(self): return self.x + 1 assert C(1, 2) == C() + + @pytest.mark.parametrize("slots", [True, False]) + @pytest.mark.parametrize("frozen", [True, False]) + def test_attrib_overwrite(self, slots, frozen): + """ + Subclasses can overwrite attributes of their superclass. + """ + @attr.s(slots=slots, frozen=frozen) + class SubOverwrite(Super): + x = attr.ib(default=attr.Factory(list)) + + assert SubOverwrite([]) == SubOverwrite()
Can't overide a __super__'s attrib. Hi. Is it not possible to do this? Let's say I want to change the default for arg 'p' in the inherited Class, B: ``` @attr.s class A(object): p=attr.ib(default='old') @attr.s class B(A): p=attr.ib(default='new') B().p ``` I get farts: ``` Traceback (most recent call last): File "E:/D/OneDrive/PyCharm/attrs_test/01.py", line 44, in <module> class B(A): File "E:\D\OneDrive\Miniconda3\envs\dev\lib\site-packages\attr\_make.py", line 391, in attributes return wrap(maybe_cls) File "E:\D\OneDrive\Miniconda3\envs\dev\lib\site-packages\attr\_make.py", line 364, in wrap cls = _add_init(cls, effectively_frozen) File "E:\D\OneDrive\Miniconda3\envs\dev\lib\site-packages\attr\_make.py", line 569, in _add_init bytecode = compile(script, unique_filename, "exec") File "<attrs generated init 6e95dcc9478c7a8b8784f6244afbff55f563f7c6>", line 1 SyntaxError: duplicate argument 'p' in function definition ``` Thanks.
Yes, that's a known bug. It was mentioned in a different ticket and I asked the reporter to file a new one, but he never got around to it, so it got lost.
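For reference, a minimal sketch of the behaviour the patch above enables (mirroring the `test_attrib_overwrite` test): the subclass attribute now shadows the inherited one instead of producing a duplicate `__init__` argument.

```python
import attr


@attr.s
class A(object):
    p = attr.ib(default='old')


@attr.s
class B(A):
    p = attr.ib(default='new')


assert A().p == 'old'
assert B().p == 'new'  # no more "duplicate argument 'p'" SyntaxError
```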
2017-08-12T07:31:40Z
17.2
python-attrs/attrs
60
python-attrs__attrs-60
[ "3" ]
5a1814f8d07e00d47d7d81274e4b4ee10dd83d41
diff --git a/src/attr/__init__.py b/src/attr/__init__.py --- a/src/attr/__init__.py +++ b/src/attr/__init__.py @@ -19,6 +19,7 @@ get_run_validators, set_run_validators, ) +from . import exceptions from . import filters from . import validators @@ -49,6 +50,7 @@ "attrib", "attributes", "attrs", + "exceptions", "fields", "filters", "get_run_validators", diff --git a/src/attr/_make.py b/src/attr/_make.py --- a/src/attr/_make.py +++ b/src/attr/_make.py @@ -3,8 +3,9 @@ import hashlib import linecache -from ._compat import exec_, iteritems, isclass, iterkeys from . import _config +from ._compat import exec_, iteritems, isclass, iterkeys +from .exceptions import FrozenInstanceError class _Nothing(object): @@ -145,8 +146,16 @@ def _transform_attrs(cls, these): had_default = True +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + raise FrozenInstanceError() + + def attributes(maybe_cls=None, these=None, repr_ns=None, - repr=True, cmp=True, hash=True, init=True, slots=False): + repr=True, cmp=True, hash=True, init=True, + slots=False, frozen=False): """ A class decorator that adds `dunder <https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the @@ -161,33 +170,42 @@ def attributes(maybe_cls=None, these=None, repr_ns=None, If *these* is not `None`, the class body is *ignored*. :type these: :class:`dict` of :class:`str` to :func:`attr.ib` - :param repr_ns: When using nested classes, there's no way in Python 2 to - automatically detect that. Therefore it's possible to set the + :param str repr_ns: When using nested classes, there's no way in Python 2 + to automatically detect that. Therefore it's possible to set the namespace explicitly for a more meaningful ``repr`` output. - - :param repr: Create a ``__repr__`` method with a human readable + :param bool repr: Create a ``__repr__`` method with a human readable represantation of ``attrs`` attributes.. - :type repr: bool - - :param cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``, + :param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` methods that compare the class as if it were a tuple of its ``attrs`` attributes. But the attributes are *only* compared, if the type of both classes is *identical*! - :type cmp: bool + :param bool hash: Create a ``__hash__`` method that returns the + :func:`hash` of a tuple of all ``attrs`` attribute values. + :param bool init: Create a ``__init__`` method that initialiazes the + ``attrs`` attributes. Leading underscores are stripped for the + argument name. + :param bool slots: Create a slots_-style class that's more + memory-efficient. See :ref:`slots` for further ramifications. + :param bool frozen: Make instances immutable after initialization. If + someone attempts to modify a frozen instance, + :exc:`attr.exceptions.FrozenInstanceError` is raised. + + Please note: - :param hash: Create a ``__hash__`` method that returns the :func:`hash` of - a tuple of all ``attrs`` attribute values. - :type hash: bool + 1. This is achieved by installing a custom ``__setattr__`` method + on your class so you can't implement an own one. - :param init: Create a ``__init__`` method that initialiazes the ``attrs`` - attributes. Leading underscores are stripped for the argument name. - :type init: bool + 2. True immutability is impossible in Python. - :param slots: Create a slots_-style class that's more memory-efficient. - See :ref:`slots` for further ramifications. - :type slots: bool + 3. 
This *does* have a minor a runtime performance impact when + initializing new instances. In other words: ``__init__`` is + slightly slower with ``frozen=True``. - .. _slots: https://docs.python.org/3.5/reference/datamodel.html#slots + .. _slots: https://docs.python.org/3.5/reference/datamodel.html#slots + + .. versionadded:: 16.0.0 *slots* + + .. versionadded:: 16.1.0 *frozen* """ def wrap(cls): if getattr(cls, "__class__", None) is None: @@ -209,8 +227,10 @@ def wrap(cls): if hash is True: cls = _add_hash(cls) if init is True: - cls = _add_init(cls) - if slots: + cls = _add_init(cls, frozen) + if frozen is True: + cls.__setattr__ = _frozen_setattrs + if slots is True: cls_dict = dict(cls.__dict__) cls_dict["__slots__"] = tuple(ca_list) for ca_name in ca_list: @@ -367,7 +387,10 @@ def repr_(self): return cls -def _add_init(cls): +def _add_init(cls, frozen): + """ + Add a __init__ method to *cls*. If *frozen* is True, make it immutable. + """ attrs = [a for a in cls.__attrs_attrs__ if a.init or a.default is not NOTHING] @@ -378,14 +401,21 @@ def _add_init(cls): sha1.hexdigest() ) - script = _attrs_to_script(attrs) + script = _attrs_to_script(attrs, frozen) locs = {} bytecode = compile(script, unique_filename, "exec") attr_dict = dict((a.name, a) for a in attrs) - exec_(bytecode, {"NOTHING": NOTHING, - "attr_dict": attr_dict, - "validate": validate, - "_convert": _convert}, locs) + globs = { + "NOTHING": NOTHING, + "attr_dict": attr_dict, + "validate": validate, + "_convert": _convert + } + if frozen is True: + # Save the lookup overhead in __init__ if we need to circumvent + # immutability. + globs["_cached_setattr"] = object.__setattr__ + exec_(bytecode, globs, locs) init = locs["__init__"] # In order of debuggers like PDB being able to step through the code, @@ -450,11 +480,31 @@ def _convert(inst): setattr(inst, a.name, a.convert(getattr(inst, a.name))) -def _attrs_to_script(attrs): +def _attrs_to_script(attrs, frozen): """ Return a valid Python script of an initializer for *attrs*. + + If *frozen* is True, we cannot set the attributes directly so we use + a cached ``object.__setattr__``. 
""" lines = [] + if frozen is True: + lines.append( + "_setattr = _cached_setattr.__get__(self, self.__class__)" + ) + + def fmt_setter(attr_name, value): + return "_setattr('%(attr_name)s', %(value)s)" % { + "attr_name": attr_name, + "value": value, + } + else: + def fmt_setter(attr_name, value): + return "self.%(attr_name)s = %(value)s" % { + "attr_name": attr_name, + "value": value, + } + args = [] has_validator = False has_convert = False @@ -467,14 +517,16 @@ def _attrs_to_script(attrs): arg_name = a.name.lstrip("_") if a.init is False: if isinstance(a.default, Factory): - lines.append("""\ -self.{attr_name} = attr_dict["{attr_name}"].default.factory()""".format( - attr_name=attr_name, + lines.append(fmt_setter( + attr_name, + "attr_dict['{attr_name}'].default.factory()" + .format(attr_name=attr_name) )) else: - lines.append("""\ -self.{attr_name} = attr_dict["{attr_name}"].default""".format( - attr_name=attr_name, + lines.append(fmt_setter( + attr_name, + "attr_dict['{attr_name}'].default" + .format(attr_name=attr_name) )) elif a.default is not NOTHING and not isinstance(a.default, Factory): args.append( @@ -483,26 +535,21 @@ def _attrs_to_script(attrs): attr_name=attr_name, ) ) - lines.append("self.{attr_name} = {arg_name}".format( - arg_name=arg_name, - attr_name=attr_name, - )) + lines.append(fmt_setter(attr_name, arg_name)) elif a.default is not NOTHING and isinstance(a.default, Factory): args.append("{arg_name}=NOTHING".format(arg_name=arg_name)) - lines.extend("""\ -if {arg_name} is not NOTHING: - self.{attr_name} = {arg_name} -else: - self.{attr_name} = attr_dict["{attr_name}"].default.factory()""" - .format(attr_name=attr_name, - arg_name=arg_name) - .split("\n")) + lines.append("if {arg_name} is not NOTHING:" + .format(arg_name=arg_name)) + lines.append(" " + fmt_setter(attr_name, arg_name)) + lines.append("else:") + lines.append(" " + fmt_setter( + attr_name, + "attr_dict['{attr_name}'].default.factory()" + .format(attr_name=attr_name) + )) else: args.append(arg_name) - lines.append("self.{attr_name} = {arg_name}".format( - attr_name=attr_name, - arg_name=arg_name, - )) + lines.append(fmt_setter(attr_name, arg_name)) if has_convert: lines.append("_convert(self)") @@ -511,10 +558,10 @@ def _attrs_to_script(attrs): return """\ def __init__(self, {args}): - {setters} + {lines} """.format( args=", ".join(args), - setters="\n ".join(lines) if lines else "pass", + lines="\n ".join(lines) if lines else "pass", ) @@ -544,7 +591,7 @@ def __init__(self, **kw): raise TypeError("Missing argument '{arg}'.".format(arg=a)) def __setattr__(self, name, value): - raise AttributeError("can't set attribute") # To mirror namedtuple. + raise FrozenInstanceError() @classmethod def from_counting_attr(cls, name, ca): diff --git a/src/attr/exceptions.py b/src/attr/exceptions.py new file mode 100644 --- /dev/null +++ b/src/attr/exceptions.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import, division, print_function + + +class FrozenInstanceError(AttributeError): + """ + A frozen/immutable instance has been attempted to be modified. + + It mirrors the behavior of ``namedtuples`` by using the same error message + and subclassing :exc:`AttributeError``. + """ + msg = "can't set attribute" + args = [msg]
diff --git a/tests/test_dark_magic.py b/tests/test_dark_magic.py --- a/tests/test_dark_magic.py +++ b/tests/test_dark_magic.py @@ -8,6 +8,7 @@ from attr._compat import TYPE from attr._make import Attribute, NOTHING +from attr.exceptions import FrozenInstanceError @attr.s @@ -62,6 +63,11 @@ class SubSlots(SuperSlots): y = attr.ib() +@attr.s(frozen=True, slots=True) +class Frozen(object): + x = attr.ib() + + class TestDarkMagic(object): """ Integration tests. @@ -114,12 +120,12 @@ class C3(object): assert "C3(_x=1)" == repr(C3(x=1)) - @given(booleans()) - def test_programmatic(self, slots): + @given(booleans(), booleans()) + def test_programmatic(self, slots, frozen): """ `attr.make_class` works. """ - PC = attr.make_class("PC", ["a", "b"], slots=slots) + PC = attr.make_class("PC", ["a", "b"], slots=slots, frozen=frozen) assert ( Attribute(name="a", default=NOTHING, validator=None, repr=True, cmp=True, hash=True, init=True), @@ -155,3 +161,19 @@ class Sub2(base): i = Sub2(x=obj) assert i.x is i.meth() is obj assert "Sub2(x={obj})".format(obj=obj) == repr(i) + + @pytest.mark.parametrize("frozen_class", [ + Frozen, # has slots=True + attr.make_class("FrozenToo", ["x"], slots=False, frozen=True), + ]) + def test_frozen_instance(self, frozen_class): + """ + Frozen instances can't be modified (easily). + """ + frozen = frozen_class(1) + + with pytest.raises(FrozenInstanceError) as e: + frozen.x = 2 + + assert e.value.args[0] == "can't set attribute" + assert 1 == frozen.x diff --git a/tests/test_dunders.py b/tests/test_dunders.py --- a/tests/test_dunders.py +++ b/tests/test_dunders.py @@ -35,7 +35,7 @@ class InitC(object): __attrs_attrs__ = [simple_attr("a"), simple_attr("b")] -InitC = _add_init(InitC) +InitC = _add_init(InitC, False) class TestAddCmp(object): @@ -219,12 +219,13 @@ class TestAddInit(object): """ Tests for `_add_init`. """ - @given(booleans()) - def test_init(self, slots): + @given(booleans(), booleans()) + def test_init(self, slots, frozen): """ If `init` is False, ignore that attribute. """ - C = make_class("C", {"a": attr(init=False), "b": attr()}, slots=slots) + C = make_class("C", {"a": attr(init=False), "b": attr()}, + slots=slots, frozen=frozen) with pytest.raises(TypeError) as e: C(a=1, b=2) @@ -233,8 +234,8 @@ def test_init(self, slots): e.value.args[0] ) - @given(booleans()) - def test_no_init_default(self, slots): + @given(booleans(), booleans()) + def test_no_init_default(self, slots, frozen): """ If `init` is False but a Factory is specified, don't allow passing that argument but initialize it anyway. @@ -243,7 +244,7 @@ def test_no_init_default(self, slots): "_a": attr(init=False, default=42), "_b": attr(init=False, default=Factory(list)), "c": attr() - }, slots=slots) + }, slots=slots, frozen=frozen) with pytest.raises(TypeError): C(a=1, c=2) with pytest.raises(TypeError): @@ -252,8 +253,8 @@ def test_no_init_default(self, slots): i = C(23) assert (42, [], 23) == (i._a, i._b, i.c) - @given(booleans()) - def test_no_init_order(self, slots): + @given(booleans(), booleans()) + def test_no_init_order(self, slots, frozen): """ If an attribute is `init=False`, it's legal to come after a mandatory attribute. 
@@ -261,7 +262,7 @@ def test_no_init_order(self, slots): make_class("C", { "a": attr(default=Factory(list)), "b": attr(init=False), - }, slots=slots) + }, slots=slots, frozen=frozen) def test_sets_attributes(self): """ @@ -282,7 +283,7 @@ class C(object): simple_attr(name="c", default=None), ] - C = _add_init(C) + C = _add_init(C, False) i = C() assert 2 == i.a assert "hallo" == i.b @@ -300,7 +301,7 @@ class C(object): simple_attr(name="a", default=Factory(list)), simple_attr(name="b", default=Factory(D)), ] - C = _add_init(C) + C = _add_init(C, False) i = C() assert [] == i.a assert isinstance(i.b, D) @@ -363,7 +364,7 @@ def test_underscores(self): class C(object): __attrs_attrs__ = [simple_attr("_private")] - C = _add_init(C) + C = _add_init(C, False) i = C(private=42) assert 42 == i._private
clean division between "value" and "object" Objects, as in object-oriented programming, are side-effecty, mutable state, whose methods ought to represent I/O. Values, as in functional programming, are immutable data, whose methods ought to represent computation. Python does not have as good a division between these very different sorts of beasts as it should, but it does have one critical distinction: the `__hash__` method. `characteristic` has this issue which crops up occasionally where you end up with objects that you can't put into a dictionary as a key, because one of its attributes is a dictionary. Sometimes people expect to be able to do this because `characteristic` makes so many other things easy, and just expect dictionaries to suddenly be immutable; sometimes people expect hash-by-identity. I propose that attr provide a way to specifically create two types of objects with a distinct interface; one that creates a "value" and one that creates an "object", so that users can see issues around mutability far earlier in the process. So, for example, the "object" type would: - not provide `__hash__` by default, provide an `__eq__` that does structural equality, and `__gt__`/`__lt__` that just raise exceptions - if asked, provide an identity-based `__hash__`, but then also switch to an identity-based `__eq__` and the 'value" type would - call `hash()` on all of its arguments at construction time so it would fail immediately if it contained a mutable type - fail immediately at class-definition time if any validator is mutable - provide immutable descriptors for all its attributes
How would that look API-wise? Would something like `@attr.object` or `@attr.value` be presets for `@attr.s`? I'm not sure. I'm trying to decide what I think the default for `attr.s` is; there's a strong case in my mind for both default-to-mutable or default-to-immutable. `attr.s(mutable=True)`? The default should be what most people (and I!) would expect it to do. We’re still on Python and don’t have free COW structures etc. Therefore any immutability gimmickry ought to be opt-in (but in place and simple to use). I think you're right; Python programmers are going to expect mutability by default, trying to turn that off would just be an ideological statement, not useful functionality. So how about just having a `__hash__` that raises an exception which points you at `attr.value`, and `attr.s` is `object`? Hi, I found this ticket because I was looking for an `immutable` argument to `@attr.s` like there was with characteristic :) ALSO: on the subject of "value types", I would like to point out my new library [sumtypes](https://pypi.python.org/pypi/sumtypes) (honestly I didn't come here looking for a place to advertise, but it seems relevant). `immutable` would be great to have for it too (and I probably _would_ make it the default in that library). I’m reluctant to implement immutable myself because it adds another method I have to highly invasively muck with (`__setattr__`). _But_ `attrs` [is extensible](https://attrs.readthedocs.org/en/stable/extending.html), so feel free to implement it yourself! @hynek I guess there's two separate aspects of immutability: making sure declared attributes are immutable, and making sure new attributes can't be dynamically assigned to. Neither of these actually require defining `__setattr__`. The former can be accomplished with the descriptor protocol, and the latter can be accomplished with `__slots__`. I assumed Attribute must have already implemented the descriptor protocol, but I see now I'm wrong, and, for example, mutating an attribute post-instantiation will allow setting it to a value that a validator wouldn't accept, because validators are not run on mutation. So maybe it would be best if Attribute instances do provide the descriptor protocol, both to support validating mutation as well as implementing immutability if it's requested? I kinda want to recommend setting up a performance test suite at this point, because while it seems to me like using the descriptor protocol _ought_ to be zero-cost on PyPy and "fast enough" on CPython, it also seems like it would be worth knowing that for sure. (Although I also assumed that attrs would validate on mutation and this is a slightly unpleasant surprise. Hmm.) attrs had validation on mutation once but there were so many loopholes that I decided to take it out because the performance hit for _everyone_ simply wasn’t worth it. I’m pretty sure that using descriptors is measurably slower. I’ll happily be proven wrong with a benchmark. :) What do you mean "so many loopholes"? Other than `__dict__.__setitem__` and `object.__setattr__` what else is there? @glyph well, there's also other methods on `__dict__` than `__setitem__`, so generalize that to "`__dict__` mutation" Personally I think it's worth doing validation with the descriptor protocol even though someone could bypass with `__dict__` mutation. However! It would also be possible to make naive dict mutation ineffective, since Python always consults a descriptor before a `__dict__` entry. 
It might have other downsides, but you could have the descriptors store their actual data under a different name in the instance dict, so at least someone would really know they're bypassing validation when they're assigning something to `__dict__['__attrs_x']`. But... yeah, that might be a bit too surprising/weird for people who do want to dig down into the representation. I mean, Pickling would still work just fine, but maybe there are some useful things that would break? Also, as a follow-up to my previous mention of `__slots__`, I've hit a snag there: apparently the way `__slots__` works is by creating descriptors on the class. Which means you can't use your _own_ descriptors (or even non-descriptors -- you can't even use class variables to declare defaults for instances, for example). The result of trying to add `__slots__` to a `@attrib.s`-using class is this: ``` >>> @attr.s ... class Foo(object): ... __slots__ = ('x', 'y') ... x = attr.ib() ... y = attr.ib() ... >>> Foo(1, 2) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<attrs generated init a7439b12cdcdd2d603c04767f9a798adb8a0d944>", line 2, in __init__ AttributeError: 'Foo' object attribute 'x' is read-only ``` There may be some clever trick I'm missing to still use `__slots__` (which actually would be nice to use on CPython if we could...), but I'm thinking @hynek is right and the only way to implement immutability would be by defining a `__setattr__` :-( Oh, never mind. There is a way to use both slots and descriptors: http://stackoverflow.com/questions/4912499/using-python-descriptors-with-slots It's just a matter of having the user's descriptor be named different from the slot. most obvious loophole: `instance.attribute.do_something_that_mutates_instance()`. I don’t want to sound like an ass but adding features that are invasive but only interesting to a small fraction of people made `characteristic` what it is so I’m much more conservative this time and rather made it extensible than implementing everything ppl shout at me. I’m open to descriptor-based solutions that have no negative impact on performance though. Mutability doesn't seem like a particularly obscure or minority concern :). That said, I'm not sure what you're objecting to. Doing validation on attributes by default? Doing validation on attributes at all? Or the actual topic of this bug, `@attr.value`, which would not be on by default anyway? Echo chamber. :) I’m objecting the tangent of validating on assignment. I don’t object immutability but I feel it should be a separate decorator. Maybe `@attr.value`? :) OK good. Let's have another issue to discuss that. For the case of `@attr.value` specifically, we could just override `__setattr__` to raise `AttributeError` after initialization. Does that seem sufficient? You mentioned not wanting to do it yourself, but nothing _except_ `value` would need to mess with it. Just to be clear: my main issue is not being lazy but wanting to keep attrs clean. :) Moving settattr magic into a separate decorator seems a fair compromise to me. Cool, thank you, I asked the question very awkwardly but that was exactly the form of answer I wanted :). See also #50 ?
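For reference, a minimal sketch of the `frozen=True` behaviour introduced by the patch above; attempted mutation raises `attr.exceptions.FrozenInstanceError`, which subclasses `AttributeError` to mirror `namedtuple`.

```python
import attr
from attr.exceptions import FrozenInstanceError


@attr.s(frozen=True, slots=True)
class Value(object):
    x = attr.ib()


v = Value(1)
try:
    v.x = 2
except FrozenInstanceError:
    pass  # instances can't be modified (easily) after __init__

assert v.x == 1
```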
2016-08-16T14:35:17Z
16
python-attrs/attrs
181
python-attrs__attrs-181
[ "165" ]
a328e671690be4e6342d50056d121506b64da9d3
diff --git a/src/attr/validators.py b/src/attr/validators.py --- a/src/attr/validators.py +++ b/src/attr/validators.py @@ -48,9 +48,9 @@ def instance_of(type): :param type: The type to check for. :type type: type or tuple of types - The :exc:`TypeError` is raised with a human readable error message, the - attribute (of type :class:`attr.Attribute`), the expected type, and the - value it got. + :raises TypeError: With a human readable error message, the attribute + (of type :class:`attr.Attribute`), the expected type, and the value it + got. """ return _InstanceOfValidator(type) @@ -87,9 +87,9 @@ def provides(interface): :param zope.interface.Interface interface: The interface to check for. - The :exc:`TypeError` is raised with a human readable error message, the - attribute (of type :class:`attr.Attribute`), the expected interface, and - the value it got. + :raises TypeError: With a human readable error message, the attribute + (of type :class:`attr.Attribute`), the expected interface, and the + value it got. """ return _ProvidesValidator(interface) @@ -127,3 +127,39 @@ def optional(validator): if isinstance(validator, list): return _OptionalValidator(_AndValidator(validator)) return _OptionalValidator(validator) + + +@attributes(repr=False, slots=True) +class _InValidator(object): + options = attr() + + def __call__(self, inst, attr, value): + if value not in self.options: + raise ValueError( + "'{name}' must be in {options!r} (got {value!r})" + .format(name=attr.name, options=self.options, value=value) + ) + + def __repr__(self): + return ( + "<in_ validator with options {options!r}>" + .format(options=self.options) + ) + + +def in_(options): + """ + A validator that raises a :exc:`ValueError` if the initializer is called + with a value that does not belong in the options provided. The check is + performed using ``value in options``. + + :param options: Allowed options. + :type options: list, tuple, :class:`enum.Enum`, ... + + :raises ValueError: With a human readable error message, the attribute (of + type :class:`attr.Attribute`), the expected options, and the value it + got. + + .. versionadded:: 17.1.0 + """ + return _InValidator(options)
diff --git a/tests/test_validators.py b/tests/test_validators.py --- a/tests/test_validators.py +++ b/tests/test_validators.py @@ -7,7 +7,7 @@ import pytest import zope.interface -from attr.validators import and_, instance_of, provides, optional +from attr.validators import and_, instance_of, provides, optional, in_ from attr._compat import TYPE from attr._make import attributes, attr @@ -214,3 +214,37 @@ def test_repr(self, validator): "<{type} 'int'>> or None>") .format(type=TYPE) ) == repr(v) + + +class TestIn_(object): + """ + Tests for `in_`. + """ + def test_success_with_value(self): + """ + If the value is in our options, nothing happens. + """ + v = in_([1, 2, 3]) + a = simple_attr("test") + v(1, a, 3) + + def test_fail(self): + """ + Raise ValueError if the value is outside our options. + """ + v = in_([1, 2, 3]) + a = simple_attr("test") + with pytest.raises(ValueError) as e: + v(None, a, None) + assert ( + "'test' must be in [1, 2, 3] (got None)", + ) == e.value.args + + def test_repr(self): + """ + Returned validator has a useful `__repr__`. + """ + v = in_([3, 4, 5]) + assert( + ("<in_ validator with options [3, 4, 5]>") + ) == repr(v)
attr default based on other attributes Hi! I have unsucessfully tried to define a default value by referencing other attributes. I'm sure the code below doesnt' work for some obvious or fundamental reason, but I would be grateful for comments on how to do something like it: ```python import attr from attr.validators import instance_of import datetime @attr.s class Something: some_date = attr.ib(validator=instance_of(datetime.date)) some_number = attr.ib(convert=float) name = attr.ib(validator=instance_of(str), default="Generic Name {0} - {1}%".format( some_date.strftime("%d-%b-%Y"), some_number * 100) ) s = Something(some_date=datetime.date.today(), some_number=0.375) ``` I included the `.strftime()` conversion to highlight that `name` doesn't see a float and a date, but a `_CountingAttr` object, hence I get an AttributeError (and a TypeError for `some_number * 100`). Since I can't reference self either, what would be the correct way to do this?
Hi, for now I suggest using `__attrs_post_init__` (http://attrs.readthedocs.io/en/stable/examples.html?highlight=attrs_post_init#other-goodies). The linked example is basically what you want. I have wanted to do this several times. For example, you have a test of a function which takes many inputs. You want to test this function with many different cases. So (of course) you create a quick e.g. `@attr.s class TestInput:...` for that. In many cases you want some attribute to have a default depending on other attributes, but still be able to override it. The existing options are not so good for this: - I think you can use `these` for this but it's cumbersome. - `__attrs_post_init__` is not good because you can't override it. What I'd want, following the ad-hoc validators example, is something like this: ``` @attr.s class C: x = attr.ib() y = attr.ib() z = attr.ib() @z.default def z_default(self, attribute): return self.x + self.y ``` This is sensitive to the initialization order. However, if you have a stateful factory, the order is already important, so this is not new. And the natural order is the definition order which is intuitive. Another issue is that this makes it possible to specify multiple defaults. I would just raise an error if this is detected. Final issue I can think of is that an occasional user might confuse this with a property, while it does not behave like a property: - The value is persisted. - In mutable objects, the value doesn't change if the dependencies change, unlike a dynamically-computed property. But I think "default" is clear on the behavior it has. At first glance I like the proposed API :) I guess we could have a lot fun with decorators based on `_CountingAttr`. I *think* I want this in 17.1 but it entirely depends on the goodwill of reviewers (most likely @Tinche – I *gotta* shanghai some innocent souls at PyCon).
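A minimal sketch of the `__attrs_post_init__` workaround suggested in the first reply, adapted from the reporter's example (assuming an attrs version that already supports `__attrs_post_init__`; the keyword is still spelled `convert` in this era, and the `str` validator is dropped because `name` stays `None` until post-init runs):

```python
import datetime

import attr
from attr.validators import instance_of


@attr.s
class Something(object):
    some_date = attr.ib(validator=instance_of(datetime.date))
    some_number = attr.ib(convert=float)
    name = attr.ib(default=None)

    def __attrs_post_init__(self):
        # Compute the default from the other attributes after they are set.
        if self.name is None:
            self.name = "Generic Name {0} - {1}%".format(
                self.some_date.strftime("%d-%b-%Y"), self.some_number * 100
            )


s = Something(some_date=datetime.date.today(), some_number=0.375)
# s.name is now e.g. "Generic Name 01-May-2017 - 37.5%"
```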
2017-05-01T17:45:41Z
16.3
python-attrs/attrs
186
python-attrs__attrs-186
[ "161" ]
fdfd51e249f11483a9f731a4f19282df0f97e1e7
diff --git a/src/attr/_make.py b/src/attr/_make.py --- a/src/attr/_make.py +++ b/src/attr/_make.py @@ -897,7 +897,7 @@ def __init__(self, default, validator, repr, cmp, hash, init, convert, self.default = default # If validator is a list/tuple, wrap it using helper validator. if validator and isinstance(validator, (list, tuple)): - self._validator = _AndValidator(tuple(validator)) + self._validator = and_(*validator) else: self._validator = validator self.repr = repr @@ -911,37 +911,18 @@ def validator(self, meth): """ Decorator that adds *meth* to the list of validators. - Returns meth unchanged. + Returns *meth* unchanged. """ - if not isinstance(self._validator, _AndValidator): - self._validator = _AndValidator( - (self._validator,) if self._validator else () - ) - self._validator.add(meth) + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) return meth _CountingAttr = _add_cmp(_add_repr(_CountingAttr)) -@attributes(slots=True) -class _AndValidator(object): - """ - Compose many validators to a single one. - """ - _validators = attr() - - def __call__(self, inst, attr, value): - for v in self._validators: - v(inst, attr, value) - - def add(self, validator): - """ - Add *validator*. Shouldn't be called after the class is done. - """ - self._validators += (validator,) - - @attributes(slots=True) class Factory(object): """ @@ -981,3 +962,40 @@ def make_class(name, attrs, bases=(object,), **attributes_arguments): raise TypeError("attrs argument must be a dict or a list.") return attributes(**attributes_arguments)(type(name, bases, cls_dict)) + + +# These are required by whithin this module so we define them here and merely +# import into .validators. + + +@attributes(slots=True) +class _AndValidator(object): + """ + Compose many validators to a single one. + """ + _validators = attr() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + :param validators: Arbitrary number of validators. + :type validators: callables + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) diff --git a/src/attr/validators.py b/src/attr/validators.py --- a/src/attr/validators.py +++ b/src/attr/validators.py @@ -4,7 +4,15 @@ from __future__ import absolute_import, division, print_function -from ._make import attr, attributes +from ._make import attr, attributes, and_, _AndValidator + + +__all__ = [ + "and_", + "instance_of", + "optional", + "provides", +] @attributes(repr=False, slots=True) @@ -93,12 +101,13 @@ class _OptionalValidator(object): def __call__(self, inst, attr, value): if value is None: return - return self.validator(inst, attr, value) + + self.validator(inst, attr, value) def __repr__(self): return ( - "<optional validator for {type} or None>" - .format(type=repr(self.validator)) + "<optional validator for {what} or None>" + .format(what=repr(self.validator)) ) @@ -108,6 +117,13 @@ def optional(validator): which can be set to ``None`` in addition to satisfying the requirements of the sub-validator. - :param validator: A validator that is used for non-``None`` values. + :param validator: A validator (or a list of validators) that is used for + non-``None`` values. 
+ :type validator: callable or :class:`list` of callables. + + .. versionadded:: 15.1.0 + .. versionchanged:: 17.1.0 *validator* can be a list of validators. """ + if isinstance(validator, list): + return _OptionalValidator(_AndValidator(validator)) return _OptionalValidator(validator)
diff --git a/tests/test_make.py b/tests/test_make.py --- a/tests/test_make.py +++ b/tests/test_make.py @@ -21,6 +21,7 @@ _AndValidator, _CountingAttr, _transform_attrs, + and_, attr, attributes, fields, @@ -71,6 +72,8 @@ def v2(_, __): def test_validator_decorator_single(self): """ + If _CountingAttr.validator is used as a decorator and there is no + decorator set, the decorated method is used as the validator. """ a = attr() @@ -78,17 +81,23 @@ def test_validator_decorator_single(self): def v(): pass - assert _AndValidator((v,)) == a._validator + assert v == a._validator - def test_validator_decorator(self): + @pytest.mark.parametrize("wrap", [ + lambda v: v, + lambda v: [v], + lambda v: and_(v) + + ]) + def test_validator_decorator(self, wrap): """ - If _CountingAttr.validator is used as a decorator, the decorated method - is added to validators. + If _CountingAttr.validator is used as a decorator and there is already + a decorator set, the decorators are composed using `and_`. """ def v(_, __): pass - a = attr(validator=[v]) + a = attr(validator=wrap(v)) @a.validator def v2(self, _, __): diff --git a/tests/test_validators.py b/tests/test_validators.py --- a/tests/test_validators.py +++ b/tests/test_validators.py @@ -7,8 +7,9 @@ import pytest import zope.interface -from attr.validators import instance_of, provides, optional +from attr.validators import and_, instance_of, provides, optional from attr._compat import TYPE +from attr._make import attributes, attr from .utils import simple_attr @@ -58,6 +59,53 @@ def test_repr(self): ) == repr(v) +def always_pass(_, __, ___): + """ + Toy validator that always passses. + """ + + +def always_fail(_, __, ___): + """ + Toy validator that always fails. + """ + 0/0 + + +class TestAnd(object): + def test_success(self): + """ + Succeeds if all wrapped validators succeed. + """ + v = and_(instance_of(int), always_pass) + + v(None, simple_attr("test"), 42) + + def test_fail(self): + """ + Fails if any wrapped validator fails. + """ + v = and_(instance_of(int), always_fail) + + with pytest.raises(ZeroDivisionError): + v(None, simple_attr("test"), 42) + + def test_sugar(self): + """ + `and_(v1, v2, v3)` and `[v1, v2, v3]` are equivalent. + """ + @attributes + class C(object): + a1 = attr("a1", validator=and_( + instance_of(int), + )) + a2 = attr("a2", validator=[ + instance_of(int), + ]) + + assert C.__attrs_attrs__[0].validator == C.__attrs_attrs__[1].validator + + class IFoo(zope.interface.Interface): """ An interface. @@ -111,29 +159,33 @@ def test_repr(self): ) == repr(v) +@pytest.mark.parametrize("validator", [ + instance_of(int), + [always_pass, instance_of(int)], +]) class TestOptional(object): """ Tests for `optional`. """ - def test_success_with_type(self): + def test_success(self, validator): """ - Nothing happens if types match. + Nothing happens if validator succeeds. """ - v = optional(instance_of(int)) + v = optional(validator) v(None, simple_attr("test"), 42) - def test_success_with_none(self): + def test_success_with_none(self, validator): """ Nothing happens if None. """ - v = optional(instance_of(int)) + v = optional(validator) v(None, simple_attr("test"), None) - def test_fail(self): + def test_fail(self, validator): """ Raises `TypeError` on wrong types. 
""" - v = optional(instance_of(int)) + v = optional(validator) a = simple_attr("test") with pytest.raises(TypeError) as e: v(None, a, "42") @@ -144,13 +196,21 @@ def test_fail(self): ) == e.value.args - def test_repr(self): + def test_repr(self, validator): """ Returned validator has a useful `__repr__`. """ - v = optional(instance_of(int)) - assert ( - ("<optional validator for <instance_of validator for type " - "<{type} 'int'>> or None>") - .format(type=TYPE) - ) == repr(v) + v = optional(validator) + + if isinstance(validator, list): + assert ( + ("<optional validator for _AndValidator(_validators=[{func}, " + "<instance_of validator for type <{type} 'int'>>]) or None>") + .format(func=repr(always_pass), type=TYPE) + ) == repr(v) + else: + assert ( + ("<optional validator for <instance_of validator for type " + "<{type} 'int'>> or None>") + .format(type=TYPE) + ) == repr(v)
validators.optional should take lists like validator=
We’ve added support for passing a list of validators to validator=; optional() should be able to take lists too, otherwise users have to wrap all of the validators in the list with optional() themselves.
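A minimal sketch of the usage this enables per the patch above (`optional()` now also accepts a list and composes it the same way `validator=` does; the `positive` validator is just an illustrative helper):

```python
import attr
from attr.validators import instance_of, optional


def positive(instance, attribute, value):
    if value <= 0:
        raise ValueError("{0} must be positive".format(attribute.name))


@attr.s
class C(object):
    x = attr.ib(validator=optional([instance_of(int), positive]))


C(x=None)  # None short-circuits the wrapped validators
C(x=42)    # runs both instance_of(int) and positive
```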
2017-05-10T09:52:56Z
16.3
mochajs/mocha
5292
mochajs__mocha-5292
[ "5289" ]
b1f1cb78b655191b7a43dc962b513bf1b076890c
diff --git a/lib/reporters/xunit.js b/lib/reporters/xunit.js --- a/lib/reporters/xunit.js +++ b/lib/reporters/xunit.js @@ -104,7 +104,7 @@ function XUnit(runner, options) { ); tests.forEach(function (t) { - self.test(t); + self.test(t, options); }); self.write('</testsuite>'); @@ -152,13 +152,13 @@ XUnit.prototype.write = function (line) { * * @param {Test} test */ -XUnit.prototype.test = function (test) { +XUnit.prototype.test = function (test, options) { Base.useColors = false; var attrs = { classname: test.parent.fullTitle(), name: test.title, - file: test.file, + file: testFilePath(test.file, options), time: test.duration / 1000 || 0 }; @@ -215,4 +215,12 @@ function tag(name, attrs, close, content) { return tag; } +function testFilePath(filepath, options) { + if (options && options.reporterOptions && options.reporterOptions.showRelativePaths) { + return path.relative(process.cwd(), filepath); + } + + return filepath; +} + XUnit.description = 'XUnit-compatible XML output';
diff --git a/test/reporters/xunit.spec.js b/test/reporters/xunit.spec.js --- a/test/reporters/xunit.spec.js +++ b/test/reporters/xunit.spec.js @@ -592,4 +592,69 @@ describe('XUnit reporter', function () { expect(lines[0], 'to contain', defaultSuiteName); }); }); + + describe('showRelativePaths reporter option', function () { + const projectPath = path.join('home', 'username', 'demo-project'); + const relativeTestPath = path.join('tests', 'demo-test.spec.js'); + const absoluteTestPath = path.join(projectPath, relativeTestPath); + + var expectedWrite = ''; + const fakeThis = { + write: function (str) { + expectedWrite = expectedWrite + str; + } + }; + + const failingTest = { + state: STATE_FAILED, + title: expectedTitle, + file: absoluteTestPath, + parent: { + fullTitle: function () { + return expectedClassName; + } + }, + duration: 1000, + err: { + actual: 'foo', + expected: 'bar', + message: expectedMessage, + stack: expectedStack + } + }; + + beforeEach(function () { + sinon.stub(process, 'cwd').returns(projectPath); + }); + + afterEach(function () { + sinon.restore(); + expectedWrite = ''; + }); + + it('shows relative paths for tests if showRelativePaths reporter option is set', function () { + const options = { + reporterOptions: { + showRelativePaths: true + } + }; + const xunit = new XUnit(runner, options); + + xunit.test.call(fakeThis, failingTest, options); + + expect(expectedWrite, 'not to contain', absoluteTestPath); + expect(expectedWrite, 'to contain', relativeTestPath); + }); + + it('shows absolute paths for tests by default', function () { + const options = {}; + const xunit = new XUnit(runner); + + xunit.test.call(fakeThis, failingTest, options); + + expect(expectedWrite, 'to contain', absoluteTestPath); + // Double quote included to ensure printed paths don't start with relative path. Example printed line: <testcase classname="suite" name="test" file="some/tesfile.js" time="0"/> + expect(expectedWrite, 'not to contain', `"${relativeTestPath}`); + }); + }); });
🚀 Feature: allow using test file's relative path in xunit reporter output ### Feature Request Checklist - [x] I have read and agree to Mocha's [Code of Conduct](https://github.com/mochajs/mocha/blob/main/.github/CODE_OF_CONDUCT.md) and [Contributing Guidelines](https://github.com/mochajs/mocha/blob/main/.github/CONTRIBUTING.md) - [x] I have searched for [related issues](https://github.com/mochajs/mocha/issues?q=is%3Aissue) and [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20), but none matched my issue. - [ ] I want to provide a PR to resolve this ### Overview Today, the `xunit` reporter always fills the `file` attribute with the fully qualified path to the file. ```shell > npx mocha --reporter=xunit <testsuite name="Mocha Tests" tests="1" failures="0" errors="0" skipped="0" timestamp="Thu, 30 Jan 2025 00:11:09 GMT" time="0.001"> <testcase classname="add" name="should add two numbers" file="/private/tmp/test-mocha/test/add.test.js" time="0"/> </testsuite> ``` In CircleCI, when a test fails, they provide a dedicated UI that reads junit-compatible `xml` files and shows failures in a user-friendly way: ![Image](https://github.com/user-attachments/assets/75317e18-b050-4dc3-8fa2-bd63f4e98abd) However, because the fully qualified path is provided, I have to delete the prefix (`/home/circleci/project`) to paste it into my editor to pull up the file. With my jest test suites and the junit reporter, I can easily just click the "copy" icon because it's the relative path: ![Image](https://github.com/user-attachments/assets/69652c3f-0651-4db1-a64d-0d682b80b17b) ### Suggested Solution We should introduce a new `reporterOption` for this feature. Perhaps it's called `useRelativeFilePath` and defaults to `false` to retain backward compatibility. We could alternatively use a mode like `filePathMode: 'relative' | 'absolute'`, defaulting to `absolute`. ### Alternatives For now, I'm just using `sed` to edit the file after it's produced. But it would be nice to remove this additional step. ### Additional Info _No response_
👍 this strikes me as a very reasonable feature request. Absolute paths in CI output are irksome in a lot of contexts. The irritation of constantly having to remove the absolute prefix is definitely something I resonate with! Accepting PRs as an opt-in reporter option. Thanks for filing! Thanks, in the meantime this is what I'm doing, in case it helps others: ```yml - run: name: Make JUnit XML report contain relative paths # Work around https://github.com/mochajs/mocha/issues/5289 command: | sed -i "s|file=\"$(pwd)/|file=\"|" ./junit-reports/mocha.xml ``` Folks can adapt this pattern pretty easily to other CI platforms too 😄
2025-02-05T19:45:27Z
11.1
mochajs/mocha
5325
mochajs__mocha-5325
[ "5310" ]
1a0caf6b653d39b9fb09cde6ee1e92a075be8f4b
diff --git a/docs-next/public/example/Array.js b/docs-next/public/example/Array.js new file mode 100644 --- /dev/null +++ b/docs-next/public/example/Array.js @@ -0,0 +1,75 @@ +"use strict"; + +describe('Array', function () { + describe('.push()', function () { + it('should append a value', function () { + var arr = []; + arr.push('foo'); + arr.push('bar'); + expect(arr[0]).to.equal('foo'); + expect(arr[1]).to.equal('bar'); + }) + + it('should return the length', function () { + var arr = []; + var n = arr.push('foo'); + expect(n).to.equal(1); + n = arr.push('bar'); + expect(n).to.equal(2); + }) + + describe('with many arguments', function () { + it('should add the values', function () { + var arr = []; + arr.push('foo', 'bar'); + expect(arr[0]).to.equal('foo'); + expect(arr[1]).to.equal('bar'); + }) + }) + }) + + describe('.unshift()', function () { + it('should prepend a value', function () { + var arr = [1, 2, 3]; + arr.unshift('foo'); + expect(arr[0]).to.equal('foo'); + expect(arr[1]).to.equal(1); + }) + + it('should return the length', function () { + var arr = []; + var n = arr.unshift('foo'); + expect(n).to.equal(1); + n = arr.unshift('bar'); + expect(n).to.equal(2); + }) + + describe('with many arguments', function () { + it('should add the values', function () { + var arr = []; + arr.unshift('foo', 'bar'); + expect(arr[0]).to.equal('foo'); + expect(arr[1]).to.equal('bar'); + }) + }) + }) + + describe('.pop()', function () { + it('should remove and return the last value', function () { + var arr = [1, 2, 3]; + expect(arr.pop()).to.equal(3); + expect(arr.pop()).to.equal(2); + expect(arr).to.have.length(1); + }) + }) + + describe('.shift()', function () { + it('should remove and return the first value', function () { + var arr = [1, 2, 3]; + expect(arr.shift()).to.equal(1); + expect(arr.shift()).to.equal(2); + expect(arr).to.have.length(1); + }) + }) +}) +
diff --git a/docs-next/public/example/tests.html b/docs-next/public/example/tests.html new file mode 100644 --- /dev/null +++ b/docs-next/public/example/tests.html @@ -0,0 +1,20 @@ +<!DOCTYPE html> +<html lang="en"> + <head> + <meta charset="utf-8"> + <title>Mocha</title> + <link rel="stylesheet" href="https://unpkg.com/mocha/mocha.css"> + <link rel="shortcut icon" href="../favicon.svg"> + </head> + <body> + <div id="mocha"></div> + <script src="https://unpkg.com/mocha/mocha.js"></script> + <script src="https://unpkg.com/chai@4.5.0/chai.js"></script> + <script>mocha.setup('bdd')</script> + <script>expect = chai.expect</script> + <script src="Array.js"></script> + <script> + mocha.run(); + </script> + </body> +</html>
📝 Docs: Add an `/example/tests` page to the new website ### Documentation Request Checklist - [x] I have read and agree to Mocha's [Code of Conduct](https://github.com/mochajs/mocha/blob/main/.github/CODE_OF_CONDUCT.md) and [Contributing Guidelines](https://github.com/mochajs/mocha/blob/main/.github/CONTRIBUTING.md) - [x] I have searched for [related issues](https://github.com/mochajs/mocha/issues?q=is%3Aissue) and [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20), but none matched my issue. - [x] I want to provide a PR to resolve this ### Overview https://mochajs.org/example/tests.html contains an example of Mocha in the browser. It's a nice page to send people as a reference. I think we'll want to add an equivalent to the new website too, right? ### Additional Info _No response_
2025-04-08T12:23:43Z
11.2
mochajs/mocha
5165
mochajs__mocha-5165
[ "4903" ]
6caa9026eb120b136dc8210614b31310f8bff83b
diff --git a/lib/cli/cli.js b/lib/cli/cli.js --- a/lib/cli/cli.js +++ b/lib/cli/cli.js @@ -12,7 +12,7 @@ const debug = require('debug')('mocha:cli:cli'); const symbols = require('log-symbols'); -const yargs = require('yargs/yargs'); +const yargs = require('yargs'); const path = require('path'); const { loadRc, @@ -63,7 +63,7 @@ exports.main = (argv = process.argv.slice(2), mochaArgs) => { debug('caught error sometime before command handler: %O', err); yargs.showHelp(); console.error(`\n${symbols.error} ${ansi.red('ERROR:')} ${msg}`); - process.exitCode = 1; + process.exit(1); }) .help('help', 'Show usage information & exit') .alias('help', 'h') @@ -71,7 +71,7 @@ exports.main = (argv = process.argv.slice(2), mochaArgs) => { .alias('version', 'V') .wrap(process.stdout.columns ? Math.min(process.stdout.columns, 80) : 80) .epilog( - `Mocha Resources + `${ansi.reset("Mocha Resources")} Chat: ${ansi.magenta(discord)} GitHub: ${ansi.blue(repository.url)} Docs: ${ansi.yellow(homepage)} diff --git a/lib/cli/run.js b/lib/cli/run.js --- a/lib/cli/run.js +++ b/lib/cli/run.js @@ -169,11 +169,11 @@ exports.builder = yargs => group: GROUPS.RULES }, 'list-interfaces': { - conflicts: Array.from(ONE_AND_DONE_ARGS), + conflicts: Array.from(ONE_AND_DONE_ARGS).filter(arg => arg !== "list-interfaces"), description: 'List built-in user interfaces & exit' }, 'list-reporters': { - conflicts: Array.from(ONE_AND_DONE_ARGS), + conflicts: Array.from(ONE_AND_DONE_ARGS).filter(arg => arg !== "list-reporters"), description: 'List built-in reporters & exit' }, 'no-colors': { diff --git a/lib/mocha.js b/lib/mocha.js --- a/lib/mocha.js +++ b/lib/mocha.js @@ -194,7 +194,9 @@ function Mocha(options = {}) { .ui(options.ui) .reporter( options.reporter, - options.reporterOption || options.reporterOptions // for backwards compatibility + options['reporter-option'] || + options.reporterOption || + options.reporterOptions // for backwards compatibility ) .slow(options.slow) .global(options.global);
diff --git a/test/node-unit/cli/run.spec.js b/test/node-unit/cli/run.spec.js --- a/test/node-unit/cli/run.spec.js +++ b/test/node-unit/cli/run.spec.js @@ -7,7 +7,7 @@ describe('command', function () { describe('run', function () { describe('builder', function () { const IGNORED_OPTIONS = new Set(['help', 'version']); - const options = builder(require('yargs/yargs')().reset()).getOptions(); + const options = builder(require('yargs')()).getOptions(); ['number', 'string', 'boolean', 'array'].forEach(type => { describe(`${type} type`, function () { Array.from(new Set(options[type])).forEach(option => {
🔒 Security: Upgrade yargs-parser and yargs to latest stable version
Currently mocha@10.0.0 has not upgraded its yargs-parser and yargs dependencies, which causes a security vulnerability (no CVE: Regular Expression Denial of Service, ReDoS). Please upgrade both to the latest stable versions. Thank you. Attached are the vulnerability report and the latest stable releases in the npm package library.
![image](https://user-images.githubusercontent.com/109644072/179895841-a95810b7-dc70-45e4-9886-98cdc58efe6f.png)
![image](https://user-images.githubusercontent.com/109644072/179895952-0c16c07f-acb3-4bce-bce9-11bb65914235.png)
Snyk scan is also flagging the Mocha ReDoS as a High Risk Vulnerability: https://security.snyk.io/vuln/SNYK-JS-MOCHA-2863123
See also #4938 and #4809
Any news about updating yargs-* to latest stable version?
Marking as accepting PRs. Note that Mocha's current major version supports Node 14, so any version of a new package must also support 14.
2024-07-02T16:25:28Z
11
mochajs/mocha
5231
mochajs__mocha-5231
[ "5202" ]
14e640ee49718d587779a9594b18f3796c42cf2a
diff --git a/lib/interfaces/common.js b/lib/interfaces/common.js --- a/lib/interfaces/common.js +++ b/lib/interfaces/common.js @@ -57,7 +57,7 @@ module.exports = function (suites, context, mocha) { * @param {Function} fn */ before: function (name, fn) { - suites[0].beforeAll(name, fn); + return suites[0].beforeAll(name, fn); }, /** @@ -67,7 +67,7 @@ module.exports = function (suites, context, mocha) { * @param {Function} fn */ after: function (name, fn) { - suites[0].afterAll(name, fn); + return suites[0].afterAll(name, fn); }, /** @@ -77,7 +77,7 @@ module.exports = function (suites, context, mocha) { * @param {Function} fn */ beforeEach: function (name, fn) { - suites[0].beforeEach(name, fn); + return suites[0].beforeEach(name, fn); }, /** @@ -87,7 +87,7 @@ module.exports = function (suites, context, mocha) { * @param {Function} fn */ afterEach: function (name, fn) { - suites[0].afterEach(name, fn); + return suites[0].afterEach(name, fn); }, suite: { diff --git a/lib/suite.js b/lib/suite.js --- a/lib/suite.js +++ b/lib/suite.js @@ -257,7 +257,7 @@ Suite.prototype.beforeAll = function (title, fn) { var hook = this._createHook(title, fn); this._beforeAll.push(hook); this.emit(constants.EVENT_SUITE_ADD_HOOK_BEFORE_ALL, hook); - return this; + return hook; }; /** @@ -281,7 +281,7 @@ Suite.prototype.afterAll = function (title, fn) { var hook = this._createHook(title, fn); this._afterAll.push(hook); this.emit(constants.EVENT_SUITE_ADD_HOOK_AFTER_ALL, hook); - return this; + return hook; }; /** @@ -305,7 +305,7 @@ Suite.prototype.beforeEach = function (title, fn) { var hook = this._createHook(title, fn); this._beforeEach.push(hook); this.emit(constants.EVENT_SUITE_ADD_HOOK_BEFORE_EACH, hook); - return this; + return hook; }; /** @@ -329,7 +329,7 @@ Suite.prototype.afterEach = function (title, fn) { var hook = this._createHook(title, fn); this._afterEach.push(hook); this.emit(constants.EVENT_SUITE_ADD_HOOK_AFTER_EACH, hook); - return this; + return hook; }; /**
diff --git a/test/unit/timeout.spec.js b/test/unit/timeout.spec.js --- a/test/unit/timeout.spec.js +++ b/test/unit/timeout.spec.js @@ -70,5 +70,31 @@ describe('timeouts', function () { }); }); }); + + describe('chaining calls', function () { + before(function (done) { + setTimeout(function () { + done(); + }, 50); + }).timeout(1500); + + it('should allow overriding via chaining', function (done) { + setTimeout(function () { + done(); + }, 50); + }).timeout(1500); + + describe('suite-level', function () { + it('should work with timeout(0)', function (done) { + setTimeout(done, 1); + }); + + describe('nested suite', function () { + it('should work with timeout(0)', function (done) { + setTimeout(done, 1); + }); + }); + }).timeout(1000); + }); }); });
🚀 Feature: Allow to set timeout on before/after/beforeEach/afterEach in declaration ### Feature Request Checklist - [X] I have read and agree to Mocha's [Code of Conduct](https://github.com/mochajs/mocha/blob/main/.github/CODE_OF_CONDUCT.md) and [Contributing Guidelines](https://github.com/mochajs/mocha/blob/main/.github/CONTRIBUTING.md) - [X] I have searched for [related issues](https://github.com/mochajs/mocha/issues?q=is%3Aissue) and [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20), but none matched my issue. - [ ] I want to provide a PR to resolve this ### Overview `it` returns a `TestFunction` that has a `timeout` method. This is useful for setting timeouts in slow tests, without needing to define the test code inside a `function() {}` block. An analogous feature would be useful for the `before/beforeEach/after/afterEach` hooks. The current workaround would be: ```javascript before(function() { this.timeout(3000); }); ``` ### Suggested Solution `before/beforeEach/after/afterEach` could return a `HookFunction`, analogous to `TestFunction`, which has a `timeout` method. The code above could be rewritten: ```javascript before(() => {}).timeout(3000); ``` ### Alternatives None ### Additional Info _No response_
👍 Agreed, I'm surprised they didn't have this already! This took me a bit to parse through as someone who doesn't use those APIs much. Putting a summary here for clarity... There are two ways to describe a timeout: * `this.timeout(...);`: allowed for _hooks_ (`before(...)`, etc.) as well as _tests_ (`it(...)`, etc.) * `test.timeout(...)`: only allowed for _tests_ Assuming I'm understanding correctly, this issue is asking to allow `hook.timeout(...)`. Doing so would mirror the `test.timeout(...)` already allowed. @JoshuaKGoldberg thanks for summarizing it more clearly than the original post 🙂 ok, I just looked into the codebase, and I think this will be very simple to implement, but I am not sure about potential implications. I will send a PR shortly
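For illustration, a minimal sketch contrasting the pre-existing `this.timeout(...)` workaround with the chained hook form this change enables (the chained call assumes `before` now returns the created hook, as the accompanying test patch exercises); both snippets are meant to live in an ordinary Mocha test file:

```javascript
// Existing workaround: set the timeout from inside the hook body.
// Note this needs a classic `function`, not an arrow function, so `this` is the hook context.
before(function (done) {
  this.timeout(3000);
  setTimeout(done, 50);
});

// Form enabled by the change: chain `.timeout()` on the returned hook,
// mirroring what `it(...).timeout(...)` already allows for tests.
before(function (done) {
  setTimeout(done, 50);
}).timeout(3000);
```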
2024-10-14T03:36:54Z
10.8
mochajs/mocha
5,198
mochajs__mocha-5198
[ "5141" ]
d5766c887e72b1bb55d5efeac33b1cadd0544b84
diff --git a/lib/cli/options.js b/lib/cli/options.js --- a/lib/cli/options.js +++ b/lib/cli/options.js @@ -181,8 +181,24 @@ const loadPkgRc = (args = {}) => { result = {}; const filepath = args.package || findUp.sync(mocharc.package); if (filepath) { + let configData; try { - const pkg = JSON.parse(fs.readFileSync(filepath, 'utf8')); + configData = fs.readFileSync(filepath, 'utf8'); + } catch (err) { + // If `args.package` was explicitly specified, throw an error + if (filepath == args.package) { + throw createUnparsableFileError( + `Unable to read ${filepath}: ${err}`, + filepath + ); + } else { + debug('failed to read default package.json at %s; ignoring', + filepath); + return result; + } + } + try { + const pkg = JSON.parse(configData); if (pkg.mocha) { debug('`mocha` prop of package.json parsed: %O', pkg.mocha); result = pkg.mocha; @@ -190,13 +206,11 @@ const loadPkgRc = (args = {}) => { debug('no config found in %s', filepath); } } catch (err) { - if (args.package) { - throw createUnparsableFileError( - `Unable to read/parse ${filepath}: ${err}`, - filepath - ); - } - debug('failed to read default package.json at %s; ignoring', filepath); + // If JSON failed to parse, throw an error. + throw createUnparsableFileError( + `Unable to parse ${filepath}: ${err}`, + filepath + ); } } return result;
diff --git a/test/node-unit/cli/options.spec.js b/test/node-unit/cli/options.spec.js --- a/test/node-unit/cli/options.spec.js +++ b/test/node-unit/cli/options.spec.js @@ -149,7 +149,7 @@ describe('options', function () { loadOptions('--package /something/wherever --require butts'); }, 'to throw', - 'Unable to read/parse /something/wherever: bad file message' + 'Unable to read /something/wherever: bad file message' ); }); }); @@ -199,6 +199,36 @@ describe('options', function () { }); }); + describe('when path to package.json unspecified and package.json exists but is invalid', function () { + beforeEach(function () { + const filepath = '/some/package.json'; + readFileSync = sinon.stub(); + // package.json + readFileSync + .onFirstCall() + .returns('{definitely-invalid'); + findConfig = sinon.stub().returns('/some/.mocharc.json'); + loadConfig = sinon.stub().returns({}); + findupSync = sinon.stub().returns(filepath); + loadOptions = proxyLoadOptions({ + readFileSync, + findConfig, + loadConfig, + findupSync + }); + }); + + it('should throw', function () { + expect( + () => { + loadOptions(); + }, + 'to throw', + /SyntaxError/, + ); + }); + }); + describe('when called with package = false (`--no-package`)', function () { let result; beforeEach(function () { @@ -287,7 +317,7 @@ describe('options', function () { }); it('should set config = false', function () { - expect(loadOptions(), 'to have property', 'config', false); + expect(result, 'to have property', 'config', false); }); });
🐛 Bug: mocha fails silently on invalid `package.json` section ### Bug Report Checklist - [X] I have read and agree to Mocha's [Code of Conduct](https://github.com/mochajs/mocha/blob/master/.github/CODE_OF_CONDUCT.md) and [Contributing Guidelines](https://github.com/mochajs/mocha/blob/master/.github/CONTRIBUTING.md) - [X] I have searched for [related issues](https://github.com/mochajs/mocha/issues?q=is%3Aissue) and [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20), but none matched my issue. - [X] I have 'smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, my usage of Mocha, or Mocha itself. - [X] I want to provide a PR to resolve this ### Expected I made an error in my `package.json` (an extra comma) because JSON is a bad format for configuration files ;-) like this: ```json "mocha": { "spec": "./*.spec.js", } ``` I expected `mocha` to give me an error, ideally a bit friendlier than the one from `npm`, but something like that: ``` $ npm install npm ERR! code EJSONPARSE ... npm ERR! JSON.parse Note: package.json must be actual JSON, not just JavaScript. ``` ### Actual Mocha just failed as if I hadn't configured it at all. This was very confusing: ``` $ npx mocha Error: No test files found: "test" ``` ### Minimal, Reproducible Example See "expected" above but basically: ``` npm install -D mocha ``` then add the offending section to `package.json`, then create `foo.spec.js`, then try to run `mocha`. ### Versions ` 10.4.0 10.4.0 v20.11.0 ` ### Additional Info I could definitely provide a PR if you could direct me to the appropriate place in the code (with which I am not familiar)
🤔 I don't reproduce this in https://github.com/mochajs/mocha-examples/tree/4b00891d6c7886f2d451e962a974478e7d3c1aa9/packages/hello-world. After adding an invalid `abc` to the top of its `package.json`: ```plaintext $ npm run test npm ERR! code EJSONPARSE npm ERR! JSON.parse Invalid package.json: JSONParseError: Unexpected token 'a', "abc npm ERR! JSON.parse { npm ERR! JSON.parse "n"... is not valid JSON while parsing 'abc npm ERR! JSON.parse { npm ERR! JSON.parse "name": "hello-world", npm ERR! JSON.parse "versio' npm ERR! JSON.parse Failed to parse JSON data. npm ERR! JSON.parse Note: package.json must be actual JSON, not just JavaScript. ``` Could you post a standalone reproduction please @dhdaines? Relevant piece of code in mocha: https://github.com/mochajs/mocha/blob/c44653a3a04b8418ec24a942fa7513a4673f3667/lib/cli/options.js#L182-L201 If one hasn't specified a `package.json` explicitly then it simply ignores it when it can't be read, instead only doing a `debug()`, so yeah – it will completely ignore the config in these cases. @JoshuaKGoldberg Your error might be from npm itself
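To make the distinction concrete, here is a minimal, self-contained sketch of the pattern the fix adopts: reading and parsing happen in separate try/catch blocks, so a missing file can still be ignored while a broken file is surfaced. The helper name is illustrative and not part of Mocha's API.

```javascript
const fs = require('fs');

// Illustrative helper, not Mocha's actual implementation.
function readMochaConfigFromPackageJson(filepath) {
  let raw;
  try {
    raw = fs.readFileSync(filepath, 'utf8');
  } catch (readErr) {
    // The file is missing or unreadable; when the path was only a default guess,
    // falling back to an empty config is reasonable.
    return {};
  }
  // The file exists, so a parse failure means genuinely broken JSON
  // (e.g. a trailing comma) and should be reported loudly, not swallowed.
  const pkg = JSON.parse(raw); // throws SyntaxError on invalid JSON
  return pkg.mocha || {};
}
```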
2024-08-14T17:33:22Z
10.7
mochajs/mocha
5,032
mochajs__mocha-5032
[ "4552", "4552" ]
103c56b63542e36ba7a289ec25913d77bf2156b6
diff --git a/lib/nodejs/serializer.js b/lib/nodejs/serializer.js --- a/lib/nodejs/serializer.js +++ b/lib/nodejs/serializer.js @@ -6,7 +6,7 @@ 'use strict'; -const {type} = require('../utils'); +const {type, breakCircularDeps} = require('../utils'); const {createInvalidArgumentTypeError} = require('../errors'); // this is not named `mocha:parallel:serializer` because it's noisy and it's // helpful to be able to write `DEBUG=mocha:parallel*` and get everything else. @@ -188,14 +188,9 @@ class SerializableEvent { * @param {Array<object|string>} pairs - List of parent/key tuples to process; modified in-place. This JSDoc type is an approximation * @param {object} parent - Some parent object * @param {string} key - Key to inspect - * @param {WeakSet<Object>} seenObjects - For avoiding circular references */ - static _serialize(pairs, parent, key, seenObjects) { + static _serialize(pairs, parent, key) { let value = parent[key]; - if (seenObjects.has(value)) { - parent[key] = Object.create(null); - return; - } let _type = type(value); if (_type === 'error') { // we need to reference the stack prop b/c it's lazily-loaded. @@ -263,13 +258,14 @@ class SerializableEvent { error: this.originalError }); + // mutates the object + breakCircularDeps(result); + const pairs = Object.keys(result).map(key => [result, key]); - const seenObjects = new WeakSet(); let pair; while ((pair = pairs.shift())) { - SerializableEvent._serialize(pairs, ...pair, seenObjects); - seenObjects.add(pair[0]); + SerializableEvent._serialize(pairs, ...pair); } this.data = result.data; diff --git a/lib/utils.js b/lib/utils.js --- a/lib/utils.js +++ b/lib/utils.js @@ -647,3 +647,36 @@ exports.assignNewMochaID = obj => { */ exports.getMochaID = obj => obj && typeof obj === 'object' ? obj[MOCHA_ID_PROP_NAME] : undefined; + +/** + * Replaces any detected circular dependency with the string '[Circular]' + * Mutates original object + * @param inputObj {*} + * @returns {*} + */ +exports.breakCircularDeps = inputObj => { + const seen = new Set(); + + function _breakCircularDeps(obj) { + if (obj && typeof obj !== 'object') { + return obj; + } + + if (seen.has(obj)) { + return '[Circular]'; + } + + seen.add(obj); + for (const k in obj) { + if (Object.prototype.hasOwnProperty.call(obj, k)) { + obj[k] = _breakCircularDeps(obj[k], k); + } + } + + // deleting means only a seen object that is its own child will be detected + seen.delete(obj); + return obj; + } + + return _breakCircularDeps(inputObj); +};
diff --git a/test/integration/fixtures/parallel/circular-error.mjs b/test/integration/fixtures/parallel/circular-error.mjs new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/parallel/circular-error.mjs @@ -0,0 +1,10 @@ +import {describe,it} from "../../../../index.js"; + +describe('test1', () => { + it('test', () => { + const error = new Error('Foo'); + error.foo = { props: [] }; + error.foo.props.push(error.foo); + throw error; + }); +}); diff --git a/test/integration/parallel.spec.js b/test/integration/parallel.spec.js --- a/test/integration/parallel.spec.js +++ b/test/integration/parallel.spec.js @@ -30,4 +30,17 @@ describe('parallel run', () => { assert.strictEqual(result.stats.failures, 0); assert.strictEqual(result.stats.passes, 3); }); + + it('should correctly handle circular references in an exception', async () => { + const result = await runMochaJSONAsync('parallel/circular-error.mjs', [ + '--parallel', + '--jobs', + '2', + require.resolve('./fixtures/parallel/testworkerid1.mjs') + ]); + assert.strictEqual(result.stats.failures, 1); + assert.strictEqual(result.stats.passes, 1); + assert.strictEqual(result.failures[0].err.message, 'Foo'); + assert.strictEqual(result.failures[0].err.foo.props[0], '[Circular]'); + }); });
🐛 Bug: Parallel mode crashes if test exception contains circular references <!-- Have you read Mocha's Code of Conduct? By filing an Issue, you are expected to comply with it, including treating everyone with respect: https://github.com/mochajs/mocha/blob/master/.github/CODE_OF_CONDUCT.md For more, check out the Mocha Gitter chat room: https://gitter.im/mochajs/mocha Detail the steps necessary to reproduce the problem. To get the fastest support, create an MCVE and upload it to GitHub. create an [MCVE](https://stackoverflow.com/help/mcve) and upload it to GitHub. --> ### Prerequisites <!-- Place an `x` between the square brackets on the lines below for every satisfied prerequisite. --> - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [x] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. ### Description Parallel mode crashes or hangs if test exception contains circular references ### Steps to Reproduce #### 1. Hangs if error has self reference: _Test file `test.js`_ ```javascript describe('Test', () => { it('test', () => { const error = new Error('Foo'); error.self = error; throw error; }); }); ``` Running following will simply hang: ```bash $ mocha --parallel test.js ``` #### 2. Crashes if error has circular references passed within array properties _Test file `test.js`_ ```javascript describe('Test', () => { it('test', () => { const error = new Error('Foo'); error.foo = { props: [] }; error.foo.props.push(error.foo); throw error; }); }); ``` Running following ```bash $ mocha --parallel test.js ``` will result with an error as: ``` 1) Uncaught error outside test suite 0 passing (308ms) 1 failing 1) Uncaught error outside test suite: Uncaught TypeError: Converting circular structure to JSON --> starting at object with constructor 'Object' | property 'props' -> object with constructor 'Array' --- index 0 closes the circle at stringify (<anonymous>) at writeChannelMessage (node:internal/child_process/serialization:120:20) at process.target._send (node:internal/child_process:819:17) at process.target.send (node:internal/child_process:719:19) at processTicksAndRejections (node:internal/process/task_queues:93:5) ``` **Expected behavior:** Originally thrown exception should surface in report ### Versions <!-- If applicable, please specify: --> - The output of `mocha --version` and `node node_modules/.bin/mocha --version`: 8.2.1 - The output of `node --version`: 15.5.1 - Your operating system - name and version: macOS 10.15.7 - architecture (32 or 64-bit): 64-bit - Your shell (e.g., bash, zsh, PowerShell, cmd): zsh
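A self-contained sketch of the approach the fix takes: walk the object graph and replace any value already on the current path with the string '[Circular]' before the failure is serialized for IPC. This mirrors the `breakCircularDeps` helper added in the patch, but the standalone function and usage below are illustrative only.

```javascript
// Illustrative standalone circular-reference breaker (mutates its argument).
function breakCircularDeps(input) {
  const seen = new Set();

  function walk(value) {
    if (value === null || typeof value !== 'object') {
      return value;
    }
    if (seen.has(value)) {
      return '[Circular]';
    }
    seen.add(value);
    for (const key of Object.keys(value)) {
      value[key] = walk(value[key]);
    }
    // Deleting afterwards means only true cycles (an object reachable from itself)
    // are replaced, not merely repeated references.
    seen.delete(value);
    return value;
  }

  return walk(input);
}

// Against the failing example from the report:
const error = new Error('Foo');
error.foo = { props: [] };
error.foo.props.push(error.foo);
breakCircularDeps(error);

console.log(JSON.stringify(error.foo)); // {"props":["[Circular]"]} — serialization no longer throws
```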
2023-11-21T08:48:00Z
10.5
mochajs/mocha
4,985
mochajs__mocha-4985
[ "5112" ]
37deed262d4bc0788d32c66636495d10038ad398
diff --git a/lib/reporters/xunit.js b/lib/reporters/xunit.js --- a/lib/reporters/xunit.js +++ b/lib/reporters/xunit.js @@ -158,6 +158,7 @@ XUnit.prototype.test = function (test) { var attrs = { classname: test.parent.fullTitle(), name: test.title, + file: test.file, time: test.duration / 1000 || 0 };
diff --git a/test/reporters/xunit.spec.js b/test/reporters/xunit.spec.js --- a/test/reporters/xunit.spec.js +++ b/test/reporters/xunit.spec.js @@ -30,6 +30,7 @@ describe('XUnit reporter', function () { var expectedLine = 'some-line'; var expectedClassName = 'fullTitle'; var expectedTitle = 'some title'; + var expectedFile = 'testFile.spec.js'; var expectedMessage = 'some message'; var expectedDiff = '\n + expected - actual\n\n -foo\n +bar\n '; @@ -325,6 +326,7 @@ describe('XUnit reporter', function () { var expectedTest = { state: STATE_FAILED, title: expectedTitle, + file: expectedFile, parent: { fullTitle: function () { return expectedClassName; @@ -347,6 +349,8 @@ describe('XUnit reporter', function () { expectedClassName + '" name="' + expectedTitle + + '" file="' + + expectedFile + '" time="1"><failure>' + expectedMessage + '\n' + @@ -365,6 +369,7 @@ describe('XUnit reporter', function () { var expectedTest = { state: STATE_FAILED, title: expectedTitle, + file: expectedFile, parent: { fullTitle: function () { return expectedClassName; @@ -402,6 +407,7 @@ describe('XUnit reporter', function () { return true; }, title: expectedTitle, + file: expectedFile, parent: { fullTitle: function () { return expectedClassName; @@ -418,6 +424,8 @@ describe('XUnit reporter', function () { expectedClassName + '" name="' + expectedTitle + + '" file="' + + expectedFile + '" time="1"><skipped/></testcase>'; expect(expectedWrite, 'to be', expectedTag); }); @@ -431,6 +439,7 @@ describe('XUnit reporter', function () { return false; }, title: expectedTitle, + file: expectedFile, parent: { fullTitle: function () { return expectedClassName; @@ -447,6 +456,8 @@ describe('XUnit reporter', function () { expectedClassName + '" name="' + expectedTitle + + '" file="' + + expectedFile + '" time="0"/>'; expect(expectedWrite, 'to be', expectedTag); });
🚀 Feature: Add file path to xunit reporter ### Feature Request Checklist - [X] I have read and agree to Mocha's [Code of Conduct](https://github.com/mochajs/mocha/blob/master/.github/CODE_OF_CONDUCT.md) and [Contributing Guidelines](https://github.com/mochajs/mocha/blob/master/.github/CONTRIBUTING.md) - [X] I have searched for [related issues](https://github.com/mochajs/mocha/issues?q=is%3Aissue) and [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20), but none matched my issue. - [X] I want to provide a PR to resolve this ### Overview Filing an issue to track #4985: back in #4219, the `json`, `doc`, and `json-stream` reporters were augmented to output the test filenames in addition to their other attributes. But the `xunit` reporter wasn't given the same addition. ### Suggested Solution #4985, literally. 😄 As in: adding `file: test.file` to the reported attributes. ### Alternatives I can't think of anything. https://github.com/mochajs/mocha/pull/4985/files#r1193989943 suggests using `test?.invocationDetails?.absoluteFile` as a backup, but neither `invocationDetails` nor `absoluteFile` exist in Mocha. I think that's a Cypress-only thing. ### Additional Info Adding a co-author credit as @bmish made two PRs justifying this feature. ❤️ Co-authored-by: @bmish
2023-05-15T14:09:59Z
10.2
mochajs/mocha
5,074
mochajs__mocha-5074
[ "5085" ]
6f3f45e587a17463b75047631152429fa14b82a3
diff --git a/lib/cli/run.js b/lib/cli/run.js --- a/lib/cli/run.js +++ b/lib/cli/run.js @@ -369,7 +369,7 @@ exports.handler = async function (argv) { try { await runMocha(mocha, argv); } catch (err) { - console.error('\n' + (err.stack || `Error: ${err.message || err}`)); + console.error('\n Exception during run:', err); process.exit(1); } };
diff --git a/test/integration/reporters.spec.js b/test/integration/reporters.spec.js --- a/test/integration/reporters.spec.js +++ b/test/integration/reporters.spec.js @@ -211,7 +211,7 @@ describe('reporters', function () { return; } - var pattern = `^Error: invalid or unsupported TAP version: "${invalidTapVersion}"`; + var pattern = `Error: invalid or unsupported TAP version: "${invalidTapVersion}"`; expect(res, 'to satisfy', { code: 1, output: new RegExp(pattern, 'm')
🐛 Bug: Error handling fails when node:module registered errors are thrown ### Bug Report Checklist - [X] I have read and agree to Mocha's [Code of Conduct](https://github.com/mochajs/mocha/blob/master/.github/CODE_OF_CONDUCT.md) and [Contributing Guidelines](https://github.com/mochajs/mocha/blob/master/.github/CONTRIBUTING.md) - [X] I have searched for [related issues](https://github.com/mochajs/mocha/issues?q=is%3Aissue) and [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20), but none matched my issue. - [X] I have 'smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, my usage of Mocha, or Mocha itself. - [X] I want to provide a PR to resolve this ### Expected When running a test that throws a null error, I expect to get information about what happened and the error handler to give useful output. ### Actual When a null error is thrown while running the tests, the output from the mocha script is the program option list and a line: `✖ ERROR: null` ### Minimal, Reproducible Example I created a PR for this error, thinking it only applied to TypeScript errors, but as stated in the PR, any null error will cause this to happen. Nevertheless, the example created for the original PR shows the error: https://github.com/stalet/mocha-typescript-testproject ### Versions 10.2.0 ### Additional Info This error was reported as part of PR #5074, but since it is not specific to TypeScript errors the PR was closed and I am opening this issue instead.
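For context, a tiny sketch of why formatting a `null` rejection with the old expression throws a secondary error, and why handing the raw value to `console.error` (as the patch does) is safer. The values here are illustrative:

```javascript
const err = null;

// Old style: building the message dereferences properties of `err`,
// which itself throws a TypeError when `err` is null.
try {
  console.error('\n' + (err.stack || `Error: ${err.message || err}`));
} catch (secondary) {
  console.error('formatting the error failed:', secondary.message);
}

// New style: let console.error print whatever value was thrown.
console.error('\n Exception during run:', err); // " Exception during run: null"
```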
2024-01-08T10:53:04Z
10.3
mochajs/mocha
4,842
mochajs__mocha-4842
[ "3596" ]
22f9306265287eee3d273e174873fa16046376b6
diff --git a/lib/cli/run-helpers.js b/lib/cli/run-helpers.js --- a/lib/cli/run-helpers.js +++ b/lib/cli/run-helpers.js @@ -225,18 +225,18 @@ exports.validateLegacyPlugin = (opts, pluginType, map = {}) => { // if this exists, then it's already loaded, so nothing more to do. if (!map[pluginId]) { + let foundId; try { - map[pluginId] = require(pluginId); + foundId = require.resolve(pluginId); + map[pluginId] = require(foundId); } catch (err) { - if (err.code === 'MODULE_NOT_FOUND') { - // Try to load reporters from a path (absolute or relative) - try { - map[pluginId] = require(path.resolve(pluginId)); - } catch (err) { - throw createUnknownError(err); - } - } else { - throw createUnknownError(err); + if (foundId) throw createUnknownError(err); + + // Try to load reporters from a cwd-relative path + try { + map[pluginId] = require(path.resolve(pluginId)); + } catch (e) { + throw createUnknownError(e); } } } diff --git a/lib/mocha.js b/lib/mocha.js --- a/lib/mocha.js +++ b/lib/mocha.js @@ -16,7 +16,6 @@ var Suite = require('./suite'); var esmUtils = require('./nodejs/esm-utils'); var createStatsCollector = require('./stats-collector'); const { - warn, createInvalidReporterError, createInvalidInterfaceError, createMochaInstanceAlreadyDisposedError, @@ -335,35 +334,26 @@ Mocha.prototype.reporter = function (reporterName, reporterOptions) { } // Try to load reporters from process.cwd() and node_modules if (!reporter) { + let foundReporter; try { - reporter = require(reporterName); + foundReporter = require.resolve(reporterName); + reporter = require(foundReporter); } catch (err) { - if (err.code === 'MODULE_NOT_FOUND') { - // Try to load reporters from a path (absolute or relative) - try { - reporter = require(path.resolve(utils.cwd(), reporterName)); - } catch (_err) { - _err.code === 'MODULE_NOT_FOUND' - ? warn(`'${reporterName}' reporter not found`) - : warn( - `'${reporterName}' reporter blew up with error:\n ${err.stack}` - ); - } - } else { - warn(`'${reporterName}' reporter blew up with error:\n ${err.stack}`); + if (foundReporter) { + throw createInvalidReporterError(err.message, foundReporter); + } + // Try to load reporters from a cwd-relative path + try { + reporter = require(path.resolve(reporterName)); + } catch (e) { + throw createInvalidReporterError(e.message, reporterName); } } } - if (!reporter) { - throw createInvalidReporterError( - `invalid reporter '${reporterName}'`, - reporterName - ); - } this._reporter = reporter; } this.options.reporterOption = reporterOptions; - // alias option name is used in public reporters xunit/tap/progress + // alias option name is used in built-in reporters xunit/tap/progress this.options.reporterOptions = reporterOptions; return this; };
diff --git a/test/browser-specific/fixtures/webpack/webpack.config.js b/test/browser-specific/fixtures/webpack/webpack.config.js --- a/test/browser-specific/fixtures/webpack/webpack.config.js +++ b/test/browser-specific/fixtures/webpack/webpack.config.js @@ -17,7 +17,7 @@ module.exports = { plugins: [ new FailOnErrorsPlugin({ failOnErrors: true, - failOnWarnings: true + failOnWarnings: false }) ] }; diff --git a/test/node-unit/cli/run-helpers.spec.js b/test/node-unit/cli/run-helpers.spec.js --- a/test/node-unit/cli/run-helpers.spec.js +++ b/test/node-unit/cli/run-helpers.spec.js @@ -77,6 +77,23 @@ describe('helpers', function () { {message: /wonky/, code: 'ERR_MOCHA_INVALID_REPORTER'} ); }); + + it('should fail and report the original "MODULE_NOT_FOUND" error.message', function () { + expect( + () => + validateLegacyPlugin( + { + reporter: require.resolve('./fixtures/bad-require.fixture.js') + }, + 'reporter' + ), + 'to throw', + { + message: /Error: Cannot find module 'fake'/, + code: 'ERR_MOCHA_INVALID_REPORTER' + } + ); + }); }); }); diff --git a/test/node-unit/mocha.spec.js b/test/node-unit/mocha.spec.js --- a/test/node-unit/mocha.spec.js +++ b/test/node-unit/mocha.spec.js @@ -246,7 +246,7 @@ describe('Mocha', function () { it('should load from current working directory', function () { expect(function () { - mocha.reporter('./spec.js'); + mocha.reporter('./lib/reporters/spec.js'); }, 'not to throw'); }); @@ -255,7 +255,7 @@ describe('Mocha', function () { expect( function () { mocha.reporter( - '../../test/node-unit/fixtures/wonky-reporter.fixture.js' + './test/node-unit/fixtures/wonky-reporter.fixture.js' ); }, 'to throw', @@ -264,19 +264,6 @@ describe('Mocha', function () { } ); }); - - it('should warn about the error before throwing', function () { - try { - mocha.reporter( - '../../test/node-unit/fixtures/wonky-reporter.fixture.js' - ); - } catch (ignored) { - } finally { - expect(stubs.errors.warn, 'to have a call satisfying', [ - expect.it('to match', /reporter blew up/) - ]); - } - }); }); }); @@ -292,7 +279,7 @@ describe('Mocha', function () { expect( function () { mocha.reporter( - './test/node-unit/fixtures/wonky-reporter.fixture.js' + '../test/node-unit/fixtures/wonky-reporter.fixture.js' ); }, 'to throw', @@ -301,19 +288,6 @@ describe('Mocha', function () { } ); }); - - it('should warn about the error before throwing', function () { - try { - mocha.reporter( - './test/node-unit/fixtures/wonky-reporter.fixture.js' - ); - } catch (ignored) { - } finally { - expect(stubs.errors.warn, 'to have a call satisfying', [ - expect.it('to match', /reporter blew up/) - ]); - } - }); }); }); });
Correctly diagnose errors from required reporter module ### Prerequisites - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [x] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend avoiding the use of globally installed Mocha. ### Description When developing a third party reporter, mocha misdiagnoses an error coming from within the reporter as the reporter module not being found. ### Steps to Reproduce [Minimal reproduction](https://github.com/aido179/mocha-err) 1. Create a parent project that uses mocha. 2. Create a custom reporter, that doesn't require mocha anywhere. 3. Install that reporter in the parent project (in this case, as a local package using the file location). 4. When mocha `require`s the reporter, node throws an error. The `require` works correctly, but mocha erroneously `console.warn`s that the reporter wasn't found. **Expected behavior:** Mocha says that the reporter package caused the error. **Actual behavior:** Mocha erroneously `console.warn`s that the reporter wasn't found. **Reproduces how often:** 100% ### Versions Mocha version: 5.2.0 (not installed globally) Node version: v8.11.1 OS: MacOs High Sierra v10.13.6 Shell: iTerm2 build 3.2.4beta2 ### Additional Information As discussed in #3530, initially thought the package was not being required correctly.
While I can see how a better error message could be helpful, the _real_ bug here is in _your_ reporter depending on missing project dependencies.
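A minimal sketch of the diagnosis pattern the fix introduces: resolve the module first with `require.resolve`, so a reporter that exists but crashes while loading is no longer misreported as missing. The function below is illustrative, not Mocha's actual loader:

```javascript
// Illustrative: distinguish "reporter not found" from "reporter crashed while loading".
function loadReporter(reporterName) {
  let resolvedPath;
  try {
    resolvedPath = require.resolve(reporterName);
  } catch (notFoundErr) {
    // Truly unresolvable as a package; Mocha additionally retries with a
    // cwd-relative path before giving up.
    throw new Error(`invalid reporter '${reporterName}': not found`);
  }
  // From here on, any error comes from the reporter's own code, so its
  // original message is preserved instead of a misleading "not found" warning.
  return require(resolvedPath);
}
```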
2022-03-09T16:31:19Z
9.2
mochajs/mocha
4,835
mochajs__mocha-4835
[ "4330" ]
472a8be14f9b578c8b1ef3e6ae05d06fc2d9891b
diff --git a/lib/cli/options.js b/lib/cli/options.js --- a/lib/cli/options.js +++ b/lib/cli/options.js @@ -208,9 +208,10 @@ module.exports.loadPkgRc = loadPkgRc; * Priority list: * * 1. Command-line args - * 2. RC file (`.mocharc.c?js`, `.mocharc.ya?ml`, `mocharc.json`) - * 3. `mocha` prop of `package.json` - * 4. default configuration (`lib/mocharc.json`) + * 2. `MOCHA_OPTIONS` environment variable. + * 3. RC file (`.mocharc.c?js`, `.mocharc.ya?ml`, `mocharc.json`) + * 4. `mocha` prop of `package.json` + * 5. default configuration (`lib/mocharc.json`) * * If a {@link module:lib/cli/one-and-dones.ONE_AND_DONE_ARGS "one-and-done" option} is present in the `argv` array, no external config files will be read. * @summary Parses options read from `.mocharc.*` and `package.json`. @@ -231,6 +232,7 @@ const loadOptions = (argv = []) => { return args; } + const envConfig = parse(process.env.MOCHA_OPTIONS || ''); const rcConfig = loadRc(args); const pkgConfig = loadPkgRc(args); @@ -243,7 +245,14 @@ const loadOptions = (argv = []) => { args._ = args._.concat(pkgConfig._ || []); } - args = parse(args._, mocharc, args, rcConfig || {}, pkgConfig || {}); + args = parse( + args._, + mocharc, + args, + envConfig, + rcConfig || {}, + pkgConfig || {} + ); // recombine positional arguments and "spec" if (args.spec) {
diff --git a/test/node-unit/cli/options.spec.js b/test/node-unit/cli/options.spec.js --- a/test/node-unit/cli/options.spec.js +++ b/test/node-unit/cli/options.spec.js @@ -42,9 +42,10 @@ describe('options', function () { /** * Order of priority: * 1. Command-line args - * 2. RC file (`.mocharc.js`, `.mocharc.ya?ml`, `mocharc.json`) - * 3. `mocha` prop of `package.json` - * 4. default rc + * 2. `MOCHA_OPTIONS` environment variable + * 3. RC file (`.mocharc.js`, `.mocharc.ya?ml`, `mocharc.json`) + * 4. `mocha` prop of `package.json` + * 5. default rc */ describe('loadOptions()', function () { describe('when no parameter provided', function () { @@ -408,6 +409,30 @@ describe('options', function () { }); }); + describe('env options', function () { + it('should parse flags from MOCHA_OPTIONS', function () { + readFileSync = sinon.stub().onFirstCall().returns('{}'); + findConfig = sinon.stub().returns('/some/.mocharc.json'); + loadConfig = sinon.stub().returns({}); + findupSync = sinon.stub().returns('/some/package.json'); + sinon + .stub(process, 'env') + .value({MOCHA_OPTIONS: '--retries 42 --color'}); + + loadOptions = proxyLoadOptions({ + readFileSync, + findConfig, + loadConfig, + findupSync + }); + + expect(loadOptions(), 'to satisfy', { + retries: 42, + color: true + }); + }); + }); + describe('config priority', function () { it('should prioritize package.json over defaults', function () { readFileSync = sinon.stub(); @@ -474,6 +499,27 @@ describe('options', function () { '500' ); }); + + it('should prioritize env over rc file', function () { + readFileSync = sinon.stub(); + readFileSync.onFirstCall().returns('{}'); + readFileSync.onSecondCall().returns(''); + findConfig = sinon.stub().returns('/some/.mocharc.json'); + loadConfig = sinon.stub().returns({retries: 300}); + findupSync = sinon.stub().returns('/some/package.json'); + sinon + .stub(process, 'env') + .value({MOCHA_OPTIONS: '--retries 800 --color'}); + + loadOptions = proxyLoadOptions({ + readFileSync, + findConfig, + loadConfig, + findupSync + }); + + expect(loadOptions(), 'to have property', 'retries', 800); + }); }); describe('when called with a one-and-done arg', function () {
🚀 Feature: Support setting options via environment vars As discussed in #4232, it can be difficult to pass mocha command-line options through `npm run` scripts. As @boneskull said: > yargs has a feature that supports setting options via an environment variable; we should probably take advantage of that at some point. Might even be an issue for it already. I didn't see any open issues for this so figured I would open one.
sorry, butt-closed this one yargs has support for this via `.env()` which we _should_ be able to leverage. From reading [the `yargs` docs](https://yargs.js.org/docs/#api-envprefix) seems like `.env('MOCHA')` should be sufficient, I'll open a PR soon in my experience, it's never that easy, but I wish you luck regardless 😅 *You know*, this just makes me wish yargs itself would accept an environment variable that's equivalent to `.env('MOCHA')` so I could do this kind of thing with any command built with yargs https://github.com/mochajs/mocha/pull/4835
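A small sketch of what the change enables: options supplied through a `MOCHA_OPTIONS` environment variable (e.g. `MOCHA_OPTIONS='--retries 42 --color' npx mocha` from a shell or an npm script) are parsed as an extra layer sitting between command-line arguments and the rc file. The snippet below calls `yargs-parser` directly for illustration; Mocha wraps it in its own `parse` helper, and the example values come from the added tests:

```javascript
const parse = require('yargs-parser');

// Roughly what Mocha does with the environment variable:
process.env.MOCHA_OPTIONS = '--retries 42 --color';
const envConfig = parse(process.env.MOCHA_OPTIONS || '');

console.log(envConfig); // → { _: [], retries: 42, color: true }
```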
2022-02-25T18:21:19Z
10.4
mochajs/mocha
4,771
mochajs__mocha-4771
[ "4216" ]
9a1c45891f8d646c1bd540407ec7b3e63940deda
diff --git a/lib/cli/run-helpers.js b/lib/cli/run-helpers.js --- a/lib/cli/run-helpers.js +++ b/lib/cli/run-helpers.js @@ -21,25 +21,24 @@ const {UnmatchedFile} = require('./collect-files'); /** * Exits Mocha when tests + code under test has finished execution (default) - * @param {number} code - Exit code; typically # of failures + * @param {number} clampedCode - Exit code; typically # of failures * @ignore * @private */ -const exitMochaLater = code => { +const exitMochaLater = clampedCode => { process.on('exit', () => { - process.exitCode = Math.min(code, 255); + process.exitCode = clampedCode; }); }; /** * Exits Mocha when Mocha itself has finished execution, regardless of * what the tests or code under test is doing. - * @param {number} code - Exit code; typically # of failures + * @param {number} clampedCode - Exit code; typically # of failures * @ignore * @private */ -const exitMocha = code => { - const clampedCode = Math.min(code, 255); +const exitMocha = clampedCode => { let draining = 0; // Eagerly set the process's exit code in case stream.write doesn't @@ -139,12 +138,17 @@ const handleUnmatchedFiles = (mocha, unmatchedFiles) => { * @param {Mocha} mocha - Mocha instance * @param {Options} [opts] - Command line options * @param {boolean} [opts.exit] - Whether or not to force-exit after tests are complete + * @param {boolean} [opts.passOnFailingTestSuite] - Whether or not to fail test run if tests were failed * @param {Object} fileCollectParams - Parameters that control test * file collection. See `lib/cli/collect-files.js`. * @returns {Promise<Runner>} * @private */ -const singleRun = async (mocha, {exit}, fileCollectParams) => { +const singleRun = async ( + mocha, + {exit, passOnFailingTestSuite}, + fileCollectParams +) => { const fileCollectionObj = collectFiles(fileCollectParams); if (fileCollectionObj.unmatchedFiles.length > 0) { @@ -156,7 +160,9 @@ const singleRun = async (mocha, {exit}, fileCollectParams) => { // handles ESM modules await mocha.loadFilesAsync(); - return mocha.run(exit ? exitMocha : exitMochaLater); + return mocha.run( + createExitHandler({exit, passOnFailingTestSuite}) + ); }; /** @@ -186,7 +192,9 @@ const parallelRun = async (mocha, options, fileCollectParams) => { mocha.files = fileCollectionObj.files; // note that we DO NOT load any files here; this is handled by the worker - return mocha.run(options.exit ? exitMocha : exitMochaLater); + return mocha.run( + createExitHandler(options) + ); }; /** @@ -282,3 +290,15 @@ exports.validateLegacyPlugin = (opts, pluginType, map = {}) => { } } }; + +const createExitHandler = ({ exit, passOnFailingTestSuite }) => { + return code => { + const clampedCode = passOnFailingTestSuite + ? 0 + : Math.min(code, 255); + + return exit + ? 
exitMocha(clampedCode) + : exitMochaLater(clampedCode); + }; +}; diff --git a/lib/cli/run-option-metadata.js b/lib/cli/run-option-metadata.js --- a/lib/cli/run-option-metadata.js +++ b/lib/cli/run-option-metadata.js @@ -35,6 +35,7 @@ const TYPES = (exports.types = { 'diff', 'dry-run', 'exit', + 'pass-on-failing-test-suite', 'fail-zero', 'forbid-only', 'forbid-pending', diff --git a/lib/cli/run.js b/lib/cli/run.js --- a/lib/cli/run.js +++ b/lib/cli/run.js @@ -98,6 +98,11 @@ exports.builder = yargs => requiresArg: true, coerce: list }, + 'pass-on-failing-test-suite': { + default: false, + description: 'Not fail test run if tests were failed', + group: GROUPS.RULES + }, 'fail-zero': { description: 'Fail test run if no test(s) encountered', group: GROUPS.RULES diff --git a/lib/mocha.js b/lib/mocha.js --- a/lib/mocha.js +++ b/lib/mocha.js @@ -157,6 +157,7 @@ exports.run = function (...args) { * @param {boolean} [options.delay] - Delay root suite execution? * @param {boolean} [options.diff] - Show diff on failure? * @param {boolean} [options.dryRun] - Report tests without running them? + * @param {boolean} [options.passOnFailingTestSuite] - Fail test run if tests were failed? * @param {boolean} [options.failZero] - Fail test run if zero tests? * @param {string} [options.fgrep] - Test filter given string. * @param {boolean} [options.forbidOnly] - Tests marked `only` fail the suite? @@ -216,6 +217,7 @@ function Mocha(options = {}) { 'delay', 'diff', 'dryRun', + 'passOnFailingTestSuite', 'failZero', 'forbidOnly', 'forbidPending', @@ -870,6 +872,20 @@ Mocha.prototype.failZero = function (failZero) { return this; }; +/** + * Fail test run if tests were failed. + * + * @public + * @see [CLI option](../#-pass-on-failing-test-suite) + * @param {boolean} [passOnFailingTestSuite=false] - Whether to fail test run. + * @return {Mocha} this + * @chainable + */ +Mocha.prototype.passOnFailingTestSuite = function(passOnFailingTestSuite) { + this.options.passOnFailingTestSuite = passOnFailingTestSuite === true; + return this; +}; + /** * Causes tests marked `only` to fail the suite. *
diff --git a/test/integration/fixtures/failing-sync.fixture.js b/test/integration/fixtures/failing-sync.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/failing-sync.fixture.js @@ -0,0 +1,9 @@ +'use strict'; + +var assert = require('assert'); + +describe('a suite', function() { + it('should succeed', function() { + assert(false); + }); +}); diff --git a/test/integration/options/passOnFailingTestSuite.spec.js b/test/integration/options/passOnFailingTestSuite.spec.js new file mode 100644 --- /dev/null +++ b/test/integration/options/passOnFailingTestSuite.spec.js @@ -0,0 +1,40 @@ +'use strict'; + +var helpers = require('../helpers'); +var runMochaJSON = helpers.runMochaJSON; + +describe('Enabled --pass-on-failing-test-suite', function() { + var args = ['--pass-on-failing-test-suite=true']; + + it('Test should finish with zero code with disabled option', function(done) { + var fixture = 'failing-sync.fixture.js'; + runMochaJSON(fixture, args, function(err, res) { + if (err) { + return done(err); + } + + expect(res, 'to have passed test count', 0) + .and('to have test count', 1) + .and('to have exit code', 0); + done(); + }); + }); +}); + +describe('Disabled --pass-on-failing-test-suite', function() { + var args = ['--pass-on-failing-test-suite=false']; + + it('Test should return non-zero code with enabled option', function(done) { + var fixture = 'failing-sync.fixture.js'; + runMochaJSON(fixture, args, function(err, res) { + if (err) { + return done(err); + } + + expect(res, 'to have passed test count', 0) + .and('to have test count', 1) + .and('to have exit code', 1); + done(); + }); + }); +}); diff --git a/test/unit/mocha.spec.js b/test/unit/mocha.spec.js --- a/test/unit/mocha.spec.js +++ b/test/unit/mocha.spec.js @@ -376,6 +376,28 @@ describe('Mocha', function () { }); }); + describe('passOnFailingTestSuite()', function() { + it('should set the passOnFailingTestSuite option to false', function() { + mocha.passOnFailingTestSuite(); + expect( + mocha.options, + 'to have property', + 'passOnFailingTestSuite', + false + ); + }); + + it('should set the passOnFailingTestSuite option to true', function() { + mocha.passOnFailingTestSuite(true); + expect( + mocha.options, + 'to have property', + 'passOnFailingTestSuite', + true + ); + }); + }); + describe('failZero()', function () { it('should set the failZero option to true', function () { mocha.failZero();
🚀 Feature: Possibility to return a 0 exit code when the test run completes successfully even if some tests fail **Is your feature request related to a problem or a nice-to-have? Please describe.** Sometimes it is useful to separate the situation where the tests ran successfully and there were no infrastructure or runtime problems from the situation where there were runtime problems. Returning a non-zero exit code only for infrastructure or runtime problems makes integration with other toolchains easier. **Describe the solution you'd like** A flag, something like `failOnFailingTests` in Karma (https://github.com/karma-runner/karma/blob/master/docs/config/01-configuration-file.md#failonfailingtestsuite): - 0 exit code when the test run completes successfully - 1 exit code when something went wrong **Describe alternatives you've considered** Karma's https://github.com/karma-runner/karma/blob/master/docs/config/01-configuration-file.md#failonfailingtestsuite **Additional context** None.
> Sometimes it is useful to separate situations when tests run successfully, and there is no infrastructure or runtime problems, and when there are some runtime problems. I didn't understand that. Could you explain the use cases in more detail? I use Mocha from build tools (for example Gradle). After Mocha runs successfully, I learn the test status via a custom loader and pass the results on to the "parent" build system, so Mocha's exit code does not matter: the results are processed further and I don't want to fail the "parent" build. But it does matter if there is an infrastructure problem (for example a Node problem); in that case I want Mocha to return a non-zero exit code and force the "parent" build to fail. Right now I have to fail the "parent" build in both cases: when tests fail, which is a **valid** situation for the "parent" build system, and when there is an infrastructure problem (e.g. a syntax error), which is an **invalid** situation for it. If I ignore Mocha's exit code, nothing fails on failed tests (which is OK) and nothing fails on infrastructure problems (which is not OK). Thanks. This is a duplicate of... several issues. Can you run `mocha <args> || true` instead? This is how I've seen people treat any result as "passing" @boneskull Thank you for the reply. I don't need to treat any result as "passing"; I think I expressed the idea poorly in the opening post. Let's imagine we have these states: - Tests passed and the run succeeded: 0 failed tests and a zero exit code - Some tests failed but the run itself **succeeded**: some failed tests and a zero exit code - The run **failed**: some infrastructure problem (for example a critical module could not be found) I want a zero exit code only when there is no "infrastructure" problem. If I run `mocha <args> || true` I get a zero exit code even in that case. It is similar to this Karma property: https://github.com/karma-runner/karma/blob/master/docs/config/01-configuration-file.md#failonfailingtestsuite
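A condensed sketch of the exit-code behaviour the new option introduces, mirroring the `createExitHandler` logic in the patch: with the flag set, failing tests no longer produce a non-zero exit code, while runner-level errors still abort with code 1 elsewhere. The helper name below is illustrative:

```javascript
// Illustrative reduction of the exit handling added by the patch.
function exitCodeFor(failureCount, { passOnFailingTestSuite = false } = {}) {
  return passOnFailingTestSuite ? 0 : Math.min(failureCount, 255);
}

console.log(exitCodeFor(3));                                   // 3
console.log(exitCodeFor(3, { passOnFailingTestSuite: true })); // 0
console.log(exitCodeFor(300));                                 // 255 (exit codes fit in one byte)
```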
2021-10-18T13:01:32Z
10.6
mochajs/mocha
4,807
mochajs__mocha-4807
[ "4803" ]
60fafa45106e911801d9071a97b0f33542b6835f
diff --git a/lib/nodejs/esm-utils.js b/lib/nodejs/esm-utils.js --- a/lib/nodejs/esm-utils.js +++ b/lib/nodejs/esm-utils.js @@ -53,15 +53,30 @@ exports.requireOrImport = hasStableEsmImplementation err.code === 'ERR_UNSUPPORTED_DIR_IMPORT' ) { try { + // Importing a file usually works, but the resolution of `import` is the ESM + // resolution algorithm, and not the CJS resolution algorithm. So in this case + // if we fail, we may have failed because we tried the ESM resolution and failed + // So we try to `require` it return require(file); } catch (requireErr) { - if (requireErr.code === 'ERR_REQUIRE_ESM') { - // This happens when the test file is a JS file, but via type:module is actually ESM, + if ( + requireErr.code === 'ERR_REQUIRE_ESM' || + (requireErr instanceof SyntaxError && + requireErr + .toString() + .includes('Cannot use import statement outside a module')) + ) { + // ERR_REQUIRE_ESM happens when the test file is a JS file, but via type:module is actually ESM, // AND has an import to a file that doesn't exist. - // This throws an `ERR_MODULE_NOT_FOUND` // error above, + // This throws an `ERR_MODULE_NOT_FOUND` error above, // and when we try to `require` it here, it throws an `ERR_REQUIRE_ESM`. // What we want to do is throw the original error (the `ERR_MODULE_NOT_FOUND`), // and not the `ERR_REQUIRE_ESM` error, which is a red herring. + // + // SyntaxError happens when in an edge case: when we're using an ESM loader that loads + // a `test.ts` file (i.e. unrecognized extension), and that file includes an unknown + // import (which thows an ERR_MODULE_NOT_FOUND). require-ing it will throw the + // syntax error, because we cannot require a file that has import-s. throw err; } else { throw requireErr;
diff --git a/test/integration/esm.spec.js b/test/integration/esm.spec.js --- a/test/integration/esm.spec.js +++ b/test/integration/esm.spec.js @@ -81,4 +81,25 @@ describe('esm', function () { 'test-that-imports-non-existing-module' ); }); + + it('should throw an ERR_MODULE_NOT_FOUND and not ERR_REQUIRE_ESM if file imports a non-existing module with a loader', async function () { + const fixture = + 'esm/loader-with-module-not-found/test-that-imports-non-existing-module.fixture.ts'; + + const err = await runMochaAsync( + fixture, + [ + '--unhandled-rejections=warn', + '--loader=./test/integration/fixtures/esm/loader-with-module-not-found/loader-that-recognizes-ts.mjs' + ], + { + stdio: 'pipe' + } + ).catch(err => err); + + expect(err.output, 'to contain', 'ERR_MODULE_NOT_FOUND').and( + 'to contain', + 'non-existent-package' + ); + }); }); diff --git a/test/integration/fixtures/esm/loader-with-module-not-found/loader-that-recognizes-ts.mjs b/test/integration/fixtures/esm/loader-with-module-not-found/loader-that-recognizes-ts.mjs new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/esm/loader-with-module-not-found/loader-that-recognizes-ts.mjs @@ -0,0 +1,18 @@ +import path from 'path' +import {fileURLToPath} from 'url' + +/** + * @param {string} specifier + * @param {{ + * conditions: !Array<string>, + * parentURL: !(string | undefined), + * }} context + * @param {Function} defaultResolve + * @returns {Promise<{ url: string }>} + */ +export async function resolve(specifier, context, defaultResolve) { + const extension = path.extname( + fileURLToPath(/**@type {import('url').URL}*/ (new URL(specifier, context.parentURL))), + ) + return await defaultResolve(specifier.replace('.ts', '.mjs'), context, defaultResolve) +} diff --git a/test/integration/fixtures/esm/loader-with-module-not-found/test-that-imports-non-existing-module.fixture.mjs b/test/integration/fixtures/esm/loader-with-module-not-found/test-that-imports-non-existing-module.fixture.mjs new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/esm/loader-with-module-not-found/test-that-imports-non-existing-module.fixture.mjs @@ -0,0 +1 @@ +import 'non-existent-package'; diff --git a/test/integration/fixtures/esm/loader-with-module-not-found/test-that-imports-non-existing-module.fixture.ts b/test/integration/fixtures/esm/loader-with-module-not-found/test-that-imports-non-existing-module.fixture.ts new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/esm/loader-with-module-not-found/test-that-imports-non-existing-module.fixture.ts @@ -0,0 +1,2 @@ +// This file will be resolved to `test-that-imports-non-existing-module.fixture.mjs` by the loader +import 'non-existent-package'; diff --git a/test/integration/helpers.js b/test/integration/helpers.js --- a/test/integration/helpers.js +++ b/test/integration/helpers.js @@ -368,7 +368,11 @@ function createSubprocess(args, done, opts = {}) { * @returns {string} Resolved filepath */ function resolveFixturePath(fixture) { - if (path.extname(fixture) !== '.js' && path.extname(fixture) !== '.mjs') { + if ( + path.extname(fixture) !== '.js' && + path.extname(fixture) !== '.mjs' && + path.extname(fixture) !== '.ts' + ) { fixture += '.fixture.js'; } return path.isAbsolute(fixture)
Missing file/package in import statement misreported as ESM error ### Prerequisites <!-- Place an `x` between the square brackets on the lines below for every satisfied prerequisite. --> - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [x] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. ### Description This lead me down the garden path: A bad import statement (where the targeted package doesn't exist) results in mocha claiming the containing file was not interpreted as ESM despite containing an `import` (and mocha swallows the actual, correct error message). Error from Mocha: ``` /…/test.jsx:1 import { ^^^^^^ SyntaxError: Cannot use import statement outside a module ``` Error from Node.js: ``` Error [ERR_MODULE_NOT_FOUND]: Cannot find package '@testing-library/jest-dom' imported from /…/test.jsx ``` The bug originates from https://github.com/mochajs/mocha/blob/v9.1.3/lib/nodejs/esm-utils.js#L51 ### Steps to Reproduce <details> <summary>test.jsx</summary> Without installing `@testing-library/jest-dom`: ```js import '@testing-library/jest-dom'; ``` </details> ```console NODE_ENV=test NODE_OPTIONS="--loader=./loader.mjs" npx mocha ./test.jsx ``` (The loader live-transpiles jsx via esbuild and marks the file as esm; it's not especially important for the reproduction but it did help me find the bug in Mocha). **Expected behavior:** Mocha reports the correct error (that the targeted package doesn't exist, as node correctly reports). **Actual behavior:** Mocha wrongly reports a different, unrelated error. **Reproduces how often:** 100% ### Versions <!-- If applicable, please specify: --> - The output of `mocha --version` and `node node_modules/.bin/mocha --version`: 9.1.3 (also occurs in 8.3.0) - The output of `node --version`: 17.2.0 - Your operating system - name and version: macOS 12.1 - architecture (32 or 64-bit): 64 bit - Your shell (e.g., bash, zsh, PowerShell, cmd): bash - Your browser and version (if running browser tests): N/A - Any third-party Mocha-related modules (and their versions): - Any code transpiler (e.g., TypeScript, CoffeeScript, Babel) being used (and its version): esbuild ### Additional Information N/A
I believe the checks for `ERR_MODULE_NOT_FOUND` and `ERR_UNKNOWN_FILE_EXTENSION` as they are here are both wrong/outdated: `ERR_MODULE_NOT_FOUND`: I see the code comment about the red herring: https://github.com/mochajs/mocha/blob/28b482472a519b7abaf30a18b8ad709707bfd5a7/lib/nodejs/esm-utils.js#L58-L65 I think `ERR_REQUIRE_ESM` is not thrown anymore (it's now a `SyntaxError`). Also, I don't really understand your logic for trying to handle it this way. AFAIK, Node.js (at least for modules) only throws `ERR_MODULE_NOT_FOUND` when something is actually not found; if that's not true, I think it's probably wrong and should be updated (if it is still the case, could you confirm and provide a stacktrace so I can fix it). `ERR_UNKNOWN_FILE_EXTENSION`: `require()` will interpret any unknown file extension as commonjs, but it shouldn't be blindly applied without first checking whether format is known (for instance, `packageJson.type = 'module'` or a loader set the format to `'module'`). So this condition should be paired with another condition, something like `format !== 'module' && err.code === 'ERR_UNKNOWN_FILE_EXTENSION'`. @giltayar could you have a look at this one, please? Oh, well hello @giltayar haha @juergba @JakobJingleheimer. I've seen this too! Let me look into it this week (or the week after). @JakobJingleheimer I tried reproducing the problem, and couldn't. My setup was Node and mocha versions exactly like yours, ```sh $ npm install mocha babel-register-esm # babel-register-esm is a loader of mine that transpiles via babel $ cat test.mjs import '@testing-library/jest-dom'; $ npx mocha --loader=babel-register-esm ./test.mjs (node:3252) ExperimentalWarning: --experimental-loader is an experimental feature. This feature could change at any time (Use `node --trace-warnings ...` to show where the warning was created) (node:3252) DeprecationWarning: Obsolete loader hook(s) supplied and will be ignored: getFormat, transformSource Error [ERR_MODULE_NOT_FOUND]: Cannot find package '@testing-library/jest-dom' imported from /Users/giltayar/code/tmp/mocha-esm-problem/test.mjs ... ``` So it looks like it's working...? What did I miss here? Your file extension is `.mjs`. That's why it's not reproducing for you 😉 Mocha has a specific check just above the code I cited before for `.mjs` (it returns before hitting the bugged code): https://github.com/mochajs/mocha/blob/28b482472a519b7abaf30a18b8ad709707bfd5a7/lib/nodejs/esm-utils.js#L44-L46 Ensure your file extension is not `.mjs` or `.js`. 
``` $> NODE_ENV=test NODE_OPTIONS='--loader=./loader.mjs' npx mocha ./test/index.spec.jsx /…/test/index.spec.jsx:1 import { ^^^^^^ SyntaxError: Cannot use import statement outside a module at Object.compileFunction (node:vm:352:18) at wrapSafe (node:internal/modules/cjs/loader:1026:15) at Module._compile (node:internal/modules/cjs/loader:1061:27) at Object.Module._extensions..js (node:internal/modules/cjs/loader:1149:10) at Module.load (node:internal/modules/cjs/loader:975:32) at Function.Module._load (node:internal/modules/cjs/loader:822:12) at Module.require (node:internal/modules/cjs/loader:999:19) at require (node:internal/modules/cjs/helpers:102:18) at Object.exports.requireOrImport (/…/test/node_modules/mocha/lib/nodejs/esm-utils.js:56:20) at processTicksAndRejections (node:internal/process/task_queues:96:5) at async Object.exports.loadFilesAsync (/…/test/node_modules/mocha/lib/nodejs/esm-utils.js:88:20) at async singleRun (/…/test/node_modules/mocha/lib/cli/run-helpers.js:125:3) at async Object.exports.handler (/…/test/node_modules/mocha/lib/cli/run.js:374:5) ``` The contents of `./test/index.spec.jsx` that produced the above is just: ```mjs import 'nonexistent'; ``` If you replace that import with one to something that does exist, all is well. @JakobJingleheimer reproduced! The fix is really ugly, as this whole section of code is heuristics in figuring out whether the `require` threw for "real" reasons or just because it wasn't supposed to be `require`-ed. But it's mostly passing the test of time, and I can't think of another way to fallback to `require` if `import` fails (for example, if there are `require` hooks that are needed). Hmm... maybe we don't really need the `require`? is it time we got rid of it? AFAIR, require hooks are still called when `importi`-ng a CJS file. (just thoughts. Will be fixed one way or another this weekend) 🤔 can you not trust `ERR_MODULE_NOT_FOUND`? If it's removed from the 3 conditions currently used to determine whether to try `require()`, Mocha will throw the true error. Then in the try/catch for `require()`, check for a SyntaxError instead of the nonexistent `ERR_REQUIRE_ESM`. I just tried both, and it worked for this bug's scenario: Removing the `ERR_MODULE_NOT_FOUND` addresses the case where Node has already (correctly) determined the file doesn't exist. Updating the check for `ERR_REQUIRE_ESM` → `instanceof SyntaxError` + selective `requireErr.message` substring match addresses where there wasn't enough info before to know whether `require()` is appropriate (so try and see, as Mocha already does), but it turns out, nope, not appropriate. @JakobJingleheimer unfortunately, I can't. The `ERR_MODULE_NOT_FOUND` condition is there because you can run `mocha ./foo` and assume that Mocha will load `./foo.js`. But I fixed the problem and a PR will be coming in a few minutes! (it's a hack, but the whole way of determining whether a file is ESM or CJS is ultimately a hack)
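For readers following along, here is a hedged sketch of the `SyntaxError`-based approach suggested above. This is the suggestion from the thread, not the change that actually landed in the linked PR; `file` and the regular expression are assumptions for illustration:

```js
const url = require('url');

async function requireOrImportSuggestion(file) {
  try {
    return await import(url.pathToFileURL(file));
  } catch (importErr) {
    try {
      return require(file);
    } catch (requireErr) {
      // If require() choked on ESM syntax, the original import() failure
      // (e.g. ERR_MODULE_NOT_FOUND) is the real story, so surface that instead.
      const looksLikeEsmParsedAsCjs =
        requireErr instanceof SyntaxError &&
        /Cannot use import statement|Unexpected token 'export'/.test(requireErr.message);
      if (looksLikeEsmParsedAsCjs) {
        throw importErr;
      }
      throw requireErr;
    }
  }
}
```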
2022-01-04T05:29:23Z
9.1
mochajs/mocha
4746
mochajs__mocha-4746
[ "4740" ]
4860738af9de9493fade35aea3df65dc7461e100
diff --git a/browser-entry.js b/browser-entry.js --- a/browser-entry.js +++ b/browser-entry.js @@ -213,7 +213,4 @@ Mocha.process = process; global.Mocha = Mocha; global.mocha = mocha; -// this allows test/acceptance/required-tokens.js to pass; thus, -// you can now do `const describe = require('mocha').describe` in a -// browser context (assuming browserification). should fix #880 -module.exports = Object.assign(mocha, global); +module.exports = mocha; diff --git a/lib/cli/watch-run.js b/lib/cli/watch-run.js --- a/lib/cli/watch-run.js +++ b/lib/cli/watch-run.js @@ -46,7 +46,7 @@ exports.watchParallelRun = ( // this `require` is needed because the require cache has been cleared. the dynamic // exports set via the below call to `mocha.ui()` won't work properly if a - // test depends on this module (see `required-tokens.spec.js`). + // test depends on this module. const Mocha = require('../mocha'); // ... and now that we've gotten a new module, we need to use it again due @@ -108,7 +108,7 @@ exports.watchRun = (mocha, {watchFiles, watchIgnore}, fileCollectParams) => { // this `require` is needed because the require cache has been cleared. the dynamic // exports set via the below call to `mocha.ui()` won't work properly if a - // test depends on this module (see `required-tokens.spec.js`). + // test depends on this module. const Mocha = require('../mocha'); // ... and now that we've gotten a new module, we need to use it again due
diff --git a/test/browser-specific/setup.js b/test/browser-specific/setup.js --- a/test/browser-specific/setup.js +++ b/test/browser-specific/setup.js @@ -8,3 +8,5 @@ global.expect = require('unexpected') .use(require('unexpected-map')) .use(require('unexpected-sinon')) .use(require('unexpected-eventemitter')); + +require('../../browser-entry'); diff --git a/test/unit/required-tokens.spec.js b/test/unit/required-tokens.spec.js deleted file mode 100644 --- a/test/unit/required-tokens.spec.js +++ /dev/null @@ -1,12 +0,0 @@ -'use strict'; - -var assert = require('assert'); -var describe = require('../..').describe; -var it = require('../..').it; - -describe('using imported describe', function() { - it('using imported it', function(done) { - assert.ok(true); - done(); - }); -});
Window properties impeded using mocha I recently tried to use mocha in a project whose runtime environment had a `window.ui` property and we had to patch our copy of mocha.js to make it work. The window property overshadows the mocha prototype property and mocha wouldn't even initialize properly. I suspect there are other critical properties that, if they appear as members of window, will cause similar problems. I'm not sure if this is something the project wants to address directly, or if it's a "Doctor, it hurts when I do that - Don't do that" but I figured I'd report it anyway. ### Prerequisites - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [x] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. Actually, the last two are kind of N/A for this issue. ### Description In this piece of code at the end of mocha.js, windows properties overwrite mocha properties, even those that are necessary for mocha to function. ```javascript var browserEntry = Object.assign(mocha, commonjsGlobal); return browserEntry; ``` In our case, the ui function was getting overwritten by a ui property introduced by the Foundry execution framework in which we were running our tests. Once we adjusted this code to do the following, we were able to run mocha tests in our environment in the usual way: ```javascript // Wicked hack for Quench - // get ui property out of copy of global merged with mocha before merging so it doesn't clobber mocha's ui function var hackedCommonJsGlobal = Object.assign({}, commonjsGlobal); delete hackedCommonJsGlobal.ui; var browserEntry = Object.assign(mocha, hackedCommonJsGlobal); return browserEntry; ``` It occurs to me that there may be other mocha properties that shouldn't be overshadowed by properties from window as well, and that mocha could protect itself from. ### Steps to Reproduce Not sure how applicable this is. Set up a globalThis.ui property before you import mocha and you should see the effect. **Expected behavior:** Property doesn't affect mocha's ability to initialize before running tests. **Actual behavior:** Property overshadows mocha's ui function and mocha is unable to initialize from options, even `setup('bdd')`, before running any tests. **Reproduces how often:** Quite consistently - if your globalThis has properties that happen to overshadow mocha properties. ### Versions We were working with mocha 8.x and 9.x. The version of node doesn't specifically matter. ### Additional Information We aren't running mocha standalone from a command line in NOde, and we aren't spawning a process to run it and piping back the results, but we're running it as a library embedded in another app running in the browser. The tests need to use the APIs of the browser app in which the runner is executing. 
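A minimal sketch of the failure mode described in this report; the `ui` property name comes from the report, while the rest (including the exact error text) is an assumption rather than code from the affected project:

```js
// The host application defines a global `ui` long before Mocha's browser bundle loads.
globalThis.ui = { notifications: [] };

// The browser entry then effectively did `module.exports = Object.assign(mocha, global)`,
// which copies the global `ui` onto the mocha instance and shadows Mocha.prototype.ui.

mocha.setup('bdd'); // now fails along the lines of "this.ui is not a function"
```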
This issue was found in the context of a module that lets us run tests within the FoundryVTT environment. This module adds the UI to the environment to kick off the tests and display the results, but it does it along with loading the core UI, the world, the system, etc. so quite a lot of stuff for the environment has loaded, including the window.ui property, well before the imports of this specific module are invoked (including the mocha and chai imports). It is intended to be used in support of building other modules. With this one tweak, the tests seem to run fine.
2021-09-12T15:11:31Z
9.1
mochajs/mocha
4668
mochajs__mocha-4668
[ "4665" ]
f033ff1ab561101e956285924343c23150cd6595
diff --git a/lib/esm-utils.js b/lib/esm-utils.js --- a/lib/esm-utils.js +++ b/lib/esm-utils.js @@ -49,7 +49,8 @@ exports.requireOrImport = hasStableEsmImplementation } catch (err) { if ( err.code === 'ERR_MODULE_NOT_FOUND' || - err.code === 'ERR_UNKNOWN_FILE_EXTENSION' + err.code === 'ERR_UNKNOWN_FILE_EXTENSION' || + err.code === 'ERR_UNSUPPORTED_DIR_IMPORT' ) { return require(file); } else {
diff --git a/test/integration/esm.spec.js b/test/integration/esm.spec.js --- a/test/integration/esm.spec.js +++ b/test/integration/esm.spec.js @@ -1,4 +1,5 @@ 'use strict'; +var path = require('path'); var helpers = require('./helpers'); var run = helpers.runMochaJSON; var runMochaAsync = helpers.runMochaAsync; @@ -65,4 +66,19 @@ describe('esm', function() { done(); }); }); + + it('should enable requiring/loading a cjs module with "dir" as filename', async function() { + var fixture = 'esm/test-that-uses-dir-cjs-require.fixture.js'; + const result = await runMochaAsync( + fixture, + [ + ...args, + '--require', + path.resolve(__dirname, './fixtures/esm/dir-cjs-require') + ], + {stdio: 'pipe'} + ); + + expect(result, 'to have passed test count', 1); + }); }); diff --git a/test/integration/fixtures/esm/dir-cjs-require/index.js b/test/integration/fixtures/esm/dir-cjs-require/index.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/esm/dir-cjs-require/index.js @@ -0,0 +1 @@ +global.testPassesIfThisVariableIsDefined = true diff --git a/test/integration/fixtures/esm/test-that-uses-dir-cjs-require.fixture.js/index.js b/test/integration/fixtures/esm/test-that-uses-dir-cjs-require.fixture.js/index.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/esm/test-that-uses-dir-cjs-require.fixture.js/index.js @@ -0,0 +1,4 @@ +// See https://github.com/mochajs/mocha/issues/4665 for an explanation of this test +it('should require a dir import', () => { + expect(global.testPassesIfThisVariableIsDefined, 'to be', true) +})
`ts-node/register` not supported in 9.0.0 ### Prerequisites - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [x] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. ### Description mocha configuration contains `require: ['ts-node/register']`, in order to load the TS loader. But since `node_modules/ts-node/register` is a folder, the new ESM loader in mocha fails to load, and throws: `Error [ERR_UNSUPPORTED_DIR_IMPORT]: Directory import '...../node_modules/ts-node/register' is not supported resolving ES modules imported from ...../node_modules/mocha/lib/esm-utils.js` ### Steps to Reproduce 1. `npm i --save-dev ts-node` 2. Add `require: ['ts-node/register']` to the `.mocharc.yml` **Expected behavior:** Should work, and at least automatically resolve some well-known loaders like `ts-node`. **Actual behavior:** Fails miserably. **Reproduces how often:** 100% ### Versions - The output of `mocha --version` and `node node_modules/.bin/mocha --version`: 9.0.0 - The output of `node --version`: v15.12.0 - Your operating system - name and version: MacOS 11.12.3 - architecture (32 or 64-bit): M1 - Your shell (e.g., bash, zsh, PowerShell, cmd): bash - Any code transpiler (e.g., TypeScript, CoffeeScript, Babel) being used (and its version): ts-node 9.1.1
Will look into it tonight. Hopefully, just `require`-ing when we get `ERR_UNSUPPORTED_DIR_IMPORT` will solve the problem.

Oh, wait, I see your fix @swansontec. Looks like it _will_ solve it. Thanks!

I'm surprised that this issue is arising only two weeks after publishing v9.0.0. Is nobody using TypeScript with ts-node? You are using node v15.12.0, which is end-of-life already. Is v14/v16 also failing?

@swansontec, I tried reproducing it, but it works for me:

```typescript
// x.test.ts
function foo(x: number) {
  console.log('hi')
}

foo(4)
```

```yaml
# .mocharc.yaml
require: ['ts-node/register']
```

```shell
$ npm init -y
$ npm i -D ts-node mocha
$ npx mocha x.test.ts
hi
  0 passing (0ms)
```

I tried both for Node.js v15.12.0 and v16.3.0. Mocha was v9.0.1, and ts-node was 10.0.0 and 9.1.1.

If you could create a repository that reproduces this (including full instructions to reproduce), that would maybe pinpoint the problem?

@danielgindi @swansontec are you requiring the `esm` package?

> @danielgindi @swansontec are you requiring the `esm` package?

Not specifically. Simply `require: ['ts-node/register']`

Aha! This only affects *old* versions of ts-node. Modern versions, such as the current one, have an `exports` field in their `package.json` file, which solves the problem another way.

However, the latest version of [Sucrase](https://github.com/alangpierce/sucrase#readme) (3.19.0) still has the issue. Here is a minimal reproduction script:

```sh
mkdir mocha-demo; cd mocha-demo
yarn add --dev mocha sucrase
echo "console.log('Hey')" > test.ts
yarn mocha -r sucrase/register test.ts
```

So, this does open up the possibility of patching Sucrase to use `exports`, instead of fixing Mocha itself. Perhaps both would be worthwhile.

> Aha! This only affects *old* versions of ts-node. Modern versions, such as the current one, have an `exports` field in their `package.json` file, which solves the problem another way.

Wait. What would be the correct way to register ts-node with mocha?

> > Aha! This only affects _old_ versions of ts-node. Modern versions, such as the current one, have an `exports` field in their `package.json` file, which solves the problem another way.
> > Wait. What would be the correct way to register ts-node with mocha?

It works the same as before - you just add `-r ts-node/register`, or put `require: ['ts-node/register']` into a Mocha config file. The problem is that `import()` does not know how to search the filesystem the way `require()` does. The only way for `import('ts-node/register')` to succeed is if the library provides an explicit [subpath export in `package.json`](https://nodejs.org/dist/latest-v16.x/docs/api/packages.html#packages_subpath_exports). Recent versions of ts-node *do* have the right subpath exports, so `import('ts-node/register')` succeeds! However, other packages like sucrase do *not* provide subpath exports in their `package.json` file, which means `import('sucrase/register')` will fail. Falling back onto `require('sucrase/register')` will work fine, because `require` will search the filesystem as well. This is what #4666 achieves - enabling the `require` fallback logic for this case.

Got it. But for CommonJS, if there's no package.json, the default of course is `index.js`. And CommonJS is the base module system for Node.js, so it does not make sense to remove support for it :) Well, there's a PR for it so all good!

@swansontec got it. I'll try and reproduce it without specifically using `ts-node`. Thanks for the detailed explanation.

@swansontec yup.
got it. Fixing... Hey, @swansontec, I just saw you have a PR for this. After I made mine... 😂 Unfortunately, your PR doesn't include a test. If you'd rather you add a test to your PR, we can use yours. Otherwise, I'll add mine.
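To illustrate the resolution difference swansontec describes above, here is a hedged sketch; the on-disk layout in the comments is an assumption for illustration, not the actual contents of the sucrase package:

```js
// Assumed layout (for illustration):
//   node_modules/sucrase/register/index.js   -> exists on disk
//   node_modules/sucrase/package.json        -> has no "exports": { "./register": ... } entry

// CommonJS resolution searches the filesystem and finds the directory's index.js:
require('sucrase/register');                 // works

// ESM resolution refuses directory specifiers unless package.json maps the subpath:
import('sucrase/register').catch(err => {
  console.error(err.code);                   // 'ERR_UNSUPPORTED_DIR_IMPORT'
});
```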
2021-06-25T06:54:20Z
9
mochajs/mocha
4614
mochajs__mocha-4614
[ "4580" ]
34643e4c0821aeb8d6977c1942bc106c9363789a
diff --git a/lib/cli/watch-run.js b/lib/cli/watch-run.js --- a/lib/cli/watch-run.js +++ b/lib/cli/watch-run.js @@ -261,17 +261,21 @@ const createRerunner = (mocha, watcher, {beforeRun} = {}) => { let rerunScheduled = false; const run = () => { - mocha = beforeRun ? beforeRun({mocha, watcher}) || mocha : mocha; - runner = mocha.run(() => { - debug('finished watch run'); - runner = null; - blastCache(watcher); - if (rerunScheduled) { - rerun(); - } else { - console.error(`${logSymbols.info} [mocha] waiting for changes...`); - } - }); + try { + mocha = beforeRun ? beforeRun({mocha, watcher}) || mocha : mocha; + runner = mocha.run(() => { + debug('finished watch run'); + runner = null; + blastCache(watcher); + if (rerunScheduled) { + rerun(); + } else { + console.error(`${logSymbols.info} [mocha] waiting for changes...`); + } + }); + } catch (e) { + console.error(e.stack); + } }; const scheduleRun = () => {
diff --git a/test/integration/helpers.js b/test/integration/helpers.js --- a/test/integration/helpers.js +++ b/test/integration/helpers.js @@ -470,6 +470,7 @@ async function runMochaWatchJSONAsync(args, opts, change) { // eslint-disable-next-line no-control-regex .replace(/\u001b\[\?25./g, '') .split('\u001b[2K') + .filter(x => x) .map(x => JSON.parse(x)) ); } diff --git a/test/integration/options/watch.spec.js b/test/integration/options/watch.spec.js --- a/test/integration/options/watch.spec.js +++ b/test/integration/options/watch.spec.js @@ -47,6 +47,19 @@ describe('--watch', function() { }); }); + it('reruns test when watched test file crashes', function() { + const testFile = path.join(tempDir, 'test.js'); + copyFixture(DEFAULT_FIXTURE, testFile); + + replaceFileContents(testFile, 'done();', 'done((;'); + + return runMochaWatchJSONAsync([testFile], tempDir, () => { + replaceFileContents(testFile, 'done((;', 'done();'); + }).then(results => { + expect(results, 'to have length', 1); + }); + }); + describe('when in parallel mode', function() { it('reruns test when watched test file is touched', function() { const testFile = path.join(tempDir, 'test.js'); @@ -58,6 +71,19 @@ describe('--watch', function() { expect(results, 'to have length', 2); }); }); + + it('reruns test when watched test file is crashed', function() { + const testFile = path.join(tempDir, 'test.js'); + copyFixture(DEFAULT_FIXTURE, testFile); + + replaceFileContents(testFile, 'done();', 'done((;'); + + return runMochaWatchJSONAsync([testFile], tempDir, () => { + replaceFileContents(testFile, 'done((;', 'done();'); + }).then(results => { + expect(results, 'to have length', 1); + }); + }); }); it('reruns test when file matching --watch-files changes', function() {
Watcher crashes when reloading code that throws (regression mocha@7 to mocha@8) <!-- Have you read Mocha's Code of Conduct? By filing an Issue, you are expected to comply with it, including treating everyone with respect: https://github.com/mochajs/mocha/blob/master/.github/CODE_OF_CONDUCT.md For more, check out the Mocha Gitter chat room: https://gitter.im/mochajs/mocha Detail the steps necessary to reproduce the problem. To get the fastest support, create an MCVE and upload it to GitHub. create an [MCVE](https://stackoverflow.com/help/mcve) and upload it to GitHub. --> ### Prerequisites <!-- Place an `x` between the square brackets on the lines below for every satisfied prerequisite. --> - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [x] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. ### Description MCVE: please see https://github.com/marcelbeumer/mocha-8-watcher-issue for a minimal test case with STR. Since mocha@8 the watcher crashes when reloading tests and a require fails, for example when causing a syntax error. Errors that happen during test execution are handled correctly by the watcher. Experimenting with different mocha versions, it turns out it is a regression from v7 to v8. ### Steps to Reproduce See MCVE: https://github.com/marcelbeumer/mocha-8-watcher-issue. It seems that when the watcher is reloading code that will throw when requiring, the watcher crashes. **Expected behavior:** The watcher does not crash when reloading test code. Behavior for the initial test run when starting the watcher is debatable but would be best if it handled both cases well. **Actual behavior:** The watcher crashes when reloading test code that causes an error. **Reproduces how often:** 100% ### Versions <!-- If applicable, please specify: --> - The output of `mocha --version` and `node node_modules/.bin/mocha --version`: 8.30 (but also tried @8.0, and @7) - The output of `node --version`: v15.6.0 - Your operating system - name and version: macOS 11.1 - architecture (32 or 64-bit): 64 - Your shell (e.g., bash, zsh, PowerShell, cmd): zsh - Your browser and version (if running browser tests): - Any third-party Mocha-related modules (and their versions): - Any code transpiler (e.g., TypeScript, CoffeeScript, Babel) being used (and its version): no, but same behavior with ts-node setup (see branch in https://github.com/marcelbeumer/mocha-8-watcher-issue) ### Additional Information <!-- Any additional information, configuration or data that might be necessary to reproduce the issue. -->
When I tested your MCVE (nice MCVE, by the way), it only happened with Node.js 15.x. With 14.x and mocha v8 the watcher is fine. It seems to be caused by Node 15.x, but I'm not sure, because mocha v7 is fine with Node 15.x.

Didn't Node 15 change the handling of unhandled rejections to throw? Maybe it's that in combination with the mocha 8 refactorings for running things in parallel?

@marcelbeumer Yes, Node 15's [default mode for unhandledRejection was changed to throw (from warn)](https://github.com/nodejs/node/blob/master/doc/changelogs/CHANGELOG_V15.md#throw-on-unhandled-rejections---33021). I also strongly suspect this is the cause, but I have not yet confirmed it.
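A stand-alone illustration of the Node 15 behavior change referenced above (not Mocha code; the error text is paraphrased):

```js
// An unhandled rejection - e.g. a watched test file that fails to load during a rerun.
Promise.reject(new Error('failed to reload watched test file'));

// Node 14 and earlier (default mode "warn"): prints an UnhandledPromiseRejectionWarning,
// the process keeps running and the watcher survives.
// Node 15 and later (default mode "throw"): the rejection is raised as an uncaught error
// and the process exits, taking the watcher down with it.
```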
2021-03-28T12:10:43Z
8.3
mochajs/mocha
4638
mochajs__mocha-4638
[ "3675" ]
7c3daea17365fc826751fd9a35f97ba8cfbb7100
diff --git a/lib/reporters/base.js b/lib/reporters/base.js --- a/lib/reporters/base.js +++ b/lib/reporters/base.js @@ -190,6 +190,13 @@ function stringifyDiffObjs(err) { */ var generateDiff = (exports.generateDiff = function(actual, expected) { try { + const diffSize = 2048; + if (actual.length > diffSize) { + actual = actual.substring(0, diffSize) + ' ... Lines skipped'; + } + if (expected.length > diffSize) { + expected = expected.substring(0, diffSize) + ' ... Lines skipped'; + } return exports.inlineDiffs ? inlineDiff(actual, expected) : unifiedDiff(actual, expected);
diff --git a/test/reporters/base.spec.js b/test/reporters/base.spec.js --- a/test/reporters/base.spec.js +++ b/test/reporters/base.spec.js @@ -164,6 +164,34 @@ describe('Base reporter', function() { ' \n actual expected\n \n a foobar inline diff\n ' ); }); + + it("should truncate overly long 'actual' ", function() { + var actual = ''; + var i = 0; + while (i++ < 120) { + actual += 'a foo unified diff '; + } + var expected = 'a bar unified diff'; + + inlineDiffsStub.value(false); + var output = generateDiff(actual, expected); + + expect(output, 'to match', / \.\.\. Lines skipped/); + }); + + it("should truncate overly long 'expected' ", function() { + var actual = 'a foo unified diff'; + var expected = ''; + var i = 0; + while (i++ < 120) { + expected += 'a bar unified diff '; + } + + inlineDiffsStub.value(false); + var output = generateDiff(actual, expected); + + expect(output, 'to match', / \.\.\. Lines skipped/); + }); }); describe('inline strings diff', function() {
Mocha failing when error message too big

### Description
Mocha never ends with big error messages.

### Steps to Reproduce
```js
const {expect} = require('chai')
const _ = require('lodash') // needed for _.times below

it('Test failing', function () {
  const longArray = _.times(10000, function (i) {
    return {a : i}
  })
  const shortArray = []
  expect(longArray).deep.equal(shortArray)
})
```

Then execute `yarn mocha test --exit`. The test will execute and be marked as a failure, but the summary will never appear and the process will not exit.

If, on the contrary, one writes:

```js
const {expect} = require('chai')
const _ = require('lodash') // needed for _.times below

it('Test failing', function () {
  const longArray = _.times(10000, function (i) {
    return {a : i}
  })
  const shortArray = []
  try {
    expect(longArray).deep.equal(shortArray)
  } catch (e) {
    console.error(e)
    throw new Error('Small error')
  }
})
```

the test fails and it displays an error in the summary. The error is logged without any problem.

**Expected behavior:** [What you expect to happen] Mocha should be able to exit the process even if the error is overly long. Maybe the message could be reduced?

**Actual behavior:** [What actually happens] The process hangs forever.

**Reproduces how often:** [What percentage of the time does it reproduce?] Always

### Versions
Mocha 5.1.1
<!-- If applicable, please specify: -->
- The output of `mocha --version` and `node node_modules/.bin/mocha --version`:
- The output of `node --version`: v8.11.2
- The version and architecture of your operating system: Ubuntu
- Your shell (bash, zsh, PowerShell, cmd, etc.): bash
- Your browser and version (if running browser tests):
- Any other third party Mocha related modules (with versions):
- The code transpiler being used:
Please retry this but use the built in `assert` module. same result? if so, the problem may be in our diff-generating code. <sub>Sent with <a href="http://githawk.com">GitHawk</a></sub> With current master(6b5a7855110d9c493dc41aec9fb2cea15aaa42aa), it is hang pretty long rather than forever ```sh $ time mocha rr.js 1) Test failing 0 passing (106ms) 1 failing 1) Test failing: AssertionError [ERR_ASSERTION]: Input A expected to strictly deep-equal input B: + expected - actual ... Lines skipped real 1m58.016s user 2m2.341s sys 0m0.649s ``` With `assert` module, it is same result. ```js const assert = require('assert'); const _ = require('lodash'); it('Test failing', function () { const longArray = _.times(10000, function (i) { return {a : i} }) const shortArray = [] assert.deepStrictEqual(longArray, shortArray); }); ``` ```sh real 1m56.692s user 2m1.052s sys 0m0.571s ``` If it just takes a long time, is that necessarily a _"confirmed bug"_? Because it doesn't take so long without mocha. So, I thought there is a performance issue in mocha even if I didn't find why. This could very well be related to #3686 @furstenheim Try running mocha again with `--full-trace` enabled, if it performs well then it sure is related to #3686 @cyjake Just tried mocha 5.2.0 (latest) with `--full-trace` option and the problem persists @boneskull I've reproduced this issue locally and done a superficial study of the reason. It is blocked at `Base.epilogue()` (`generateDiff()` specifically). When run in Node without Mocha, the AssertionError output appears very fast and is even shortened, see "Lines skipped". When run in Mocha there is an additional part, which is not shortened and takes very long. The information content is the same in both parts. I will investigate, maybe we can just skip it. Or do Browsers need that second part? ``` AssertionError [ERR_ASSERTION]: Input A expected to strictly deep-equal input B: + expected - actual ... Lines skipped - [ - { - a: 1 - }, // [...] - { - a: 6 - }, - { ... + [] ... + expected - actual // additional Mocha part -[ - { - "a": 1 - } - { ``` The first part is read from `err.message`, an `AssertionError` of the built in `assert` module. The output is perfectly formatted and shortened. Unfortunately we can not take this formatted diff for granted with all third party assertion libraries. The second part is made by Mocha and has to be shortened somehow. Running Mocha with `no-diff` option, the performance is very fast, since no Mocha diff is created. Mocha is using [jsdiff](https://github.com/kpdecker/jsdiff) for creating a diff-patch out off `err.actual` and `err.expected`. Since several years there have been open issues regarding heavy performance problems with large files. Maybe we should think about evaluating another package one day. For now I propose to just shorten the two strings to be compared before creating the diff-patch. ```js var generateDiff = (exports.generateDiff = function(actual, expected) { var diffSize = 4096; if (actual.length > diffSize) { actual = actual.substring(0, diffSize) + ' ... Lines skipped'; } if (expected.length > diffSize) { expected = expected.substring(0, diffSize) + ' ... Lines skipped'; } return exports.inlineDiffs ? inlineDiff(actual, expected) : unifiedDiff(actual, expected); }); ``` @furstenheim Do you have time to patch `base.js` and give it a try with your `chai` example, please? @juergba checked. 
That change does the trick Does upgrading our ['jsdiff'](https://github.com/kpdecker/jsdiff/releases) dependency make any difference (before making changes to Mocha)? No, doesn't make any difference. A nice solution would be using an option like `diff-timeout` to limit the time spent in building the diff, like Google's [diff-match-patch](https://github.com/google/diff-match-patch/wiki/API) offers. But there is no sense in making a PR which is ignored to death then. I think it is not JSdiff fault because.. ## lib/reporters/base.js ```js var msg = diff.createPatch('string', actual, expected); var lines = msg.split('\n').splice(5); ``` this code spent a lot of time in test code and then result real 2m40.205s user 2m43.989s sys 0m1.058s so I think it is relevant to diff.createPatch performance in first But..I think it is wrong. it is only late in mocha! not the jsdiff fault! I tested same code in **only** jsdiff ```js const diff = require('diff') const _ = require('lodash') let actual = _.times(10000, function (i) { return {a : i} }) let expected = [] actual = JSON.stringify(actual) expected = JSON.stringify(expected) var msg = diff.createPatch('string', actual, expected) console.log(msg) var lines = msg.split('\n').splice(5); console.log(lines) ``` the result is.. real 0m0.436s user 0m0.140s sys 0m0.047s so fast.. but mocha in diff is 3.5 version and my tested diff version is 4.0 version I will check 4.0 version in mocha Actually almost I found it!! In diff structure is like that set diff variable like this ## lib > patch > create.js > structuredPatch ``` var diff = /*istanbul ignore start*/ (0, /*istanbul ignore end*/ /*istanbul ignore start*/ _line /*istanbul ignore end*/ . /*istanbul ignore start*/ diffLines) /*istanbul ignore end*/ (oldStr, newStr, options); console.log(diff, _line.diffLines, oldStr, newStr, options) ``` and then.. it said like this in mocha ``` {\n "a": 9789\n }\n {\n "a": 9790\n }\n {\n "a": 9791\n }\n {\n "a": 9792\n }\n {\n "a": 9793\n }\n {\n "a": 9794\n }\n {\n "a": 9795\n }\n {\n "a": 9796\n }\n {\n "a": 9797\n }\n {\n "a": 9798\n }\n {\n "a": 9799\n }\n {\n "a": 9800\n }\n {\n "a": 9801\n }\n {\n "a": 9802\n }\n {\n "a": 9803\n }\n {\n "a": 9804\n }\n {\n "a": 9805\n }\n {\n "a": 9806\n }\n {\n "a": 9807\n }\n {\n "a": 9808\n }\n {\n "a": 9809\n }\n {\n "a": 9810\n }\n {\n "a": 9811\n }\n {\n "a": 9812\n }\n {\n "a": 9813\n }\n {\n "a": 9814\n }\n {\n "a": 9815\n }\n {\n ``` but..!!! in only diff ``` "a":9949},{"a":9950},{"a":9951},{"a":9952},{"a":9953},{"a":9954},{"a":9955},{"a":9956},{"a":9957},{"a":9958},{"a":9959},{"a":9960},{"a":9961},{"a":9962},{"a":9963},{"a":9964},{"a":9965},{"a":9966},{"a":9967},{"a":9968},{"a":9969},{"a":9970},{"a":9971},{"a":9972},{"a":9973},{"a":9974},{"a":9975},{"a":9976},{"a":9977},{"a":9978},{"a":9979},{"a":9980},{"a":9981},{"a":9982},{"a":9983},{"a":9984},{"a":9985},{"a":9986},{"a":9987},{"a":9988},{"a":9989},{"a":9990},{"a":9991},{"a":9992},{"a":9993},{"a":9994},{"a":9995},{"a":9996},{"a":9997},{"a":9998},{"a":9999}]' ``` very clear in mocha stringify logic is wrong! and now i will figure it out! @juergba I was wrong.. sorry.the reason is all about the jsdiff ## 1. I logged to real err.actual and write file to message.txt ```js //in reporters/base.js fs.writeFileSync('./message.txt', err.actual); msg += generateDiff(err.actual, err.expected); // indent stack trace stack = stack.replace(/^/gm, ' '); // indented test title var testTitle = ''; test.titlePath().forEach(function(str, index) { if (index !== 0) { ... ``` ## 2. 
and testing jsDiff ```js const diff = require('diff') const _ = require('lodash') const fs = require('fs') const expected = '[]' const actual = fs.readFileSync('./message.txt').toString(); console.log(actual) var msg = diff.createPatch('string', actual, expected) console.log(msg) ``` but jsdiff can't handle like.. `"a": 9789\n }\n {\n "a": 9790\n }\n` .... I'm sorry to confuse you. I just ran into this issue due to large and difficult to diff `err.actual` and `err.expected` objects: ```js const assert = require('assert'); const zlib = require('zlib'); it('produces a large/slow diff', () => { assert.deepStrictEqual(new zlib.Gzip(), new zlib.Inflate()); }); ``` I killed mocha after 10 minutes. Inspector showed the following stack trace: at execEditLength (/tmp/example/node_modules/diff/lib/diff/base.js:53:7) at Diff.diff (/tmp/example/node_modules/diff/lib/diff/base.js:116:19) at diffLines (/tmp/example/node_modules/diff/lib/diff/line.js:44:19) at structuredPatch (/tmp/example/node_modules/diff/lib/patch/create.js:20:83) at createTwoFilesPatch (/tmp/example/node_modules/diff/lib/patch/create.js:126:14) at Object.createPatch (/tmp/example/node_modules/diff/lib/patch/create.js:146:10) at unifiedDiff (/tmp/example/node_modules/mocha/lib/reporters/base.js:449:18) at exports.generateDiff (/tmp/example/node_modules/mocha/lib/reporters/base.js:188:9) at /tmp/example/node_modules/mocha/lib/reporters/base.js:263:14 at Array.forEach (<anonymous>) at Function.exports.list (/tmp/example/node_modules/mocha/lib/reporters/base.js:212:12) at Spec.Base.epilogue (/tmp/example/node_modules/mocha/lib/reporters/base.js:364:10) at Object.onceWrapper (events.js:286:20) at Runner.emit (events.js:203:15) at /tmp/example/node_modules/mocha/lib/runner.js:908:12 at /tmp/example/node_modules/mocha/lib/runner.js:779:7 at next (/tmp/example/node_modules/mocha/lib/runner.js:362:14) at Immediate._onImmediate (/tmp/example/node_modules/mocha/lib/runner.js:428:5) at runCallback (timers.js:705:18) at tryOnImmediate (timers.js:676:5) at processImmediate (timers.js:658:5) with `actual.length === 107108` and `expected.length === 115195`. > Mocha is using [jsdiff](https://github.com/kpdecker/jsdiff) for creating a diff-patch out off `err.actual` and `err.expected`. Since several years there have been open issues regarding heavy performance problems with large files. Maybe we should think about evaluating another package one day. Apparently Google's `diff-match-patch` compares favorably. (See kpdecker/jsdiff#239). You may also want to consider limiting the size of the input to diff. Finding a minimal diff of 2 highly-dissimilar multi-MB strings is probably not a good use of time/compute/output screen space, even with a fast algorithm. It could also be an easy interim fix until diff alternatives are evaluated. I would like to see this move forward, as we are still getting issues relating to it. it has issues from top to bottom -- from the stringification code to the diffing of those canonical results... yuk.
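As a footnote to the `diff-timeout` idea floated earlier in this thread, here is a hedged sketch using Google's diff-match-patch. It assumes the `diff-match-patch` npm package and placeholder inputs standing in for `err.actual` / `err.expected`; it is not what Mocha currently ships:

```js
const DiffMatchPatch = require('diff-match-patch');

const dmp = new DiffMatchPatch();
dmp.Diff_Timeout = 1; // stop searching for a minimal diff after roughly one second

// Placeholder inputs for illustration:
const actual = JSON.stringify({a: 1});
const expected = JSON.stringify({a: 2});

const diffs = dmp.diff_main(actual, expected);
dmp.diff_cleanupSemantic(diffs); // merge trivial edits so the (possibly non-minimal) diff stays readable
console.log(diffs);
```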
2021-05-24T14:15:43Z
8.4
mochajs/mocha
4557
mochajs__mocha-4557
[ "4551" ]
84d0c9671ba7ebb2a88a7a8311965dbc2bb424d2
diff --git a/lib/esm-utils.js b/lib/esm-utils.js --- a/lib/esm-utils.js +++ b/lib/esm-utils.js @@ -3,7 +3,29 @@ const url = require('url'); const formattedImport = async file => { if (path.isAbsolute(file)) { - return import(url.pathToFileURL(file)); + try { + return await import(url.pathToFileURL(file)); + } catch (err) { + // This is a hack created because ESM in Node.js (at least in Node v15.5.1) does not emit + // the location of the syntax error in the error thrown. + // This is problematic because the user can't see what file has the problem, + // so we add the file location to the error. + // This `if` should be removed once Node.js fixes the problem. + if ( + err instanceof SyntaxError && + err.message && + err.stack && + !err.stack.includes(file) + ) { + const newErrorWithFilename = new SyntaxError(err.message); + newErrorWithFilename.stack = err.stack.replace( + /^SyntaxError/, + `SyntaxError[ @${file} ]` + ); + throw newErrorWithFilename; + } + throw err; + } } return import(file); };
diff --git a/test/integration/esm.spec.js b/test/integration/esm.spec.js --- a/test/integration/esm.spec.js +++ b/test/integration/esm.spec.js @@ -1,5 +1,7 @@ 'use strict'; -var run = require('./helpers').runMochaJSON; +var helpers = require('./helpers'); +var run = helpers.runMochaJSON; +var runMochaAsync = helpers.runMochaAsync; var utils = require('../../lib/utils'); var args = +process.versions.node.split('.')[0] >= 13 ? [] : ['--experimental-modules']; @@ -38,6 +40,17 @@ describe('esm', function() { }); }); + it('should show file location when there is a syntax error in the test', async function() { + var fixture = 'esm/syntax-error/esm-syntax-error.fixture.mjs'; + const err = await runMochaAsync(fixture, args, {stdio: 'pipe'}).catch( + err => err + ); + expect(err.output, 'to contain', 'SyntaxError').and( + 'to contain', + 'esm-syntax-error.fixture.mjs' + ); + }); + it('should recognize esm files ending with .js due to package.json type flag', function(done) { if (!utils.supportsEsModules(false)) return this.skip(); diff --git a/test/integration/fixtures/esm/syntax-error/esm-syntax-error.fixture.mjs b/test/integration/fixtures/esm/syntax-error/esm-syntax-error.fixture.mjs new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/esm/syntax-error/esm-syntax-error.fixture.mjs @@ -0,0 +1,3 @@ +// This is intentionally a syntax error +it('should never run because of a syntax error here', => { +});
If one among multiple ESM tests has a syntax error, then Mocha doesn’t report which one ### Prerequisites <!-- Place an `x` between the square brackets on the lines below for every satisfied prerequisite. --> - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [x] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. ### Description If I run multiple ESM tests (e.g. via `npm t test*.mjs`), and one of them has a syntax error, then Mocha doesn’t tell me which file it is. ### Steps to Reproduce Repository with MCVE: https://github.com/rauschma/mocha-test-repo Execute either of the following two commands: ``` npm t test*.mjs npm t 'test*.mjs' ``` `test2.mjs` has a syntax error. **Expected behavior:** I’d like Mocha to tell me which file has the syntax error. **Actual behavior:** I’m getting the following error message. ``` SyntaxError: Unexpected token '=>' at Loader.moduleStrategy (internal/modules/esm/translators.js:117:18) at async link (internal/modules/esm/module_job.js:42:21) ``` **Reproduces how often:** 100% ### Versions - The output of `mocha --version` and `node node_modules/.bin/mocha --version`: 8.2.1 (there is no globally installed Mocha) - The output of `node --version`: v14.15.4
2021-01-20T15:56:51Z
8.2
mochajs/mocha
4607
mochajs__mocha-4607
[ "4131" ]
bbf0c11b29544de91a18c1bd667c975ee44b7c90
diff --git a/example/config/.mocharc.js b/example/config/.mocharc.js --- a/example/config/.mocharc.js +++ b/example/config/.mocharc.js @@ -33,7 +33,7 @@ module.exports = { parallel: false, recursive: false, reporter: 'spec', - 'reporter-option': ['foo=bar', 'baz=quux'], + 'reporter-option': ['foo=bar', 'baz=quux'], // array, not object require: '@babel/register', retries: 1, slow: '75', diff --git a/lib/reporters/base.js b/lib/reporters/base.js --- a/lib/reporters/base.js +++ b/lib/reporters/base.js @@ -90,7 +90,7 @@ exports.colors = { exports.symbols = { ok: symbols.success, - err: symbols.err, + err: symbols.error, dot: '.', comma: ',', bang: '!' diff --git a/lib/reporters/json.js b/lib/reporters/json.js --- a/lib/reporters/json.js +++ b/lib/reporters/json.js @@ -7,12 +7,16 @@ */ var Base = require('./base'); +var fs = require('fs'); +var path = require('path'); +const createUnsupportedError = require('../errors').createUnsupportedError; +const utils = require('../utils'); var constants = require('../runner').constants; var EVENT_TEST_PASS = constants.EVENT_TEST_PASS; +var EVENT_TEST_PENDING = constants.EVENT_TEST_PENDING; var EVENT_TEST_FAIL = constants.EVENT_TEST_FAIL; var EVENT_TEST_END = constants.EVENT_TEST_END; var EVENT_RUN_END = constants.EVENT_RUN_END; -var EVENT_TEST_PENDING = constants.EVENT_TEST_PENDING; /** * Expose `JSON`. @@ -30,7 +34,7 @@ exports = module.exports = JSONReporter; * @param {Runner} runner - Instance triggers reporter actions. * @param {Object} [options] - runner options */ -function JSONReporter(runner, options) { +function JSONReporter(runner, options = {}) { Base.call(this, runner, options); var self = this; @@ -38,6 +42,14 @@ function JSONReporter(runner, options) { var pending = []; var failures = []; var passes = []; + var output; + + if (options.reporterOption && options.reporterOption.output) { + if (utils.isBrowser()) { + throw createUnsupportedError('file output not supported in browser'); + } + output = options.reporterOption.output; + } runner.on(EVENT_TEST_END, function(test) { tests.push(test); @@ -66,7 +78,20 @@ function JSONReporter(runner, options) { runner.testResults = obj; - process.stdout.write(JSON.stringify(obj, null, 2)); + var json = JSON.stringify(obj, null, 2); + if (output) { + try { + fs.mkdirSync(path.dirname(output), {recursive: true}); + fs.writeFileSync(output, json); + } catch (err) { + console.error( + `${Base.symbols.err} [mocha] writing output to "${output}" failed: ${err.message}\n` + ); + process.stdout.write(json); + } + } else { + process.stdout.write(json); + } }); }
diff --git a/test/reporters/json.spec.js b/test/reporters/json.spec.js --- a/test/reporters/json.spec.js +++ b/test/reporters/json.spec.js @@ -1,12 +1,16 @@ 'use strict'; +var fs = require('fs'); var sinon = require('sinon'); +var JSONReporter = require('../../lib/reporters/json'); +var utils = require('../../lib/utils'); var Mocha = require('../../'); var Suite = Mocha.Suite; var Runner = Mocha.Runner; var Test = Mocha.Test; describe('JSON reporter', function() { + var mocha; var suite; var runner; var testTitle = 'json test 1'; @@ -14,131 +18,216 @@ describe('JSON reporter', function() { var noop = function() {}; beforeEach(function() { - var mocha = new Mocha({ + mocha = new Mocha({ reporter: 'json' }); suite = new Suite('JSON suite', 'root'); runner = new Runner(suite); - var options = {}; - /* eslint no-unused-vars: off */ - var mochaReporter = new mocha._reporter(runner, options); - }); - - beforeEach(function() { - sinon.stub(process.stdout, 'write').callsFake(noop); }); afterEach(function() { sinon.restore(); }); - it('should have 1 test failure', function(done) { - var error = {message: 'oh shit'}; + describe('test results', function() { + beforeEach(function() { + var options = {}; + /* eslint no-unused-vars: off */ + var mochaReporter = new mocha._reporter(runner, options); + }); + + beforeEach(function() { + sinon.stub(process.stdout, 'write').callsFake(noop); + }); + + it('should have 1 test failure', function(done) { + var error = {message: 'oh shit'}; - var test = new Test(testTitle, function(done) { - done(new Error(error.message)); + var test = new Test(testTitle, function(done) { + done(new Error(error.message)); + }); + + test.file = testFile; + suite.addTest(test); + + runner.run(function(failureCount) { + sinon.restore(); + expect(runner, 'to satisfy', { + testResults: { + failures: [ + { + title: testTitle, + file: testFile, + err: { + message: error.message + } + } + ] + } + }); + expect(failureCount, 'to be', 1); + done(); + }); }); - test.file = testFile; - suite.addTest(test); - - runner.run(function(failureCount) { - sinon.restore(); - expect(runner, 'to satisfy', { - testResults: { - failures: [ - { - title: testTitle, - file: testFile, - err: { - message: error.message + it('should have 1 test pending', function(done) { + var test = new Test(testTitle); + test.file = testFile; + suite.addTest(test); + + runner.run(function(failureCount) { + sinon.restore(); + expect(runner, 'to satisfy', { + testResults: { + pending: [ + { + title: testTitle, + file: testFile } - } - ] - } + ] + } + }); + expect(failureCount, 'to be', 0); + done(); }); - expect(failureCount, 'to be', 1); - done(); }); - }); - it('should have 1 test pending', function(done) { - var test = new Test(testTitle); - test.file = testFile; - suite.addTest(test); - - runner.run(function(failureCount) { - sinon.restore(); - expect(runner, 'to satisfy', { - testResults: { - pending: [ - { - title: testTitle, - file: testFile - } - ] - } + it('should have 1 test pass', function(done) { + const test = new Test(testTitle, () => {}); + + test.file = testFile; + suite.addTest(test); + + runner.run(function(failureCount) { + sinon.restore(); + expect(runner, 'to satisfy', { + testResults: { + passes: [ + { + title: testTitle, + file: testFile, + speed: /(slow|medium|fast)/ + } + ] + } + }); + expect(failureCount, 'to be', 0); + done(); }); - expect(failureCount, 'to be', 0); - done(); }); - }); - it('should have 1 test pass', function(done) { - const test = new Test(testTitle, () => {}); - - test.file = 
testFile; - suite.addTest(test); - - runner.run(function(failureCount) { - expect(runner, 'to satisfy', { - testResults: { - passes: [ - { - title: testTitle, - file: testFile, - speed: /(slow|medium|fast)/ - } - ] - } + it('should handle circular objects in errors', function(done) { + var testTitle = 'json test 1'; + function CircleError() { + this.message = 'oh shit'; + this.circular = this; + } + var error = new CircleError(); + + var test = new Test(testTitle, function(done) { + throw error; + }); + + test.file = testFile; + suite.addTest(test); + + runner.run(function(failureCount) { + sinon.restore(); + expect(runner, 'to satisfy', { + testResults: { + failures: [ + { + title: testTitle, + file: testFile, + err: { + message: error.message + } + } + ] + } + }); + expect(failureCount, 'to be', 1); + done(); }); - expect(failureCount, 'to be', 0); - done(); }); }); - it('should handle circular objects in errors', function(done) { - var testTitle = 'json test 1'; - function CircleError() { - this.message = 'oh shit'; - this.circular = this; - } - var error = new CircleError(); + describe('when "reporterOption.output" is provided', function() { + var expectedDirName = 'reports'; + var expectedFileName = 'reports/test-results.json'; + var options = { + reporterOption: { + output: expectedFileName + } + }; + + beforeEach(function() { + /* eslint no-unused-vars: off */ + var mochaReporter = new mocha._reporter(runner, options); + }); - var test = new Test(testTitle, function(done) { - throw error; + beforeEach(function() { + // Add one test to suite to avoid assertions against empty test results + var test = new Test(testTitle, () => {}); + test.file = testFile; + suite.addTest(test); }); - test.file = testFile; - suite.addTest(test); - - runner.run(function(failureCount) { - sinon.restore(); - expect(runner, 'to satisfy', { - testResults: { - failures: [ - { - title: testTitle, - file: testFile, - err: { - message: error.message - } - } - ] - } + it('should write test results to file', function(done) { + const fsMkdirSync = sinon.stub(fs, 'mkdirSync'); + const fsWriteFileSync = sinon.stub(fs, 'writeFileSync'); + + fsWriteFileSync.callsFake(function(filename, content) { + const expectedJson = JSON.stringify(runner.testResults, null, 2); + expect(expectedFileName, 'to be', filename); + expect(content, 'to be', expectedJson); }); - expect(failureCount, 'to be', 1); - done(); + + runner.run(function() { + expect( + fsMkdirSync.calledWith(expectedDirName, {recursive: true}), + 'to be true' + ); + expect(fsWriteFileSync.calledOnce, 'to be true'); + done(); + }); + }); + + it('should warn and write test results to console', function(done) { + const fsMkdirSync = sinon.stub(fs, 'mkdirSync'); + const fsWriteFileSync = sinon.stub(fs, 'writeFileSync'); + + fsWriteFileSync.throws('unable to write file'); + + const outLog = []; + const fake = chunk => outLog.push(chunk); + sinon.stub(process.stderr, 'write').callsFake(fake); + sinon.stub(process.stdout, 'write').callsFake(fake); + + runner.run(function() { + sinon.restore(); + expect( + fsMkdirSync.calledWith(expectedDirName, {recursive: true}), + 'to be true' + ); + expect(fsWriteFileSync.calledOnce, 'to be true'); + expect( + outLog[0], + 'to contain', + `[mocha] writing output to "${expectedFileName}" failed:` + ); + expect(outLog[1], 'to match', /"fullTitle": "JSON suite json test 1"/); + done(); + }); + }); + + it('should throw "unsupported error" in browser', function() { + sinon.stub(utils, 'isBrowser').callsFake(() => true); + expect( + () => 
new JSONReporter(runner, options), + 'to throw', + 'file output not supported in browser' + ); }); }); });
Add output option to JSON reporter

**Describe the solution you'd like**

I'd like to use the test report in other applications (e.g. Slack), so a JSON file would be useful.

`package.json`:
```json
{
  "scripts": {
    "test": "mocha ./test/*-test.js --reporter json --reporter-options output=report.json",
    "posttest": "node slack.js"
  }
}
```

`slack.js`:
```js
const readJSONFile = require('./readJSONFile')
const createMessage = require('./createMessage')
const postMessage = require('./postMessage')

// wrapped in an async IIFE because top-level await isn't available in a CommonJS script
;(async () => {
  const report = await readJSONFile('report.json')
  const message = createMessage(report)
  await postMessage(message)
})()
```

**Describe alternatives you've considered**
- https://github.com/i3oges/mocha-json-file-reporter
@munierujp In your shell you can pipe the report output to a file. You don't need a `reporter-option` for this. In Windows it is the ">" sign.

The standard output may occasionally contain noise. And the xunit reporter already supports an `output` option:

> By default, it will output to the console. To write directly to a file, use --reporter-options output=filename.xml.

https://mochajs.org/#xunit

Is there a difference between these reporters?

any update here? if you need another use case to add momentum to this ticket: i'm trying to add `mocha-multi-reporters` to a project so that our custom `stdout` reporter still shows up on our bitbucket pipeline, but we also get a json output blob so we can do more programmatically interesting things with the results outside of the execution of the tests themselves.

e: ~~@munierujp the [json](https://github.com/mochajs/mocha/blob/master/lib/reporters/json.js) and [xunit](https://github.com/mochajs/mocha/blob/master/lib/reporters/xunit.js) reporters are actually implemented rather differently. i'm currently taking a stab at introducing rudimentary output file specification similar to how it's done in the `xunit` reporter.~~

e2: @munierujp's solution below does what i need. thanks!

I've created a third-party reporter for outputting test results as a JSON file:

- [@mochajs/json-file-reporter](https://www.npmjs.com/package/@mochajs/json-file-reporter)

Having both human-readable console output and json or xunit file output at the same time would be very useful. It's quite ridiculous that you need one 3rd-party package to support multiple reporters and another 3rd-party package to write the JSON report to a file. Redirecting the whole output to a file wouldn't work in this case. I ran into an issue because [json-file-reporter](https://github.com/mocha-community/json-file-reporter) doesn't include the `file` property like the built-in `json` reporter does. This is exactly the kind of issue that would not happen if the built-in `json` reporter had an `output` option.

@dorny yes, you should definitely terminate this ridiculous situation. What about a PR from your side? I will be back, to check on your reply.

@juergba sure, I will do my best :) And don't get me wrong. My intention was not to be offensive. I was simply pointing out that the current situation is not ideal for the user and your suggestion to pipe the output does not cover all use cases. By the way, I don't actually need this for myself. I'm working on a GitHub action, [test-reporter](https://github.com/dorny/test-reporter). I originally implemented support for Jest, which I needed, but thought it could be useful for others to support mocha too, as it's the second most used testing framework. Meanwhile I've also discovered another option: the [mochawesome](https://www.npmjs.com/package/mochawesome) reporter. It produces JSON and HTML output while you can still specify another reporter for console output. Unfortunately it uses its own JSON output format, so I would still prefer a solution using [mocha-multi-reporters](https://www.npmjs.com/package/mocha-multi-reporters) and mocha's built-in JSON reporter.

@juergba I took a first look at the code. Changes to the `json.js` reporter are trivial. Basically it would follow the same pattern as is already implemented in the `xunit` reporter. However, I'm not so sure about the unit tests. [json.spec.js](https://github.com/mochajs/mocha/blob/v8.3.2/test/reporters/json.spec.js) looks like it would benefit from some refactoring. Existing tests are not actually testing the stdout of the reporter; instead they look at the `testResults` property set on the `runner`.
This property is set [here](https://github.com/mochajs/mocha/blob/v8.3.2/lib/reporters/json.js#L67). Is `runner.testResults` actually part of the API, or is it some old hack just for testing purposes? It doesn't look like it's used anywhere else. All other reporter tests use a different pattern - example [here](https://github.com/mochajs/mocha/blob/v8.3.2/test/reporters/spec.spec.js#L45).

My idea would be to remove the `testResults` property and rework `json.spec.js` so stdout is captured, parsed as JSON and then checked against expectations (a rough sketch of that pattern appears at the end of this thread). Another test would then check that stdout is empty when the `output` option is provided and that the correct content is created on the file system (mocked, of course).

So should I remove the setting of the `testResults` property in the `json` reporter and refactor the tests as I proposed? As I'm completely new to the mocha codebase, I would appreciate guidance. Thanks.

@dorny thank you for taking over.

> [...] Is `runner.testResults` actually part of API [...]?

I blamed _lib/reporters/json.js_ and found #1294, which states: `It also saves the json result back into the runner object to allow a programmatically mocha run to query the output.`

So I propose to leave this property as is, but refactor the tests as per your suggestion. Following the `xunit` implementation seems a good approach to me. I don't know this part of our codebase well either, and mocks aren't really my friends. So you shouldn't trust my comments completely.
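A rough sketch of the stdout-capture pattern proposed above; it reuses the `sinon`, `runner` and `expect` helpers already present in Mocha's reporter tests, and is a sketch rather than the final test code:

```js
var sinon = require('sinon');

var chunks = [];
sinon.stub(process.stdout, 'write').callsFake(function(chunk) {
  chunks.push(chunk);
});

runner.run(function() {
  sinon.restore();
  var report = JSON.parse(chunks.join(''));
  // assert on the parsed reporter output instead of runner.testResults
  expect(report.stats.passes, 'to be', 1);
});
```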
2021-03-14T23:40:23Z
9
mochajs/mocha
4,418
mochajs__mocha-4418
[ "4417", "4417" ]
4d7a1716ff5c7fbc74ca048a21b67f4524fcd55a
diff --git a/lib/cli/node-flags.js b/lib/cli/node-flags.js --- a/lib/cli/node-flags.js +++ b/lib/cli/node-flags.js @@ -7,6 +7,7 @@ */ const nodeFlags = process.allowedNodeEnvironmentFlags; +const {isMochaFlag} = require('./run-option-metadata'); const unparse = require('yargs-unparser'); /** @@ -43,16 +44,14 @@ exports.isNodeFlag = (flag, bareword = true) => { flag = flag.replace(/^--?/, ''); } return ( - // treat --require/-r as Mocha flag even though it's also a node flag - !(flag === 'require' || flag === 'r') && // check actual node flags from `process.allowedNodeEnvironmentFlags`, // then historical support for various V8 and non-`NODE_OPTIONS` flags // and also any V8 flags with `--v8-` prefix - ((nodeFlags && nodeFlags.has(flag)) || - debugFlags.has(flag) || - /(?:preserve-symlinks(?:-main)?|harmony(?:[_-]|$)|(?:trace[_-].+$)|gc(?:[_-]global)?$|es[_-]staging$|use[_-]strict$|v8[_-](?!options).+?$)/.test( - flag - )) + (!isMochaFlag(flag) && nodeFlags && nodeFlags.has(flag)) || + debugFlags.has(flag) || + /(?:preserve-symlinks(?:-main)?|harmony(?:[_-]|$)|(?:trace[_-].+$)|gc(?:[_-]global)?$|es[_-]staging$|use[_-]strict$|v8[_-](?!options).+?$)/.test( + flag + ) ); }; diff --git a/lib/cli/run-option-metadata.js b/lib/cli/run-option-metadata.js --- a/lib/cli/run-option-metadata.js +++ b/lib/cli/run-option-metadata.js @@ -12,7 +12,7 @@ * @type {{string:string[]}} * @private */ -exports.types = { +const TYPES = (exports.types = { array: [ 'extension', 'file', @@ -58,7 +58,7 @@ exports.types = { 'slow', 'timeout' ] -}; +}); /** * Option aliases keyed by canonical option name. @@ -88,3 +88,26 @@ exports.aliases = { ui: ['u'], watch: ['w'] }; + +const ALL_MOCHA_FLAGS = Object.keys(TYPES).reduce((acc, key) => { + // gets all flags from each of the fields in `types`, adds those, + // then adds aliases of each flag (if any) + TYPES[key].forEach(flag => { + acc.add(flag); + const aliases = exports.aliases[flag] || []; + aliases.forEach(alias => { + acc.add(alias); + }); + }); + return acc; +}, new Set()); + +/** + * Returns `true` if the provided `flag` is known to Mocha. + * @param {string} flag - Flag to check + * @returns {boolean} If `true`, this is a Mocha flag + * @private + */ +exports.isMochaFlag = flag => { + return ALL_MOCHA_FLAGS.has(flag.replace(/^--?/, '')); +};
diff --git a/test/node-unit/cli/node-flags.spec.js b/test/node-unit/cli/node-flags.spec.js --- a/test/node-unit/cli/node-flags.spec.js +++ b/test/node-unit/cli/node-flags.spec.js @@ -1,22 +1,34 @@ 'use strict'; -const nodeEnvFlags = process.allowedNodeEnvironmentFlags; +const nodeEnvFlags = [...process.allowedNodeEnvironmentFlags]; const { isNodeFlag, impliesNoTimeouts, unparseNodeFlags } = require('../../../lib/cli/node-flags'); +const {isMochaFlag} = require('../../../lib/cli/run-option-metadata'); + describe('node-flags', function() { describe('isNodeFlag()', function() { describe('for all allowed node environment flags', function() { - // NOTE: this is not stubbing nodeEnvFlags in any way, so relies on - // the userland polyfill to be correct. - nodeEnvFlags.forEach(envFlag => { - it(`${envFlag} should return true`, function() { - expect(isNodeFlag(envFlag), 'to be true'); + nodeEnvFlags + .filter(flag => !isMochaFlag(flag)) + .forEach(envFlag => { + it(`${envFlag} should return true`, function() { + expect(isNodeFlag(envFlag), 'to be true'); + }); + }); + }); + + describe('for all allowed node env flags which conflict with mocha flags', function() { + nodeEnvFlags + .filter(flag => isMochaFlag(flag)) + .forEach(envFlag => { + it(`${envFlag} should return false`, function() { + expect(isNodeFlag(envFlag), 'to be false'); + }); }); - }); }); describe('when expecting leading dashes', function() { @@ -24,11 +36,6 @@ describe('node-flags', function() { expect(isNodeFlag('throw-deprecation', false), 'to be false'); expect(isNodeFlag('--throw-deprecation', false), 'to be true'); }); - - it('should return false for --require/-r', function() { - expect(isNodeFlag('--require', false), 'to be false'); - expect(isNodeFlag('-r', false), 'to be false'); - }); }); describe('special cases', function() { @@ -132,14 +139,5 @@ describe('node-flags', function() { ['--v8-numeric-one=1', '--v8-boolean-one', '--v8-numeric-two=2'] ); }); - - it('should special-case "--require"', function() { - // note the only way for this to happen IN REAL LIFE is if you use "--require esm"; - // mocha eats all --require args otherwise. - expect(unparseNodeFlags({require: 'mcrib'}), 'to equal', [ - '--require', - 'mcrib' - ]); - }); }); });
node flag parsing strategy is backwards https://github.com/nodejs/node/pull/34637 introduces a `-u` flag for `node`, which conflicts with Mocha's `-u`/`--ui` flag. I had expected Mocha would prefer its _own_ options over `node`'s where there was a conflict, but this is not the case--instead, Mocha explicitly whitelists `-r`/`--require`, which was previously the only known command-line flag shared between `mocha` and `node`. To fix this, Mocha needs to change its strategy to detect its own options _first_, _then_ attempt to detect if something is a `node` option. Currently, Mocha checks for `node` options first.
cc @bethgriggs This _does_ mean that if someone wants to use custom conditions w/ `mocha`, they need to use `--conditions`, not `-u`.
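To illustrate the precedence the patch above establishes, a small hedged example; `run-option-metadata` and `node-flags` are Mocha internals (not public API), the deep require paths are an assumption, and the snippet only behaves this way on a Mocha build containing the change:

```js
// Illustration only: Mocha flags (including aliases like -u for --ui) are
// claimed by Mocha first; everything else may still be forwarded to node.
const {isMochaFlag} = require('mocha/lib/cli/run-option-metadata');

console.log(isMochaFlag('u'));            // true  -> alias of --ui, Mocha keeps it
console.log(isMochaFlag('--require'));    // true  -> shared with node, Mocha wins
console.log(isMochaFlag('--conditions')); // false -> falls through to node handling
```

`isNodeFlag()` now consults `isMochaFlag()` before checking `process.allowedNodeEnvironmentFlags`, which is what makes `-u` resolve to Mocha's UI option again.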
2020-08-25T18:18:24Z
8.1
mochajs/mocha
4,382
mochajs__mocha-4382
[ "4347" ]
02bdb6bc6c029fb0c01389b27797d00faa76ddde
diff --git a/lib/cli/watch-run.js b/lib/cli/watch-run.js --- a/lib/cli/watch-run.js +++ b/lib/cli/watch-run.js @@ -59,6 +59,10 @@ exports.watchParallelRun = ( // in `createRerunner`), we need to call `mocha.ui()` again to set up the context/globals. newMocha.ui(newMocha.options.ui); + // we need to call `newMocha.rootHooks` to set up rootHooks for the new + // suite + newMocha.rootHooks(newMocha.options.rootHooks); + // in parallel mode, the main Mocha process doesn't actually load the // files. this flag prevents `mocha.run()` from autoloading. newMocha.lazyLoadFiles(true); @@ -118,6 +122,10 @@ exports.watchRun = (mocha, {watchFiles, watchIgnore}, fileCollectParams) => { // in `createRerunner`), we need to call `mocha.ui()` again to set up the context/globals. newMocha.ui(newMocha.options.ui); + // we need to call `newMocha.rootHooks` to set up rootHooks for the new + // suite + newMocha.rootHooks(newMocha.options.rootHooks); + return newMocha; }, afterRun({watcher}) {
diff --git a/test/integration/fixtures/options/watch/hook.fixture.js b/test/integration/fixtures/options/watch/hook.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/options/watch/hook.fixture.js @@ -0,0 +1,7 @@ +module.exports = { + mochaHooks: { + ["<hook>"]: function() { + throw new Error("<hook> Hook Error"); + }, + }, +}; diff --git a/test/integration/options/watch.spec.js b/test/integration/options/watch.spec.js --- a/test/integration/options/watch.spec.js +++ b/test/integration/options/watch.spec.js @@ -275,6 +275,43 @@ describe('--watch', function() { expect(results[1].tests, 'to have length', 2); }); }); + + describe('with required hooks', function() { + /** + * Helper for setting up hook tests + * + * @param {string} hookName name of hook to test + * @return {function} + */ + function setupHookTest(hookName) { + return function() { + const testFile = path.join(tempDir, 'test.js'); + const hookFile = path.join(tempDir, 'hook.js'); + + copyFixture('__default__', testFile); + copyFixture('options/watch/hook', hookFile); + + replaceFileContents(hookFile, '<hook>', hookName); + + return runMochaWatch( + [testFile, '--require', hookFile], + tempDir, + () => { + touchFile(testFile); + } + ).then(results => { + expect(results.length, 'to equal', 2); + expect(results[0].failures, 'to have length', 1); + expect(results[1].failures, 'to have length', 1); + }); + }; + } + + it('mochaHooks.beforeAll runs as expected', setupHookTest('beforeAll')); + it('mochaHooks.beforeEach runs as expected', setupHookTest('beforeEach')); + it('mochaHooks.afterAll runs as expected', setupHookTest('afterAll')); + it('mochaHooks.afterEach runs as expected', setupHookTest('afterEach')); + }); }); });
Thrown error in afterEach as a root hook plugin is ignored in watchmode <!-- Have you read Mocha's Code of Conduct? By filing an Issue, you are expected to comply with it, including treating everyone with respect: https://github.com/mochajs/mocha/blob/master/.github/CODE_OF_CONDUCT.md For more, check out the Mocha Gitter chat room: https://gitter.im/mochajs/mocha Detail the steps necessary to reproduce the problem. To get the fastest support, create an MCVE and upload it to GitHub. create an [MCVE](https://stackoverflow.com/help/mcve) and upload it to GitHub. --> ### Prerequisites <!-- Place an `x` between the square brackets on the lines below for every satisfied prerequisite. --> - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - ~[ ] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code.~ Not syntax related - ~[ ] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself~ Not related to code under test - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. ### Description <!-- [Description of the issue] --> ### Steps to Reproduce Full repro: https://github.com/eps1lon/repro-mocha-root-hook-throw ```js // test.js it("root hooks tests onces", () => {}); it("root hooks tests twice", () => {}); // `hooks.js` module.exports = { mochaHooks: { afterEach() { throw new Error("Threw in root afterEach"); }, }, }; // .mocharc.js module.exports = { require: [require.resolve("./hooks")], }; ``` - `mocha test.js` fails as expected - `mocha test.js --watch` passes unexpectedly **Expected behavior:** afterEach as a root hook behave the same as "local" hooks with regard to thrown errors. **Actual behavior:** Thrown error is ignored in afterEach in watchmode **Reproduces how often:** All the time ### Versions <!-- If applicable, please specify: --> - The output of `mocha --version` and `node node_modules/.bin/mocha --version`: 8.0.1 `mocha --version` does not work because I didn't install it globally - The output of `node --version`: 12.16.2 - Your operating system - name and version: Ubuntu 18.04.4 LTS - architecture (32 or 64-bit): 64bit - Your shell (e.g., bash, zsh, PowerShell, cmd): `fish, version 2.7.1` - Your browser and version (if running browser tests): N/A - Any third-party Mocha-related modules (and their versions): N/A - Any code transpiler (e.g., TypeScript, CoffeeScript, Babel) being used (and its version): N/A ### Additional Information N/A
This might be the same root problem as #4344. One more related issue: console logs are ignored in root hook plugins in watch mode. That sounds like a bug. Hit this issue myself recently. I did a little digging & the issue seems to be caused by the cloning of the suite on L40. https://github.com/mochajs/mocha/blob/02bdb6bc6c029fb0c01389b27797d00faa76ddde/lib/cli/watch-run.js#L38-L66 It looks like `suite.clone()` doesn't copy over any of the hooks. https://github.com/mochajs/mocha/blob/7d3151d08a3082dd022116d00234caf1600a71b5/lib/suite.js#L117-L127 I did a quick local patch to add these & watch worked as expected with root hooks.
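The merged change (see the patch above) takes a slightly different route than patching `suite.clone()`: when the watcher rebuilds the Mocha instance for a rerun, it re-registers the root hooks collected from `--require`d plugins, the same way it re-applies the UI. A rough sketch of that idea, where `createNewMochaInstance()` is a hypothetical stand-in for the rerunner's rebuild step:

```js
// Sketch only; createNewMochaInstance() stands in for the rebuild step
// inside the watch rerunner.
function rebuildForRerun(previousMocha) {
  const newMocha = createNewMochaInstance(previousMocha);

  // the rebuilt instance gets a fresh root suite, so UI globals and the
  // root hooks from --require'd plugins must be set up again
  newMocha.ui(newMocha.options.ui);
  newMocha.rootHooks(newMocha.options.rootHooks);

  return newMocha;
}
```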
2020-07-26T02:20:06Z
8
mochajs/mocha
4,315
mochajs__mocha-4315
[ "4307" ]
a2f2e087a27ee39eec729e9b4c1f5f27a8b69b9e
diff --git a/lib/cli/options.js b/lib/cli/options.js --- a/lib/cli/options.js +++ b/lib/cli/options.js @@ -54,16 +54,19 @@ const configuration = Object.assign({}, YARGS_PARSER_CONFIG, { /** * This is a really fancy way to: - * - ensure unique values for `array`-type options - * - use its array's last element for `boolean`/`number`/`string`- options given multiple times + * - `array`-type options: ensure unique values and evtl. split comma-delimited lists + * - `boolean`/`number`/`string`- options: use last element when given multiple times * This is passed as the `coerce` option to `yargs-parser` * @private * @ignore */ +const globOptions = ['spec', 'ignore']; const coerceOpts = Object.assign( types.array.reduce( (acc, arg) => - Object.assign(acc, {[arg]: v => Array.from(new Set(list(v)))}), + Object.assign(acc, { + [arg]: v => Array.from(new Set(globOptions.includes(arg) ? v : list(v))) + }), {} ), types.boolean
diff --git a/test/node-unit/cli/options.spec.js b/test/node-unit/cli/options.spec.js --- a/test/node-unit/cli/options.spec.js +++ b/test/node-unit/cli/options.spec.js @@ -562,7 +562,9 @@ describe('options', function() { readFileSync = sandbox.stub(); readFileSync.onFirstCall().throws(); findConfig = sandbox.stub().returns('/some/.mocharc.json'); - loadConfig = sandbox.stub().returns({spec: '*.spec.js'}); + loadConfig = sandbox + .stub() + .returns({spec: '{dirA,dirB}/**/*.spec.js'}); findupSync = sandbox.stub(); loadOptions = proxyLoadOptions({ readFileSync, @@ -573,10 +575,41 @@ describe('options', function() { result = loadOptions(['*.test.js']); }); - it('should place both into the positional arguments array', function() { - expect(result, 'to have property', '_', ['*.test.js', '*.spec.js']); + it('should place both - unsplitted - into the positional arguments array', function() { + expect(result, 'to have property', '_', [ + '*.test.js', + '{dirA,dirB}/**/*.spec.js' + ]); }); }); }); + + describe('"ignore" handling', function() { + let result; + + beforeEach(function() { + readFileSync = sandbox.stub(); + readFileSync.onFirstCall().throws(); + findConfig = sandbox.stub().returns('/some/.mocharc.json'); + loadConfig = sandbox + .stub() + .returns({ignore: '{dirA,dirB}/**/*.spec.js'}); + findupSync = sandbox.stub(); + loadOptions = proxyLoadOptions({ + readFileSync, + findConfig, + loadConfig, + findupSync + }); + result = loadOptions(['--ignore', '*.test.js']); + }); + + it('should not split option values by comma', function() { + expect(result, 'to have property', 'ignore', [ + '*.test.js', + '{dirA,dirB}/**/*.spec.js' + ]); + }); + }); }); });
Migrating from mocha.opts to mocharc broke glob pattern support ### Prerequisites <details> <summary>Click for details</summary> - [X] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [X] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [X] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [X] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. </details> --- ### Description - Using `opts configuration`, the [glob pattern](https://globster.xyz/) `{controllers,test}/**/*.test.js` used to expand into: + `controllers/**/*.test.js` + `test/**/*.test.js` - Using RC configuration files, this glob pattern gets tokenized into invalid ones: ``` [mocha/lib/cli/options.js] > parse() > yargsParser.detailed() > result.argv.spec [ '{controllers', 'test}/**/*.test.js' ] Warning: Cannot find any files matching pattern "{controllers" Warning: Cannot find any files matching pattern "test}/**/*.test.js" ``` - Commenting out the [**yargs** parsing option `coerce: coerceOpts`](https://github.com/mochajs/mocha/blob/master/lib/cli/options.js#L124) fixes the glob tokenization, even though it isn't a long-term fix. - The `coerceOpts` function can be found here: https://github.com/mochajs/mocha/blob/master/lib/cli/options.js#L63 As a side note, I should add that the two following `spec` values give the results highlighted above - `spec: '{controllers,test}/**/*.test.js',` - `spec: [ '{controllers,test}/**/*.test.js' ],` --- ### Steps to Reproduce I created a barebone repository to highlight the issue: https://github.com/TheOptimisticFactory/mocha-glob-issue 1. Open a terminal 2. Clone the demo project using `git clone git@github.com:TheOptimisticFactory/mocha-glob-issue.git` 3. Install packages using `cd mocha-glob-issue && npm i` 4.
Run any of the following scripts: - `npm run test-legacy-working`: Check that tests pass when using the legacy opts `test/mocha.opts` - `npm run test-bugged-baseline`: tests WON'T BE FOUND when using `.mocharc.js` - `npm run test-bugged-showcase`: Dumps the BROKEN file patterns when using `.mocha.multi-paths.js` --- **LEGACY behavior:** [What used to happen] <details> <summary>LEGACY configuration file: /test/mocha.opts</summary> ```javascript --require test/setup.js {controllers,test}/**/*.test.js --exit ``` </details> `npm run test-legacy-working` ![image](https://user-images.githubusercontent.com/2607260/83260239-ba02f880-a1b9-11ea-871e-2c4619eabd34.png) --- **Actual behavior:** [What actually happens] <details> <summary>Using .mocharc.js</summary> ```javascript 'use strict'; module.exports = { exit: true, require: 'test/setup.js', spec: '{controllers,test}/**/*.test.js', }; ``` </details> `npm run test-bugged-baseline` ![image](https://user-images.githubusercontent.com/2607260/83260600-5deca400-a1ba-11ea-84ed-96c5698d9b01.png) <details> <summary>Using .mocharc.multi-paths.js</summary> ```javascript 'use strict'; module.exports = { exit: true, require: 'test/setup.js', spec: [ '{controllers,test}/**/*.test.js', 'test/**/*.test.js' ], }; ``` </details> `npm run test-bugged-showcase` ![image](https://user-images.githubusercontent.com/2607260/83260968-ed925280-a1ba-11ea-9a35-45be078be6ae.png) --- **Reproduces how often:** [What percentage of the time does it reproduce?] About 100% of the time :) --- ### Versions ![image](https://user-images.githubusercontent.com/2607260/83262687-b5d8da00-a1bd-11ea-8cf6-c02a16a7278e.png)
I confirm, I have the same problem. @TheOptimisticFactory thank you for your detailed description. I haven't tested, but I guess you are correct. It's the `list` function which splits the parsed string by the `,` separator. As a workaround you can use: `spec: [ 'controllers/**/*.test.js', 'test/**/*.test.js' ]` @juergba From the looks of it, one possible fix would be to do an additional globbing pass over the raw `spec` values (thus performing brace expansion if necessary) before parsing them any further. @rgroothuijsen I don't know. I don't really like the idea of having two glob expansion steps in two different places. There could be more comma-delimited glob patterns than just brace expansion? `spec` is kind of a bastard option form. In our first parsing step by _yargs-parser_, it's an option. In our second parsing step by _yargs,_ it's a positional argument. We could disallow comma-delimited lists in `spec`: - ok: `node mocha test1 test2` - ok?: `node mocha -- test1 test2` - ok: `node mocha --spec test1 --spec test2` - ok, but disallow?: `node mocha --spec test1,test2` @juergba Just as a note, commas are sometimes necessary in forms like this: `{,!(node_modules)/**/}*.js`, and there the workaround doesn't work. IMO the best way to solve this is to remove the comma splitting for options which could contain glob patterns: `--spec` and `--ignore`. As per the _yargs_ configuration, comma-delimited lists are not customized for these two options/positional arguments anyway. @TheOptimisticFactory @adjerbetian if you have some time left ... Could you check whether the following patch in _lib/cli/options.js_ is working, please? ```js const globOptions = ['spec', 'ignore']; // new const coerceOpts = Object.assign( types.array.reduce( (acc, arg) => Object.assign(acc, { [arg]: v => Array.from(new Set(globOptions.includes(arg) ? v : list(v))) // modified }), {} ), .... ``` @juergba I just tested, it works for me :+1: > IMO the best way to solve this is to remove the comma splitting for options which could contain glob patterns: `--spec` and `--ignore`. > As per the _yargs_ configuration, comma-delimited lists are not customized for these two options/positional arguments anyway. > > @TheOptimisticFactory @adjerbetian if you have some time left ... > Could you check whether the following patch in _lib/cli/options.js_ is working, please? > > ```js > const globOptions = ['spec', 'ignore']; // new > const coerceOpts = Object.assign( > types.array.reduce( > (acc, arg) => > Object.assign(acc, { > [arg]: v => Array.from(new Set(globOptions.includes(arg) ? v : list(v))) // modified > }), > {} > ), > .... > ``` @juergba I also confirm [your diff](https://github.com/mochajs/mocha/issues/4307#issuecomment-639486379) fixes the issue in all my projects :+1: @adjerbetian @TheOptimisticFactory thank you!
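As a self-contained illustration of the rule that patch introduces (a toy re-implementation, not Mocha's actual helper): glob-bearing options keep their raw values, while the other array-type options are still comma-split and de-duplicated.

```js
// Toy version of the coercion rule: only non-glob array options get comma-split.
const globOptions = ['spec', 'ignore'];
const splitList = vals => vals.flatMap(v => String(v).split(','));

function coerce(name, values) {
  const vals = Array.isArray(values) ? values : [values];
  return Array.from(
    new Set(globOptions.includes(name) ? vals : splitList(vals))
  );
}

console.log(coerce('extension', 'js,ts'));
// -> [ 'js', 'ts' ]
console.log(coerce('spec', '{controllers,test}/**/*.test.js'));
// -> [ '{controllers,test}/**/*.test.js' ]  (brace glob survives intact)
```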
2020-06-06T07:17:06Z
7.1
mochajs/mocha
4,165
mochajs__mocha-4165
[ "4160" ]
c0f1d1456dbc068f0552a5ceaed0d9b95e940ce1
diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -654,7 +654,7 @@ Runner.prototype.runTests = function(suite, fn) { self.emit(constants.EVENT_TEST_END, test); // skip inner afterEach hooks below errSuite level var origSuite = self.suite; - self.suite = errSuite; + self.suite = errSuite || self.suite; return self.hookUp(HOOK_TYPE_AFTER_EACH, function(e, eSuite) { self.suite = origSuite; next(e, eSuite);
diff --git a/test/integration/fixtures/pending/programmatic.fixture.js b/test/integration/fixtures/pending/programmatic.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/pending/programmatic.fixture.js @@ -0,0 +1,8 @@ +'use strict'; +const Mocha = require('../../../../lib/mocha'); + +const mocha = new Mocha({reporter: 'json'}); +mocha.addFile("./test/integration/fixtures/__default__.fixture.js"); + +const runner = mocha.run(); +runner.on('test', function (test) { test.pending = true; }); diff --git a/test/integration/helpers.js b/test/integration/helpers.js --- a/test/integration/helpers.js +++ b/test/integration/helpers.js @@ -143,6 +143,8 @@ module.exports = { invokeMochaAsync: invokeMochaAsync, + invokeNode: invokeNode, + /** * Resolves the path to a fixture to the full path. */ @@ -227,6 +229,19 @@ function invokeMochaAsync(args, opts) { return [mochaProcess, resultPromise]; } +/** + * Invokes Node without Mocha binary with the given arguments, + * when Mocha is used programmatically. + */ +function invokeNode(args, fn, opts) { + if (typeof args === 'function') { + opts = fn; + fn = args; + args = []; + } + return _spawnMochaWithListeners(args, fn, opts); +} + function invokeSubMocha(args, fn, opts) { if (typeof args === 'function') { opts = fn; diff --git a/test/integration/pending.spec.js b/test/integration/pending.spec.js --- a/test/integration/pending.spec.js +++ b/test/integration/pending.spec.js @@ -1,9 +1,12 @@ 'use strict'; var assert = require('assert'); -var run = require('./helpers').runMochaJSON; -var runMocha = require('./helpers').runMocha; -var splitRegExp = require('./helpers').splitRegExp; +var helpers = require('./helpers'); +var run = helpers.runMochaJSON; +var runMocha = helpers.runMocha; +var splitRegExp = helpers.splitRegExp; +var invokeNode = helpers.invokeNode; +var toJSONRunResult = helpers.toJSONRunResult; var args = []; describe('pending', function() { @@ -323,4 +326,21 @@ describe('pending', function() { }); }); }); + + describe('programmatic usage', function() { + it('should skip the test by listening to test event', function(done) { + var path = require.resolve('./fixtures/pending/programmatic.fixture.js'); + invokeNode([path], function(err, res) { + if (err) { + return done(err); + } + var result = toJSONRunResult(res); + expect(result, 'to have passed') + .and('to have passed test count', 0) + .and('to have pending test count', 1) + .and('to have pending test order', 'should succeed'); + done(); + }); + }); + }); });
Mocha runner throws if test.pending is set to true in the 'test' event I have a script that hooks on to mocha events and dynamically determines which tests to run at runtime. I do this by setting `args.pending = true` in the `test` hook. 7.0.0 seems to break this behavior. Here's a sample repro ```javascript function mochaRun() { const mochaLib = require('mocha'); mochaLib.Suite.prototype.beforeAll = () => { }; mochaLib.Suite.prototype.afterAll = () => { }; mochaLib.Suite.prototype.beforeEach = () => { }; mochaLib.Suite.prototype.afterEach = () => { }; this.mocha = new mochaLib({}); this.mocha.addFile("test.js"); const runner = this.mocha.run(); runner.setMaxListeners(20); runner.on('test', (args) => { args.pending = true; }); } // Use domain to catch the error in mocha const domain = require('domain'); const executionDomain = domain.create(); executionDomain.on('error', (err) => { console.error(err); }); executionDomain.run(() => { mochaRun() }); ``` with sample `test.js`: ```javascript const assert = require('assert'); describe('suite a', () => { it('test case a1', () => { }); it('test case a2', () => { assert.fail('failure'); }); }) ``` On running this mocha throws with the error `Cannot read property 'parent' of undefined` ``` suite a - test case a1 TypeError: Cannot read property 'parent' of undefined at Runner.parents (E:\mocharepro\node_modules\mocha\lib\runner.js:501:16) at Runner.hookUp (E:\mocharepro\node_modules\mocha\lib\runner.js:475:41) at E:\mocharepro\node_modules\mocha\lib\runner.js:658:21 at next (E:\mocharepro\node_modules\mocha\lib\runner.js:450:14) at E:\mocharepro\node_modules\mocha\lib\runner.js:460:7 at next (E:\mocharepro\node_modules\mocha\lib\runner.js:362:14) at Immediate.<anonymous> (E:\mocharepro\node_modules\mocha\lib\runner.js:428:5) at processImmediate (internal/timers.js:439:21) at process.topLevelDomainCallback (domain.js:130:23) { domainThrown: true } ``` If this is now the expected behavior how do I get around it and dynamically determine which tests to skip.
@karanjitsingh could you please patch the [following line](https://github.com/mochajs/mocha/blob/master/lib/runner.js#L657) in "lib/runner.js": - old: `self.suite = errSuite;` - new: `self.suite = errSuite || self.suite;` @karanjitsingh our CI tests haven't covered this test scenario, since they didn't fail in the past. So if you just open a PR with exactly my proposed line, our CI tests have no significance; they don't prove whether the bug is fixed or not. - either you test this patch in your environment and tell me whether it works or not - or you add a test to your PR which covers this scenario - or you do both items above @juergba, not sure how to test this change; if there is a test for that block of code please point me to it. Let me know if this needs to be a unit / functional test, with helpful code pointers. I can confirm that the change is working for me. ![image](https://user-images.githubusercontent.com/4632805/72778464-17e96780-3c3f-11ea-81bf-13d423085b92.png)
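For reference, the programmatic pattern this fix keeps working (it is also what the new integration fixture in the test patch exercises): listen for the runner's `test` event and mark tests pending on the fly. The `./test.js` path is a placeholder.

```js
// Programmatic run that dynamically skips tests; './test.js' is a placeholder.
'use strict';
const Mocha = require('mocha');

const mocha = new Mocha({reporter: 'json'});
mocha.addFile('./test.js');

const runner = mocha.run(failures => {
  process.exitCode = failures ? 1 : 0;
});

// mark each test as pending right before it runs
runner.on('test', test => {
  test.pending = true;
});
```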
2020-01-22T14:06:57Z
7
mochajs/mocha
4,234
mochajs__mocha-4234
[ "2783" ]
c0137eb698add08f29035467ea1dc230904f82ba
diff --git a/browser-entry.js b/browser-entry.js --- a/browser-entry.js +++ b/browser-entry.js @@ -52,6 +52,17 @@ process.removeListener = function(e, fn) { } }; +/** + * Implements listenerCount for 'uncaughtException'. + */ + +process.listenerCount = function(name) { + if (name === 'uncaughtException') { + return uncaughtExceptionHandlers.length; + } + return 0; +}; + /** * Implements uncaughtException listener. */ diff --git a/lib/errors.js b/lib/errors.js --- a/lib/errors.js +++ b/lib/errors.js @@ -149,6 +149,35 @@ function createInvalidPluginError(message, pluginType, pluginId) { } } +/** + * Creates an error object to be thrown when a mocha object's `run` method is executed while it is already disposed. + * @param {string} message The error message to be displayed. + * @param {boolean} cleanReferencesAfterRun the value of `cleanReferencesAfterRun` + * @param {Mocha} instance the mocha instance that throw this error + */ +function createMochaInstanceAlreadyDisposedError( + message, + cleanReferencesAfterRun, + instance +) { + var err = new Error(message); + err.code = 'ERR_MOCHA_INSTANCE_ALREADY_DISPOSED'; + err.cleanReferencesAfterRun = cleanReferencesAfterRun; + err.instance = instance; + return err; +} + +/** + * Creates an error object to be thrown when a mocha object's `run` method is called while a test run is in progress. + * @param {string} message The error message to be displayed. + */ +function createMochaInstanceAlreadyRunningError(message, instance) { + var err = new Error(message); + err.code = 'ERR_MOCHA_INSTANCE_ALREADY_RUNNING'; + err.instance = instance; + return err; +} + module.exports = { createInvalidArgumentTypeError: createInvalidArgumentTypeError, createInvalidArgumentValueError: createInvalidArgumentValueError, @@ -158,5 +187,7 @@ module.exports = { createMissingArgumentError: createMissingArgumentError, createNoFilesMatchPatternError: createNoFilesMatchPatternError, createUnsupportedError: createUnsupportedError, - createInvalidPluginError: createInvalidPluginError + createInvalidPluginError: createInvalidPluginError, + createMochaInstanceAlreadyDisposedError: createMochaInstanceAlreadyDisposedError, + createMochaInstanceAlreadyRunningError: createMochaInstanceAlreadyRunningError }; diff --git a/lib/hook.js b/lib/hook.js --- a/lib/hook.js +++ b/lib/hook.js @@ -27,6 +27,14 @@ function Hook(title, fn) { */ inherits(Hook, Runnable); +/** + * Resets the state for a next run. + */ +Hook.prototype.reset = function() { + Runnable.prototype.reset.call(this); + delete this._error; +}; + /** * Get or set the test `err`. * diff --git a/lib/mocha.js b/lib/mocha.js --- a/lib/mocha.js +++ b/lib/mocha.js @@ -18,6 +18,10 @@ var esmUtils = utils.supportsEsModules() ? require('./esm-utils') : undefined; var createStatsCollector = require('./stats-collector'); var createInvalidReporterError = errors.createInvalidReporterError; var createInvalidInterfaceError = errors.createInvalidInterfaceError; +var createMochaInstanceAlreadyDisposedError = + errors.createMochaInstanceAlreadyDisposedError; +var createMochaInstanceAlreadyRunningError = + errors.createMochaInstanceAlreadyRunningError; var EVENT_FILE_PRE_REQUIRE = Suite.constants.EVENT_FILE_PRE_REQUIRE; var EVENT_FILE_POST_REQUIRE = Suite.constants.EVENT_FILE_POST_REQUIRE; var EVENT_FILE_REQUIRE = Suite.constants.EVENT_FILE_REQUIRE; @@ -25,6 +29,30 @@ var sQuote = utils.sQuote; exports = module.exports = Mocha; +/** + * A Mocha instance is a finite state machine. + * These are the states it can be in. 
+ */ +var mochaStates = utils.defineConstants({ + /** + * Initial state of the mocha instance + */ + INIT: 'init', + /** + * Mocha instance is running tests + */ + RUNNING: 'running', + /** + * Mocha instance is done running tests and references to test functions and hooks are cleaned. + * You can reset this state by unloading the test files. + */ + REFERENCES_CLEANED: 'referencesCleaned', + /** + * Mocha instance is disposed and can no longer be used. + */ + DISPOSED: 'disposed' +}); + /** * To require local UIs and reporters when running in node. */ @@ -97,6 +125,7 @@ function Mocha(options) { this.options = options; // root suite this.suite = new exports.Suite('', new exports.Context(), true); + this._cleanReferencesAfterRun = true; this.grep(options.grep) .fgrep(options.fgrep) @@ -388,9 +417,18 @@ Mocha.unloadFile = function(file) { * @chainable */ Mocha.prototype.unloadFiles = function() { + if (this._state === mochaStates.DISPOSED) { + throw createMochaInstanceAlreadyDisposedError( + 'Mocha instance is already disposed, it cannot be used again.', + this._cleanReferencesAfterRun, + this + ); + } + this.files.forEach(function(file) { Mocha.unloadFile(file); }); + this._state = mochaStates.INIT; return this; }; @@ -490,6 +528,38 @@ Mocha.prototype.checkLeaks = function(checkLeaks) { return this; }; +/** + * Enables or disables whether or not to dispose after each test run. + * Disable this to ensure you can run the test suite multiple times. + * If disabled, be sure to dispose mocha when you're done to prevent memory leaks. + * @public + * @see {@link Mocha#dispose} + * @param {boolean} cleanReferencesAfterRun + * @return {Mocha} this + * @chainable + */ +Mocha.prototype.cleanReferencesAfterRun = function(cleanReferencesAfterRun) { + this._cleanReferencesAfterRun = cleanReferencesAfterRun !== false; + return this; +}; + +/** + * Manually dispose this mocha instance. Mark this instance as `disposed` and unable to run more tests. + * It also removes function references to tests functions and hooks, so variables trapped in closures can be cleaned by the garbage collector. + * @public + */ +Mocha.prototype.dispose = function() { + if (this._state === mochaStates.RUNNING) { + throw createMochaInstanceAlreadyRunningError( + 'Cannot dispose while the mocha instance is still running tests.' + ); + } + this.unloadFiles(); + this._previousRunner && this._previousRunner.dispose(); + this.suite.dispose(); + this._state = mochaStates.DISPOSED; +}; + /** * Displays full stack trace upon test failure. * @@ -770,6 +840,28 @@ Mocha.prototype.forbidPending = function(forbidPending) { return this; }; +/** + * Throws an error if mocha is in the wrong state to be able to transition to a "running" state. + */ +Mocha.prototype._guardRunningStateTransition = function() { + if (this._state === mochaStates.RUNNING) { + throw createMochaInstanceAlreadyRunningError( + 'Mocha instance is currently running tests, cannot start a next test run until this one is done', + this + ); + } + if ( + this._state === mochaStates.DISPOSED || + this._state === mochaStates.REFERENCES_CLEANED + ) { + throw createMochaInstanceAlreadyDisposedError( + 'Mocha instance is already disposed, cannot start a new test run. Please create a new mocha instance. Be sure to set disable `cleanReferencesAfterRun` when you want to reuse the same mocha instance for multiple test runs.', + this._cleanReferencesAfterRun, + this + ); + } +}; + /** * Mocha version as specified by "package.json". 
* @@ -810,13 +902,23 @@ Object.defineProperty(Mocha.prototype, 'version', { * mocha.run(failures => process.exitCode = failures ? 1 : 0); */ Mocha.prototype.run = function(fn) { + this._guardRunningStateTransition(); + this._state = mochaStates.RUNNING; + if (this._previousRunner) { + this._previousRunner.dispose(); + this.suite.reset(); + } if (this.files.length && !this.loadAsync) { this.loadFiles(); } + var self = this; var suite = this.suite; var options = this.options; options.files = this.files; - var runner = new exports.Runner(suite, options.delay); + var runner = new exports.Runner(suite, { + delay: options.delay, + cleanReferencesAfterRun: this._cleanReferencesAfterRun + }); createStatsCollector(runner); var reporter = new this._reporter(runner, options); runner.checkLeaks = options.checkLeaks === true; @@ -841,6 +943,12 @@ Mocha.prototype.run = function(fn) { exports.reporters.Base.hideDiff = !options.diff; function done(failures) { + self._previousRunner = runner; + if (self._cleanReferencesAfterRun) { + self._state = mochaStates.REFERENCES_CLEANED; + } else { + self._state = mochaStates.INIT; + } fn = fn || utils.noop; if (reporter.done) { reporter.done(failures, fn); diff --git a/lib/runnable.js b/lib/runnable.js --- a/lib/runnable.js +++ b/lib/runnable.js @@ -35,10 +35,8 @@ function Runnable(title, fn) { this.sync = !this.async; this._timeout = 2000; this._slow = 75; - this.timedOut = false; this._retries = -1; - this._currentRetry = 0; - this.pending = false; + this.reset(); } /** @@ -46,6 +44,17 @@ function Runnable(title, fn) { */ utils.inherits(Runnable, EventEmitter); +/** + * Resets the state initially or for a next run. + */ +Runnable.prototype.reset = function() { + this.timedOut = false; + this._currentRetry = 0; + this.pending = false; + delete this.state; + delete this.err; +}; + /** * Get current timeout value in msecs. * diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -121,18 +121,30 @@ module.exports = Runner; * @extends external:EventEmitter * @public * @class - * @param {Suite} suite Root suite - * @param {boolean} [delay] Whether to delay execution of root suite until ready. + * @param {Suite} suite - Root suite + * @param {Object|boolean} [opts] - Options. If `boolean`, whether or not to delay execution of root suite until ready (for backwards compatibility). + * @param {boolean} [opts.delay] - Whether to delay execution of root suite until ready. + * @param {boolean} [opts.cleanReferencesAfterRun] - Whether to clean references to test fns and hooks when a suite is done. */ -function Runner(suite, delay) { +function Runner(suite, opts) { + if (opts === undefined) { + opts = {}; + } + if (typeof opts === 'boolean') { + this._delay = opts; + opts = {}; + } else { + this._delay = opts.delay; + } var self = this; this._globals = []; this._abort = false; - this._delay = delay; this.suite = suite; this.started = false; + this._opts = opts; this.total = suite.total(); this.failures = 0; + this._eventListeners = []; this.on(constants.EVENT_TEST_END, function(test) { if (test.type === 'test' && test.retriedTest() && test.parent) { var idx = @@ -162,6 +174,53 @@ Runner.immediately = global.setImmediate || process.nextTick; */ inherits(Runner, EventEmitter); +/** + * Replacement for `target.on(eventName, listener)` that does bookkeeping to remove them when this runner instance is disposed. 
+ * @param target {EventEmitter} + * @param eventName {string} + * @param fn {function} + */ +Runner.prototype._addEventListener = function(target, eventName, listener) { + target.on(eventName, listener); + this._eventListeners.push([target, eventName, listener]); +}; + +/** + * Replacement for `target.removeListener(eventName, listener)` that also updates the bookkeeping. + * @param target {EventEmitter} + * @param eventName {string} + * @param fn {function} + */ +Runner.prototype._removeEventListener = function(target, eventName, listener) { + var eventListenerIndex = this._eventListeners.findIndex(function( + eventListenerDescriptor + ) { + return ( + eventListenerDescriptor[0] === target && + eventListenerDescriptor[1] === eventName && + eventListenerDescriptor[2] === listener + ); + }); + if (eventListenerIndex !== -1) { + var removedListener = this._eventListeners.splice(eventListenerIndex, 1)[0]; + removedListener[0].removeListener(removedListener[1], removedListener[2]); + } +}; + +/** + * Removes all event handlers set during a run on this instance. + * Remark: this does _not_ clean/dispose the tests or suites themselves. + */ +Runner.prototype.dispose = function() { + this.removeAllListeners(); + this._eventListeners.forEach(function(eventListenerDescriptor) { + eventListenerDescriptor[0].removeListener( + eventListenerDescriptor[1], + eventListenerDescriptor[2] + ); + }); +}; + /** * Run tests with full titles matching `re`. Updates runner.total * with number of tests matched. @@ -378,7 +437,7 @@ Runner.prototype.hook = function(name, fn) { self.emit(constants.EVENT_HOOK_BEGIN, hook); if (!hook.listeners('error').length) { - hook.on('error', function(err) { + self._addEventListener(hook, 'error', function(err) { self.failHook(hook, err); }); } @@ -530,7 +589,7 @@ Runner.prototype.runTest = function(fn) { if (this.asyncOnly) { test.asyncOnly = true; } - test.on('error', function(err) { + this._addEventListener(test, 'error', function(err) { self.fail(test, err); }); if (this.allowUncaught) { @@ -920,21 +979,24 @@ Runner.prototype.run = function(fn) { } // references cleanup to avoid memory leaks - this.on(constants.EVENT_SUITE_END, function(suite) { - suite.cleanReferences(); - }); + if (this._opts.cleanReferencesAfterRun) { + this.on(constants.EVENT_SUITE_END, function(suite) { + suite.cleanReferences(); + }); + } // callback this.on(constants.EVENT_RUN_END, function() { - process.removeListener('uncaughtException', uncaught); - process.on('uncaughtException', self.uncaughtEnd); + debug(constants.EVENT_RUN_END); + self._removeEventListener(process, 'uncaughtException', uncaught); + self._addEventListener(process, 'uncaughtException', self.uncaughtEnd); debug('run(): emitted %s', constants.EVENT_RUN_END); fn(self.failures); }); // uncaught exception - process.removeListener('uncaughtException', self.uncaughtEnd); - process.on('uncaughtException', uncaught); + self._removeEventListener(process, 'uncaughtException', self.uncaughtEnd); + self._addEventListener(process, 'uncaughtException', uncaught); if (this._delay) { // for reporters, I guess. 
diff --git a/lib/suite.js b/lib/suite.js --- a/lib/suite.js +++ b/lib/suite.js @@ -61,19 +61,19 @@ function Suite(title, parentContext, isRoot) { this.ctx = new Context(); this.suites = []; this.tests = []; + this.root = isRoot === true; this.pending = false; + this._retries = -1; this._beforeEach = []; this._beforeAll = []; this._afterEach = []; this._afterAll = []; - this.root = isRoot === true; this._timeout = 2000; this._slow = 75; this._bail = false; - this._retries = -1; this._onlyTests = []; this._onlySuites = []; - this.delayed = false; + this.reset(); this.on('newListener', function(event) { if (deprecatedEvents[event]) { @@ -91,6 +91,22 @@ function Suite(title, parentContext, isRoot) { */ inherits(Suite, EventEmitter); +/** + * Resets the state initially or for a next run. + */ +Suite.prototype.reset = function() { + this.delayed = false; + function doReset(thingToReset) { + thingToReset.reset(); + } + this.suites.forEach(doReset); + this.tests.forEach(doReset); + this._beforeEach.forEach(doReset); + this._afterEach.forEach(doReset); + this._beforeAll.forEach(doReset); + this._afterAll.forEach(doReset); +}; + /** * Return a clone of this `Suite`. * @@ -493,6 +509,16 @@ Suite.prototype.getHooks = function getHooks(name) { return this['_' + name]; }; +/** + * cleans all references from this suite and all child suites. + */ +Suite.prototype.dispose = function() { + this.suites.forEach(function(suite) { + suite.dispose(); + }); + this.cleanReferences(); +}; + /** * Cleans up the references to all the deferred functions * (before/after/beforeEach/afterEach) and tests of a Suite.
diff --git a/lib/test.js b/lib/test.js --- a/lib/test.js +++ b/lib/test.js @@ -26,9 +26,9 @@ function Test(title, fn) { 'string' ); } - Runnable.call(this, title, fn); - this.pending = !fn; this.type = 'test'; + Runnable.call(this, title, fn); + this.reset(); } /** @@ -36,6 +36,15 @@ function Test(title, fn) { */ utils.inherits(Test, Runnable); +/** + * Resets the state initially or for a next run. + */ +Test.prototype.reset = function() { + Runnable.prototype.reset.call(this); + this.pending = !this.fn; + delete this.state; +}; + /** * Set or get retried test * diff --git a/test/integration/fixtures/multiple-runs/clean-references.fixture.js b/test/integration/fixtures/multiple-runs/clean-references.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/multiple-runs/clean-references.fixture.js @@ -0,0 +1,6 @@ +'use strict'; +const Mocha = require('../../../../lib/mocha'); + +const mocha = new Mocha({ reporter: 'json' }); +mocha.cleanReferencesAfterRun(true); +require('./run-thrice-helper')(mocha); diff --git a/test/integration/fixtures/multiple-runs/dispose.fixture.js b/test/integration/fixtures/multiple-runs/dispose.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/multiple-runs/dispose.fixture.js @@ -0,0 +1,6 @@ +'use strict'; +const Mocha = require('../../../../lib/mocha'); + +const mocha = new Mocha({ reporter: 'json' }); +mocha.dispose(); +require('./run-thrice-helper')(mocha); diff --git a/test/integration/fixtures/multiple-runs/multiple-runs-with-different-output-suite.fixture.js b/test/integration/fixtures/multiple-runs/multiple-runs-with-different-output-suite.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/multiple-runs/multiple-runs-with-different-output-suite.fixture.js @@ -0,0 +1,19 @@ +describe('Multiple runs', () => { + + /** + * Shared state! Bad practice, but nice for this test + */ + let i = 0; + + it('should skip, fail and pass respectively', function () { + switch (i++) { + case 0: + this.skip(); + case 1: + throw new Error('Expected error'); + default: + // this is fine ☕ + break; + } + }); +}); diff --git a/test/integration/fixtures/multiple-runs/multiple-runs-with-flaky-before-each-suite.fixture.js b/test/integration/fixtures/multiple-runs/multiple-runs-with-flaky-before-each-suite.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/multiple-runs/multiple-runs-with-flaky-before-each-suite.fixture.js @@ -0,0 +1,18 @@ +describe('Multiple runs', () => { + + /** + * Shared state! 
Bad practice, but nice for this test + */ + let i = 0; + + beforeEach(function () { + if (i++ === 0) { + throw new Error('Expected error for this test'); + } + }); + + + it('should be a dummy test', function () { + // this is fine ☕ + }); +}); diff --git a/test/integration/fixtures/multiple-runs/multiple-runs-with-flaky-before-each.fixture.js b/test/integration/fixtures/multiple-runs/multiple-runs-with-flaky-before-each.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/multiple-runs/multiple-runs-with-flaky-before-each.fixture.js @@ -0,0 +1,13 @@ +'use strict'; +const Mocha = require('../../../../lib/mocha'); + +const mocha = new Mocha({ reporter: 'json' }); +mocha.cleanReferencesAfterRun(false); +mocha.addFile(require.resolve('./multiple-runs-with-flaky-before-each-suite.fixture.js')); +console.log('['); +mocha.run(() => { + console.log(','); + mocha.run(() => { + console.log(']'); + }); +}); diff --git a/test/integration/fixtures/multiple-runs/run-thrice-helper.js b/test/integration/fixtures/multiple-runs/run-thrice-helper.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/multiple-runs/run-thrice-helper.js @@ -0,0 +1,24 @@ +module.exports = function (mocha) { + mocha.addFile(require.resolve('./multiple-runs-with-different-output-suite.fixture.js')); + console.log('['); + try { + mocha.run(() => { + console.log(','); + try { + mocha.run(() => { + console.log(','); + mocha.run(() => { + console.log(']'); + }); + }); + } catch (err) { + console.error(err.code); + throw err; + } + }); + } catch (err) { + console.error(err.code); + throw err; + } + +} diff --git a/test/integration/fixtures/multiple-runs/run-thrice.fixture.js b/test/integration/fixtures/multiple-runs/run-thrice.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/multiple-runs/run-thrice.fixture.js @@ -0,0 +1,6 @@ +'use strict'; +const Mocha = require('../../../../lib/mocha'); + +const mocha = new Mocha({ reporter: 'json' }); +mocha.cleanReferencesAfterRun(false); +require('./run-thrice-helper')(mocha); diff --git a/test/integration/fixtures/multiple-runs/start-second-run-if-previous-is-still-running-suite.fixture.js b/test/integration/fixtures/multiple-runs/start-second-run-if-previous-is-still-running-suite.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/multiple-runs/start-second-run-if-previous-is-still-running-suite.fixture.js @@ -0,0 +1,5 @@ +describe('slow suite', () => { + it('should be slow', (done) => { + setTimeout(200, done); + }); +}); diff --git a/test/integration/fixtures/multiple-runs/start-second-run-if-previous-is-still-running.fixture.js b/test/integration/fixtures/multiple-runs/start-second-run-if-previous-is-still-running.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/multiple-runs/start-second-run-if-previous-is-still-running.fixture.js @@ -0,0 +1,12 @@ +'use strict'; +const Mocha = require('../../../../lib/mocha'); + +const mocha = new Mocha({ reporter: 'json' }); +mocha.addFile(require.resolve('./start-second-run-if-previous-is-still-running-suite.fixture.js')); +mocha.run(); +try { + mocha.run(); +} catch (err) { + console.error(err.code); +} + diff --git a/test/integration/multiple-runs.spec.js b/test/integration/multiple-runs.spec.js new file mode 100644 --- /dev/null +++ b/test/integration/multiple-runs.spec.js @@ -0,0 +1,89 @@ +'use strict'; + +var invokeNode = require('./helpers').invokeNode; + +describe('multiple runs', function(done) { + it('should be allowed to 
run multiple times if cleanReferences is turned off', function(done) { + var path = require.resolve( + './fixtures/multiple-runs/run-thrice.fixture.js' + ); + invokeNode([path], function(err, res) { + expect(err, 'to be null'); + expect(res.code, 'to be', 0); + var results = JSON.parse(res.output); + expect(results, 'to have length', 3); + expect(results[0].pending, 'to have length', 1); + expect(results[0].failures, 'to have length', 0); + expect(results[0].passes, 'to have length', 0); + expect(results[1].pending, 'to have length', 0); + expect(results[1].failures, 'to have length', 1); + expect(results[1].passes, 'to have length', 0); + expect(results[2].pending, 'to have length', 0); + expect(results[2].failures, 'to have length', 0); + expect(results[2].passes, 'to have length', 1); + done(); + }); + }); + + it('should not be allowed if cleanReferences is true', function(done) { + var path = require.resolve( + './fixtures/multiple-runs/clean-references.fixture.js' + ); + invokeNode( + [path], + function(err, res) { + expect(err, 'to be null'); + expect(res.code, 'not to be', 0); + expect(res.output, 'to contain', 'ERR_MOCHA_INSTANCE_ALREADY_DISPOSED'); + done(); + }, + {stdio: ['ignore', 'pipe', 'pipe']} + ); + }); + + it('should not be allowed if the instance is disposed', function(done) { + var path = require.resolve('./fixtures/multiple-runs/dispose.fixture.js'); + invokeNode( + [path, '--directly-dispose'], + function(err, res) { + expect(err, 'to be null'); + expect(res.code, 'not to be', 0); + expect(res.output, 'to contain', 'ERR_MOCHA_INSTANCE_ALREADY_DISPOSED'); + done(); + }, + {stdio: ['ignore', 'pipe', 'pipe']} + ); + }); + + it('should not be allowed to run while a previous run is in progress', function(done) { + var path = require.resolve( + './fixtures/multiple-runs/start-second-run-if-previous-is-still-running.fixture' + ); + invokeNode( + [path], + function(err, res) { + expect(err, 'to be null'); + expect(res.output, 'to contain', 'ERR_MOCHA_INSTANCE_ALREADY_RUNNING'); + done(); + }, + {stdio: ['ignore', 'pipe', 'pipe']} + ); + }); + + it('should reset the hooks between runs', function(done) { + var path = require.resolve( + './fixtures/multiple-runs/multiple-runs-with-flaky-before-each.fixture' + ); + invokeNode([path], function(err, res) { + expect(err, 'to be null'); + expect(res.code, 'to be', 0); + var results = JSON.parse(res.output); + expect(results, 'to have length', 2); + expect(results[0].failures, 'to have length', 1); + expect(results[0].passes, 'to have length', 0); + expect(results[1].passes, 'to have length', 1); + expect(results[1].failures, 'to have length', 0); + done(); + }); + }); +}); diff --git a/test/unit/hook.spec.js b/test/unit/hook.spec.js new file mode 100644 --- /dev/null +++ b/test/unit/hook.spec.js @@ -0,0 +1,44 @@ +'use strict'; +var sinon = require('sinon'); +var Mocha = require('../../lib/mocha'); +var Hook = Mocha.Hook; +var Runnable = Mocha.Runnable; + +describe(Hook.name, function() { + var hook; + + beforeEach(function() { + hook = new Hook('Some hook', function() {}); + }); + + afterEach(function() { + sinon.restore(); + }); + + describe('error', function() { + it('should set the hook._error', function() { + var expectedError = new Error('Expected error'); + hook.error(expectedError); + expect(hook._error, 'to be', expectedError); + }); + it('should get the hook._error when called without arguments', function() { + var expectedError = new Error('Expected error'); + hook._error = expectedError; + expect(hook.error(), 'to be', 
expectedError); + }); + }); + + describe('reset', function() { + it('should call Runnable.reset', function() { + var runnableResetStub = sinon.stub(Runnable.prototype, 'reset'); + hook.reset(); + expect(runnableResetStub, 'was called once'); + }); + + it('should reset the error state', function() { + hook.error(new Error('Expected error for test')); + hook.reset(); + expect(hook.error(), 'to be undefined'); + }); + }); +}); diff --git a/test/unit/mocha.spec.js b/test/unit/mocha.spec.js --- a/test/unit/mocha.spec.js +++ b/test/unit/mocha.spec.js @@ -22,6 +22,10 @@ describe('Mocha', function() { sandbox.stub(Mocha.prototype, 'global').returnsThis(); }); + it('should set _cleanReferencesAfterRun to true', function() { + expect(new Mocha()._cleanReferencesAfterRun, 'to be', true); + }); + describe('when "options.timeout" is `undefined`', function() { it('should not attempt to set timeout', function() { // eslint-disable-next-line no-new @@ -127,6 +131,25 @@ describe('Mocha', function() { }); }); + describe('#cleanReferencesAfterRun()', function() { + it('should set the _cleanReferencesAfterRun attribute', function() { + var mocha = new Mocha(opts); + mocha.cleanReferencesAfterRun(); + expect(mocha._cleanReferencesAfterRun, 'to be', true); + }); + + it('should set the _cleanReferencesAfterRun attribute to false', function() { + var mocha = new Mocha(opts); + mocha.cleanReferencesAfterRun(false); + expect(mocha._cleanReferencesAfterRun, 'to be', false); + }); + + it('should be chainable', function() { + var mocha = new Mocha(opts); + expect(mocha.cleanReferencesAfterRun(), 'to be', mocha); + }); + }); + describe('#color()', function() { it('should set the color option to true', function() { var mocha = new Mocha(opts); @@ -178,6 +201,32 @@ describe('Mocha', function() { }); }); + describe('#dispose()', function() { + it('should dispose the root suite', function() { + var mocha = new Mocha(opts); + var disposeStub = sandbox.stub(mocha.suite, 'dispose'); + mocha.dispose(); + expect(disposeStub, 'was called once'); + }); + + it('should dispose previous test runner', function() { + var mocha = new Mocha(opts); + var runStub = sandbox.stub(Mocha.Runner.prototype, 'run'); + var disposeStub = sandbox.stub(Mocha.Runner.prototype, 'dispose'); + mocha.run(); + runStub.callArg(0); + mocha.dispose(); + expect(disposeStub, 'was called once'); + }); + + it('should unload the files', function() { + var mocha = new Mocha(opts); + var unloadFilesStub = sandbox.stub(mocha, 'unloadFiles'); + mocha.dispose(); + expect(unloadFilesStub, 'was called once'); + }); + }); + describe('#forbidOnly()', function() { it('should set the forbidOnly option to true', function() { var mocha = new Mocha(opts); @@ -434,6 +483,99 @@ describe('Mocha', function() { mocha.run().on('end', done); }); + it('should throw if a run is in progress', function() { + var mocha = new Mocha(opts); + var runStub = sandbox.stub(Mocha.Runner.prototype, 'run'); + mocha.run(); + expect( + function() { + mocha.run(); + }, + 'to throw', + { + message: + 'Mocha instance is currently running tests, cannot start a next test run until this one is done', + code: 'ERR_MOCHA_INSTANCE_ALREADY_RUNNING', + instance: mocha + } + ); + expect(runStub, 'was called once'); + }); + + it('should throw the instance is already disposed', function() { + var mocha = new Mocha(opts); + var runStub = sandbox.stub(Mocha.Runner.prototype, 'run'); + mocha.dispose(); + expect( + function() { + mocha.run(); + }, + 'to throw', + { + message: + 'Mocha instance is already disposed, 
cannot start a new test run. Please create a new mocha instance. Be sure to set disable `cleanReferencesAfterRun` when you want to reuse the same mocha instance for multiple test runs.', + code: 'ERR_MOCHA_INSTANCE_ALREADY_DISPOSED', + cleanReferencesAfterRun: true, + instance: mocha + } + ); + expect(runStub, 'was called times', 0); + }); + + it('should throw if a run for a second time', function() { + var mocha = new Mocha(opts); + var runStub = sandbox.stub(Mocha.Runner.prototype, 'run'); + mocha.run(); + runStub.callArg(0); + expect( + function() { + mocha.run(); + }, + 'to throw', + { + message: + 'Mocha instance is already disposed, cannot start a new test run. Please create a new mocha instance. Be sure to set disable `cleanReferencesAfterRun` when you want to reuse the same mocha instance for multiple test runs.', + code: 'ERR_MOCHA_INSTANCE_ALREADY_DISPOSED', + instance: mocha + } + ); + expect(runStub, 'was called once'); + }); + + it('should allow multiple runs if `cleanReferencesAfterRun` is disabled', function() { + var mocha = new Mocha(opts); + var runStub = sandbox.stub(Mocha.Runner.prototype, 'run'); + mocha.cleanReferencesAfterRun(false); + mocha.run(); + runStub.callArg(0); + mocha.run(); + runStub.callArg(0); + expect(runStub, 'called times', 2); + }); + + it('should reset between runs', function() { + var mocha = new Mocha(opts); + var runStub = sandbox.stub(Mocha.Runner.prototype, 'run'); + var resetStub = sandbox.stub(Mocha.Suite.prototype, 'reset'); + mocha.cleanReferencesAfterRun(false); + mocha.run(); + runStub.callArg(0); + mocha.run(); + expect(resetStub, 'was called once'); + }); + + it('should dispose the previous runner when the next run starts', function() { + var mocha = new Mocha(opts); + var runStub = sandbox.stub(Mocha.Runner.prototype, 'run'); + var disposeStub = sandbox.stub(Mocha.Runner.prototype, 'dispose'); + mocha.cleanReferencesAfterRun(false); + mocha.run(); + runStub.callArg(0); + expect(disposeStub, 'was not called'); + mocha.run(); + expect(disposeStub, 'was called once'); + }); + describe('#reporter("xunit")#run(fn)', function() { // :TBD: Why does specifying reporter differentiate this test from preceding one it('should not raise errors if callback was not provided', function() { @@ -449,4 +591,29 @@ describe('Mocha', function() { }); }); }); + + describe('#unloadFiles()', function() { + it('should reset referencesCleaned and allow for next run', function() { + var mocha = new Mocha(opts); + var runStub = sandbox.stub(Mocha.Runner.prototype, 'run'); + mocha.run(); + runStub.callArg(0); + mocha.unloadFiles(); + expect(function() { + mocha.run(); + }, 'not to throw'); + }); + + it('should not be allowed when the current instance is already disposed', function() { + var mocha = new Mocha(opts); + mocha.dispose(); + expect( + function() { + mocha.unloadFiles(); + }, + 'to throw', + 'Mocha instance is already disposed, it cannot be used again.' 
+ ); + }); + }); }); diff --git a/test/unit/runnable.spec.js b/test/unit/runnable.spec.js --- a/test/unit/runnable.spec.js +++ b/test/unit/runnable.spec.js @@ -127,6 +127,29 @@ describe('Runnable(title, fn)', function() { }); }); + describe('#reset', function() { + var run; + + beforeEach(function() { + run = new Runnable(); + }); + + it('should reset current run state', function() { + run.timedOut = true; + run._currentRetry = 5; + run.pending = true; + run.err = new Error(); + run.state = 'error'; + + run.reset(); + expect(run.timedOut, 'to be false'); + expect(run._currentRetry, 'to be', 0); + expect(run.pending, 'to be false'); + expect(run.err, 'to be undefined'); + expect(run.state, 'to be undefined'); + }); + }); + describe('.title', function() { it('should be present', function() { expect(new Runnable('foo').title, 'to be', 'foo'); diff --git a/test/unit/runner.spec.js b/test/unit/runner.spec.js --- a/test/unit/runner.spec.js +++ b/test/unit/runner.spec.js @@ -15,6 +15,7 @@ var EVENT_TEST_FAIL = Runner.constants.EVENT_TEST_FAIL; var EVENT_TEST_RETRY = Runner.constants.EVENT_TEST_RETRY; var EVENT_TEST_END = Runner.constants.EVENT_TEST_END; var EVENT_RUN_END = Runner.constants.EVENT_RUN_END; +var EVENT_SUITE_END = Runner.constants.EVENT_SUITE_END; var STATE_FAILED = Runnable.constants.STATE_FAILED; describe('Runner', function() { @@ -24,7 +25,7 @@ describe('Runner', function() { beforeEach(function() { suite = new Suite('Suite', 'root'); - runner = new Runner(suite); + runner = new Runner(suite, {cleanReferencesAfterRun: true}); runner.checkLeaks = true; sandbox = sinon.createSandbox(); }); @@ -456,13 +457,69 @@ describe('Runner', function() { done(); }); }); - // karma-mocha is inexplicably doing this with a Hook it('should not throw an exception if something emits EVENT_TEST_END with a non-Test object', function() { expect(function() { runner.emit(EVENT_TEST_END, {}); }, 'not to throw'); }); + + it('should clean references after a run', function() { + runner = new Runner(suite, {delay: false, cleanReferencesAfterRun: true}); + var cleanReferencesStub = sandbox.stub(suite, 'cleanReferences'); + runner.run(); + runner.emit(EVENT_SUITE_END, suite); + expect(cleanReferencesStub, 'was called once'); + }); + + it('should not clean references after a run when `cleanReferencesAfterRun` is `false`', function() { + runner = new Runner(suite, { + delay: false, + cleanReferencesAfterRun: false + }); + var cleanReferencesStub = sandbox.stub(suite, 'cleanReferences'); + runner.run(); + runner.emit(EVENT_SUITE_END, suite); + expect(cleanReferencesStub, 'was not called'); + }); + }); + + describe('.dispose', function() { + it('should remove all listeners from itself', function() { + runner.on('disposeShouldRemoveThis', noop); + runner.dispose(); + expect(runner.listenerCount('disposeShouldRemoveThis'), 'to be', 0); + }); + + it('should remove "error" listeners from a test', function() { + var fn = sandbox.stub(); + runner.test = new Test('test for dispose', fn); + runner.runTest(noop); + // sanity check + expect(runner.test.listenerCount('error'), 'to be', 1); + runner.dispose(); + expect(runner.test.listenerCount('error'), 'to be', 0); + }); + + it('should remove "uncaughtException" listeners from the process', function() { + var normalUncaughtExceptionListenerCount = process.listenerCount( + 'uncaughtException' + ); + sandbox.stub(); + runner.run(noop); + // sanity check + expect( + process.listenerCount('uncaughtException'), + 'to be', + normalUncaughtExceptionListenerCount + 1 + ); + 
runner.dispose(); + expect( + process.listenerCount('uncaughtException'), + 'to be', + normalUncaughtExceptionListenerCount + ); + }); }); describe('.runTest(fn)', function() { diff --git a/test/unit/suite.spec.js b/test/unit/suite.spec.js --- a/test/unit/suite.spec.js +++ b/test/unit/suite.spec.js @@ -80,6 +80,48 @@ describe('Suite', function() { }); }); + describe('.reset()', function() { + beforeEach(function() { + this.suite = new Suite('Suite to be reset', function() {}); + }); + + it('should reset the `delayed` state', function() { + this.suite.delayed = true; + this.suite.reset(); + expect(this.suite.delayed, 'to be', false); + }); + + it('should forward reset to suites and tests', function() { + var childSuite = new Suite('child suite', this.suite.context); + var test = new Test('test', function() {}); + this.suite.addSuite(childSuite); + this.suite.addTest(test); + var testResetStub = sandbox.stub(test, 'reset'); + var suiteResetStub = sandbox.stub(childSuite, 'reset'); + this.suite.reset(); + expect(testResetStub, 'was called once'); + expect(suiteResetStub, 'was called once'); + }); + + it('should forward reset to all hooks', function() { + this.suite.beforeEach(function() {}); + this.suite.afterEach(function() {}); + this.suite.beforeAll(function() {}); + this.suite.afterAll(function() {}); + sinon.stub(this.suite.getHooks('beforeEach')[0], 'reset'); + sinon.stub(this.suite.getHooks('afterEach')[0], 'reset'); + sinon.stub(this.suite.getHooks('beforeAll')[0], 'reset'); + sinon.stub(this.suite.getHooks('afterAll')[0], 'reset'); + + this.suite.reset(); + + expect(this.suite.getHooks('beforeEach')[0].reset, 'was called once'); + expect(this.suite.getHooks('afterEach')[0].reset, 'was called once'); + expect(this.suite.getHooks('beforeAll')[0].reset, 'was called once'); + expect(this.suite.getHooks('afterAll')[0].reset, 'was called once'); + }); + }); + describe('.timeout()', function() { beforeEach(function() { this.suite = new Suite('A Suite'); diff --git a/test/unit/test.spec.js b/test/unit/test.spec.js --- a/test/unit/test.spec.js +++ b/test/unit/test.spec.js @@ -1,10 +1,24 @@ 'use strict'; +var sinon = require('sinon'); var mocha = require('../../lib/mocha'); var Test = mocha.Test; -var sinon = require('sinon'); +var Runnable = mocha.Runnable; describe('Test', function() { + /** + * @type {sinon.SinonSandbox} + */ + var sandbox; + + beforeEach(function() { + sandbox = sinon.createSandbox(); + }); + + afterEach(function() { + sandbox.restore(); + }); + describe('.clone()', function() { beforeEach(function() { this._test = new Test('To be cloned', function() {}); @@ -56,6 +70,24 @@ describe('Test', function() { }); }); + describe('.reset()', function() { + beforeEach(function() { + this._test = new Test('Test to be reset', function() {}); + }); + + it('should reset the run state', function() { + this._test.pending = true; + this._test.reset(); + expect(this._test.pending, 'to be', false); + }); + + it('should call Runnable.reset', function() { + var runnableResetStub = sandbox.stub(Runnable.prototype, 'reset'); + this._test.reset(); + expect(runnableResetStub, 'was called once'); + }); + }); + describe('.isPending()', function() { beforeEach(function() { this._test = new Test('Is it skipped', function() {});
Mocha can't run tests twice programmatically Programmatically, I cannot run a Mocha test twice. I.e. I can't do `mocha.run()` twice. I looked at a similar issue (#[995](https://github.com/mochajs/mocha/issues/995)), however, the solutions are not optimal and/or not work. For example, deleting the require cache is not a solution for me, as I need to use the require cache. Is there a way to `mocha.run()` twice? Thanks in advance!
Mmmm ><. I guess I will stick with my solution of spawning a child process for each mocha run; but I wanted to avoid this. I had the same problem. Reading the similar issue #[736](https://github.com/mochajs/mocha/issues/736), it seems it's all about cleaning the "require.cache" of the previously loaded spec's file Here my current solution: ``` launchTests(){ let _sTestDir = 'tests/specs'; let _oMocha = new Mocha(); fs.readdirSync( _sTestDir ).filter( function( oFile ){ return ( oFile.substr( -3 ) === '.js' ); }).forEach(function( sFile ){ let _sPathSpec = path.join( path.resolve(), _sTestDir, sFile ); // Resetting caches to be able to launch tests multiple times... delete require.cache[ _sPathSpec ]; // Adding test _oMocha.addFile( _sPathSpec ); }); _oMocha.run(); } ``` does the "retry" functionality not work? No, I don't think so for the context of rerunning an entire test suite again. @KingRial's solution seems like it would work. My suggestion is to update the mocha code to allow mocha.run() to be executed more than once (or a similar solution). I ran into the same problem. Why was this issue closed? Making a new instance of Mocha, and adding files should behave deterministically without modifying the require cache. Seems like a bug. @josiahruddell Good point, I agree. +1 +1 +1 +1 I've tested the selective delete of the require.cache with hundreds of parallel reruns. See my [comment on](https://github.com/mochajs/mocha/issues/995#issuecomment-365441585) #955. (this basically removes the test file from `require.cache` immediately after it is evaluated). The question is, why do you need a durable `require.cache`? For people interested, we've built the re-run functionality in the Stryker plugin for mocha: https://github.com/stryker-mutator/stryker/blob/930a7c39952e09a7d3f913fd9423bf78f608ec17/packages/mocha-runner/src/MochaTestRunner.ts#L48-L68 The problem we now face is that we _want to rerun mocha, without cleaning require cash or reloading the files_. We want to call it "hot reload". So rerun tests, without changing source code or test code. Basically, what we want to do is this: ```js const mocha = new Mocha(...); mocha.addFile(...); mocha.run(() => { mocha.run() => { console.log('ran twice'); }); }); ``` But right now, the second run always errors with: `"Cannot read property 'call' of undefined"` (which isn't helpful...) The problem is that `mocha.run` has side effects. Namely: it cleans the test functions here: https://github.com/mochajs/mocha/blob/0dacd1fb0067e40f8567653f828f677022e4fb89/lib/runner.js#L895-L897 and here: https://github.com/mochajs/mocha/blob/0dacd1fb0067e40f8567653f828f677022e4fb89/lib/suite.js#L455-L481 I think `mocha.run` isn't the place to start listening for the `suite end` event and clean up after. This should be done in the mocha cli file itself, right? I'm willing to create the PR if the maintainers agree with this. We could also add an option for this to keep the API backward compatible. This is just _one_ aspect.. there are several others required to _really_ hope to reuse a Mocha instance. Have something in mind, but post 6.0 release. Of note, the Mocha-6.0 release API includes [`Mocha#unloadFiles`](https://github.com/mochajs/mocha/blob/52b5c42c3dda8c386735969642843bd1129a4562/lib/mocha.js#L356). It's one step towards eventual `mocha` reuse... Actually... no... To delete the require cache correctly, not only the test files but also the _source files_ should be cleared. Mocha doesn't keep track of those files. 
This is what we need to do in Stryker right now (the `purgeFiles` method takes care of it). Just to make sure we don't confuse 2 things: we actually want to reuse the same instance of mocha _without clearing any file cache_. Should I create a separate issue for that? essentially, you'd need to ensure Mocha's inner state was completely reset. given there are bits of state everywhere, it's kind of a tall order at this point without some significant refactors. granted, having a single source of truth for state would be a great refactor, but...nontrivial. > you'd need to ensure Mocha's inner state was completely reset. given there are bits of state everywhere, it's kind of a tall order at this point Could you give me a list of things you can think about at the top of your head, with pointers on how to solve it? We should be able to create some kind of `state` object and pass that along. We can make it opt-in, so functionality remains the same if you don't want to use it. > Could you give me a list of things you can think about at the top of your head, with pointers on how to solve it? While ongoing efforts are made to payoff some of this technical debt for reusability, there are higher priority issues to address (e.g., correctness, robustness). Would also need to redesign many of the automated tests to now run themselves twice. There can be no "opt-in" -- it would have to be baked-in. @nicojs Yes, what @plroebuck said, and that I can't explain Mocha's "state" strategy without saying "look at the code", because there's state everywhere. That said, I'm happy to entertain ideas/PRs to solve the "state" question, I just don't think our maintainer team has the resources to embark on such a ...quest. > I just don't think our maintainer team has the resources to embark on such a ...quest. Haha Thanks for the clear responses. If I find a couple of hours in the coming weeks, I might take a look. One might say, I will embark on a quest 🏇 > > I just don't think our maintainer team has the resources to embark on such a ...quest. > > Haha > > Thanks for the clear responses. If I find a couple of hours in the coming weeks, I might take a look. One might say, I will embark on a quest horse_racing I don't feel comfortable on undertaking a nontrivial _quest_ without knowing the codebase, but making myself available to help if needed. This would solve a current major issue I have. I hate to pull the redux card, but something like redux might be a good solution here. > I don't feel comfortable on undertaking a nontrivial _quest_ without knowing the codebase, but making myself available to help if needed. [This](https://www.youtube.com/watch?v=--hMJPUBwMc) seems appropriate somehow. Hello everyone. I'll try to write tool on top of mocha, to run heavy e2e tests in separate workers for each browser for example. Current solutions (like [mocha-parallel-tests](https://github.com/mocha-parallel/mocha-parallel-tests)) is not prefect for me, it don't allow share selenium webdriver connection between tests. So I want to start pool of workers, that each other initialize webdriver and all test cases, then master thread send message to worker for start specific test case and worker send result back on finish. [`Mocha#unloadFiles`]( https://github.com/mochajs/mocha/blob/52b5c42c3dda8c386735969642843bd1129a4562/lib/mocha.js#L356), seems could help, but unload cache and reinitialize tests for each run it's little bit overwhelmed. 
@boneskull, can you clarify, why `cleanReferences` remove only `fn` property from each hook instead of clear entire array? Ok, I've investigated the issues a bit and I want to propose the following changes. They are designed to be 100% backward compatible and minor while still allowing for the new feature. @boneskull @plroebuck do you agree with these changes? I'd be willing to prepare the PR. ## 🔀 `cleanReferences` I want to add a feature to allow references not to be cleared. Let's call it `autoDispose`. By default it would be `true` (non-breaking change). But if you use mocha programmatically you are allowed to set it to `false`, you would have to call `dispose` on the mocha instance later if you still want to dispose. ## 🔢 "bits of state" For the _bits of state_ issue, I'm pretty sure that the state is situated in `Suite`, `Test` and `Runnable`. I would like to add a `reset` method in each, which resets the state. It would be called from the `Mocha` class, whenever an _nth_ test run is started (where `n > 1`). This way the way of working within mocha remains mostly the same. No big changes needed in the respective classes, responsibility, etc. ## 🏡 Housekeeping I would also want to add 2 new validations: 1. Whenever mocha's `run` method is called an "object is already disposed" error will be thrown whenever the mocha's instance is already disposed (either by autoDispose, or by a manual dispose). 1. Whenever mocha's `run` method is called while a run is already in progress a "mocha run is already in progress" error is thrown. sounds reasonable to me. you’ll also want to ensure there’s no eventemitter leaks happening.
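For reference, the API that grew out of this discussion (and that the test changes above exercise) makes the reuse scenario look roughly like the sketch below; the spec file path is a placeholder.

```js
'use strict';
const Mocha = require('mocha');

const mocha = new Mocha();
// keep suites, hooks and test functions intact after the first run
mocha.cleanReferencesAfterRun(false);
mocha.addFile('./test/example.spec.js'); // placeholder spec file

mocha.run(function(firstFailures) {
  // reuse the same instance; no require-cache juggling needed
  mocha.run(function(secondFailures) {
    console.log('ran twice:', firstFailures, secondFailures);
    mocha.dispose(); // release references once the instance is done for good
  });
});
```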
2020-04-21T20:31:18Z
7.1
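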
mochajs/mocha
4147
mochajs__mocha-4147
[ "4144" ]
7d78f209c6a4f8ef4eba584fe10515fd3901830e
diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -800,7 +800,7 @@ Runner.prototype.runSuite = function(suite, fn) { }; /** - * Handle uncaught exceptions. + * Handle uncaught exceptions within runner. * * @param {Error} err * @private @@ -893,6 +893,17 @@ Runner.prototype.uncaught = function(err) { this.abort(); }; +/** + * Handle uncaught exceptions after runner's end event. + * + * @param {Error} err + * @private + */ +Runner.prototype.uncaughtEnd = function uncaughtEnd(err) { + if (err instanceof Pending) return; + throw err; +}; + /** * Run the root suite and invoke `fn(failures)` * on completion. @@ -940,16 +951,12 @@ Runner.prototype.run = function(fn) { this.on(constants.EVENT_RUN_END, function() { debug(constants.EVENT_RUN_END); process.removeListener('uncaughtException', uncaught); - process.on('uncaughtException', function(err) { - if (err instanceof Pending) { - return; - } - throw err; - }); + process.on('uncaughtException', self.uncaughtEnd); fn(self.failures); }); // uncaught exception + process.removeListener('uncaughtException', self.uncaughtEnd); process.on('uncaughtException', uncaught); if (this._delay) {
diff --git a/test/integration/fixtures/uncaught/listeners.fixture.js b/test/integration/fixtures/uncaught/listeners.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/uncaught/listeners.fixture.js @@ -0,0 +1,12 @@ +'use strict'; + +const assert = require('assert'); +const mocha = require("../../../../lib/mocha"); + +for (let i = 0; i < 15; i++) { + const r = new mocha.Runner(new mocha.Suite("" + i, undefined)); + r.run(); +} + +assert.equal(process.listenerCount('uncaughtException'), 1); +assert.equal(process.listeners('uncaughtException')[0].name, 'uncaughtEnd'); diff --git a/test/integration/uncaught.spec.js b/test/integration/uncaught.spec.js --- a/test/integration/uncaught.spec.js +++ b/test/integration/uncaught.spec.js @@ -86,4 +86,16 @@ describe('uncaught exceptions', function() { done(); }); }); + + it('removes uncaught exceptions handlers correctly', function(done) { + run('uncaught/listeners.fixture.js', args, function(err, res) { + if (err) { + return done(err); + } + + expect(res, 'to have passed').and('to have passed test count', 0); + + done(); + }); + }); });
Mocha 7: A global `uncaughtException` handle is leaked for every completed test runner ### Description A global `uncaughtException` handle is leaked for every completed test runner, in turn causing a `MaxListenersExceededWarning: Possible EventEmitter memory leak detected. 11 uncaughtException listeners added to [process]. Use emitter.setMaxListeners() to increase limit` warning to be thrown by `node`. Since every handler attached is identical in content [here](https://github.com/mochajs/mocha/pull/4030/files#r363540897), attaching _one_ for the first completed runner should be just as correct (although, functionality-wise, it may need to be detcahced once a new runner has started - it's unclear how the sequencing between multiple runners is intended to work out for this handler). ### Steps to Reproduce ```js const mocha = require("mocha"); for (let i = 0; i < 15; i++) { const r = new mocha.Runner(new mocha.Suite(""+i, undefined)); r.run(); } ``` **Expected behavior:** No warnings from node. **Actual behavior:** A `MaxListenersExceededWarning: Possible EventEmitter memory leak detected. 11 uncaughtException listeners added to [process]. Use emitter.setMaxListeners() to increase limit` warning is printed. **Reproduces how often:** Every time. You can paste that example code into a [repl](https://runkit.com/weswigham/5e13d0b85c4aff001b6c768f).
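A condensed version of the regression fixture added above shows the invariant the fix restores: however many runners complete, only the single named `uncaughtEnd` listener remains on the process.

```js
'use strict';
const assert = require('assert');
const mocha = require('mocha');

for (let i = 0; i < 15; i++) {
  const r = new mocha.Runner(new mocha.Suite('' + i, undefined));
  r.run();
}

// previously this grew by one anonymous listener per completed runner
assert.strictEqual(process.listenerCount('uncaughtException'), 1);
assert.strictEqual(process.listeners('uncaughtException')[0].name, 'uncaughtEnd');
```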
2020-01-09T14:55:18Z
7
mochajs/mocha
4063
mochajs__mocha-4063
[ "4035" ]
ec17f6315e0817bfb8e37279d31affc4ec108623
diff --git a/lib/cli/node-flags.js b/lib/cli/node-flags.js --- a/lib/cli/node-flags.js +++ b/lib/cli/node-flags.js @@ -68,6 +68,7 @@ exports.impliesNoTimeouts = flag => debugFlags.has(flag); /** * All non-strictly-boolean arguments to node--those with values--must specify those values using `=`, e.g., `--inspect=0.0.0.0`. * Unparse these arguments using `yargs-unparser` (which would result in `--inspect 0.0.0.0`), then supply `=` where we have values. + * Apparently --require in Node.js v8 does NOT want `=`. * There's probably an easier or more robust way to do this; fixes welcome * @param {Object} opts - Arguments object * @returns {string[]} Unparsed arguments using `=` to specify values @@ -79,7 +80,9 @@ exports.unparseNodeFlags = opts => { ? args .join(' ') .split(/\b/) - .map(arg => (arg === ' ' ? '=' : arg)) + .map((arg, index, args) => + arg === ' ' && args[index - 1] !== 'require' ? '=' : arg + ) .join('') .split(' ') : [];
diff --git a/test/node-unit/cli/node-flags.spec.js b/test/node-unit/cli/node-flags.spec.js --- a/test/node-unit/cli/node-flags.spec.js +++ b/test/node-unit/cli/node-flags.spec.js @@ -132,5 +132,14 @@ describe('node-flags', function() { ['--v8-numeric-one=1', '--v8-boolean-one', '--v8-numeric-two=2'] ); }); + + it('should special-case "--require"', function() { + // note the only way for this to happen IN REAL LIFE is if you use "--require esm"; + // mocha eats all --require args otherwise. + expect(unparseNodeFlags({require: 'mcrib'}), 'to equal', [ + '--require', + 'mcrib' + ]); + }); }); });
6.2.1 fails for node 8.x if there is a --require <!-- Have you read Mocha's Code of Conduct? By filing an Issue, you are expected to comply with it, including treating everyone with respect: https://github.com/mochajs/mocha/blob/master/.github/CODE_OF_CONDUCT.md For more, check out the Mocha Gitter chat room: https://gitter.im/mochajs/mocha Detail the steps necessary to reproduce the problem. To get the fastest support, create an MCVE and upload it to GitHub. create an [MCVE](https://stackoverflow.com/help/mcve) and upload it to GitHub. --> ### Prerequisites <!-- Place an `x` between the square brackets on the lines below for every satisfied prerequisite. --> - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [x] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. ### Description 6.2.0 the command below works `NODE_ENV=test PORT=9999 node_modules/.bin/mocha -t 5000 --require esm --exit -b --recursive server/test/bootstrap.test.js` with 6.2.1 if fails with this output on Node 8.x (NB: works on Node 10.x for both) `bad option: --require=esm` Node 8.x expects the require to be in this format `--require esm` i.e. no equals `=` sign I can workaround this by running the underlying node more directly but wanted to let you know about this small regression. ### Steps to Reproduce See description **Expected behavior:** [What you expect to happen] See description **Actual behavior:** [What actually happens] <!-- Please include any output, especially error messages (including stacktrace). Remember, we can't see your screen. Scrub if needed so as not to reveal passwords, etc. --> See description **Reproduces how often:** 100% ### Versions <!-- If applicable, please specify: --> - The output of `mocha --version` and `node node_modules/.bin/mocha --version`: No global install and local is 6.2.1 - The output of `node --version`: v8.16.1 - Your operating system - name and version: Mac OS Mojave 10.14.6 also fails on CircleCI node 8 image which I think is Alpine - architecture (32 or 64-bit): 64-bit - Your shell (e.g., bash, zsh, PowerShell, cmd): zsh - Your browser and version (if running browser tests): N/A - Any third-party Mocha-related modules (and their versions): None - Any code transpiler (e.g., TypeScript, CoffeeScript, Babel) being used (and its version): none / ESM as a loader ### Additional Information <!-- Any additional information, configuration or data that might be necessary to reproduce the issue. -->
I just hit this; will fix. Probably 5f1cad5ee254ab3ac48d4585726a884255a23583. No, it's not directly responsible, but exposed the bug.
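The behaviour the patch above restores, mirroring the new unit test (this requires Mocha's internal module directly, as that test does):

```js
const {unparseNodeFlags} = require('mocha/lib/cli/node-flags');

// flags with values are normally re-emitted with "=", e.g. --inspect=0.0.0.0,
// but --require keeps its space-separated form because Node.js v8 rejects
// the "--require=<module>" spelling
unparseNodeFlags({require: 'esm'}); // => ['--require', 'esm']
```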
2019-10-12T00:05:54Z
6.2
mochajs/mocha
4068
mochajs__mocha-4068
[ "4022" ]
d9f5079b3b26c61fec3329a902dea00ccc961f70
diff --git a/lib/reporters/base.js b/lib/reporters/base.js --- a/lib/reporters/base.js +++ b/lib/reporters/base.js @@ -154,14 +154,14 @@ exports.cursor = { } }; -function showDiff(err) { +var showDiff = (exports.showDiff = function(err) { return ( err && err.showDiff !== false && sameType(err.actual, err.expected) && err.expected !== undefined ); -} +}); function stringifyDiffObjs(err) { if (!utils.isString(err.actual) || !utils.isString(err.expected)) { @@ -182,9 +182,19 @@ function stringifyDiffObjs(err) { * @return {string} Diff */ var generateDiff = (exports.generateDiff = function(actual, expected) { - return exports.inlineDiffs - ? inlineDiff(actual, expected) - : unifiedDiff(actual, expected); + try { + return exports.inlineDiffs + ? inlineDiff(actual, expected) + : unifiedDiff(actual, expected); + } catch (err) { + var msg = + '\n ' + + color('diff added', '+ expected') + + ' ' + + color('diff removed', '- actual: failed to generate Mocha diff') + + '\n'; + return msg; + } }); /** diff --git a/lib/reporters/xunit.js b/lib/reporters/xunit.js --- a/lib/reporters/xunit.js +++ b/lib/reporters/xunit.js @@ -163,9 +163,9 @@ XUnit.prototype.test = function(test) { if (test.state === STATE_FAILED) { var err = test.err; var diff = - Base.hideDiff || !err.actual || !err.expected - ? '' - : '\n' + Base.generateDiff(err.actual, err.expected); + !Base.hideDiff && Base.showDiff(err) + ? '\n' + Base.generateDiff(err.actual, err.expected) + : ''; this.write( tag( 'testcase',
diff --git a/test/reporters/xunit.spec.js b/test/reporters/xunit.spec.js --- a/test/reporters/xunit.spec.js +++ b/test/reporters/xunit.spec.js @@ -350,6 +350,42 @@ describe('XUnit reporter', function() { '</failure></testcase>'; expect(expectedWrite, 'to be', expectedTag); }); + + it('should handle non-string diff values', function() { + var runner = new EventEmitter(); + createStatsCollector(runner); + var xunit = new XUnit(runner); + + var expectedTest = { + state: STATE_FAILED, + title: expectedTitle, + parent: { + fullTitle: function() { + return expectedClassName; + } + }, + duration: 1000, + err: { + actual: 1, + expected: 2, + message: expectedMessage, + stack: expectedStack + } + }; + + sandbox.stub(xunit, 'write').callsFake(function(str) { + expectedWrite += str; + }); + + runner.emit(EVENT_TEST_FAIL, expectedTest, expectedTest.err); + runner.emit(EVENT_RUN_END); + sandbox.restore(); + + var expectedDiff = + '\n + expected - actual\n\n -1\n +2\n '; + + expect(expectedWrite, 'to contain', expectedDiff); + }); }); describe('on test pending', function() {
reporter xunit output report incomplete ### Prerequisites - [X] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [X] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [X] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [X] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. ### Description We are using hippie to test our components. During the request assertion, if the test fails, the output does not display the entire report. Not event the failed test nor the next tests. ``` <testsuite name="Mocha Tests" tests="3" failures="0" errors="1" skipped="0" timestamp="Tue, 17 Sep 2019 13:26:17 GMT" time="0.378"> <testcase classname="test" name="test ok" time="0.001"/> // missing testsuite end tag. ``` ### Steps to Reproduce ```js "use strict"; const {expect} = require("chai"); const hippie = require("hippie"); describe("test", function() { it("test ok", function(){ expect(true).to.be.equal(true); }) it("test ko", function(){ return hippie() .base("https://mochajs.org") .get("/not_found") .expectStatus(200) .end(); }) it("test ok", function(){ expect(true).to.be.equal(true); }) }) ``` Test report with spec reporter shows succesfully: `$ npx mocha test.js --reporter spec` ``` test ✓ test ok 1) test ko ✓ test ok 2 passing (204ms) 1 failing 1) test test ko: AssertionError: Status code Actual: 404 Expected: 200 at assert (node_modules/hippie/lib/hippie/assert.js:36:10) at statusCode (node_modules/hippie/lib/hippie/expect.js:23:10) at verify (node_modules/hippie/lib/hippie/client.js:476:5) at Client.verify (node_modules/hippie/lib/hippie/client.js:477:5) at /home/sbesson/GIT/TMP/node_modules/hippie/lib/hippie/client.js:437:12 at Client.exports.raw [as parse] (node_modules/hippie/lib/hippie/parsers.js:33:3) at Request._callback (node_modules/hippie/lib/hippie/client.js:435:10) at Request.self.callback (node_modules/request/request.js:185:22) at Request.<anonymous> (node_modules/request/request.js:1157:10) at IncomingMessage.<anonymous> (node_modules/request/request.js:1079:12) at endReadableNT (_stream_readable.js:1129:12) at process._tickCallback (internal/process/next_tick.js:63:19) ``` But with xunit reporter: `npx mocha test.js --reporter xunit` ``` <testsuite name="Mocha Tests" tests="3" failures="0" errors="1" skipped="0" timestamp="Tue, 17 Sep 2019 13:33:12 GMT" time="0.195"> <testcase classname="test" name="test ok" time="0.001"/> ``` **Expected behavior:** [What you expect to happen] We expect an output looking like this: ``` <testsuite name="Mocha Tests" tests="3" failures="0" errors="1" skipped="0" timestamp="Tue, 17 Sep 2019 13:37:42 GMT" time="0.251"> <testcase classname="test" name="test ok" time="0.001"/> <testcase classname="test" name="test ko" time="0"><failure>expected 404 to equal 200 + expected - actual -404 +200 AssertionError: expected 404 to equal 200 at /home/sbesson/GIT/TMP/test.js:16:31 at verify 
(node_modules/hippie/lib/hippie/client.js:475:32) at Client.verify (node_modules/hippie/lib/hippie/client.js:477:5) at /home/sbesson/GIT/TMP/node_modules/hippie/lib/hippie/client.js:437:12 at Client.exports.raw [as parse] (node_modules/hippie/lib/hippie/parsers.js:33:3) at Request._callback (node_modules/hippie/lib/hippie/client.js:435:10) at Request.self.callback (node_modules/request/request.js:185:22) at Request.&#x3C;anonymous&#x3E; (node_modules/request/request.js:1157:10) at IncomingMessage.&#x3C;anonymous&#x3E; (node_modules/request/request.js:1079:12) at endReadableNT (_stream_readable.js:1129:12) at process._tickCallback (internal/process/next_tick.js:63:19)</failure></testcase> <testcase classname="test" name="test ok" time="0"/> </testsuite> ``` **Actual behavior:** [What actually happens] ``` <testsuite name="Mocha Tests" tests="3" failures="0" errors="1" skipped="0" timestamp="Tue, 17 Sep 2019 13:33:12 GMT" time="0.195"> <testcase classname="test" name="test ok" time="0.001"/> ``` **Reproduces how often:** [What percentage of the time does it reproduce?] 100% ### Versions * node : 10.16.0 * mocha : 6.2.0 * hippie : 0.5.2 * OS : Linux Mint 19.1 Tessa * architecture : 64-bit * shell : bash ### Additional Information It works just fine with mocha@5.2.0. Best regards.
Please run the same test again and write the output to a file, see [docu](https://mochajs.org/#xunit). It's important to know wether the file output is correct or truncated as well. Hi, I am Serge's colleague. The output is truncated as well. [report-output.zip](https://github.com/mochajs/mocha/files/3626331/report-output.zip) It appears the output doesn't show up because the reporter crashes at [this line](https://github.com/mochajs/mocha/blob/master/lib/reporters/base.js#L424), which means it's actually a problem in the `diff` package. The exception that is thrown is `TypeError: value.split is not a function`. Edit: the expected result is achieved when casting both the expected and actual values to strings first. Seems `diff` was having trouble with `number` type values.
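The regression can be reproduced without hippie: any assertion that attaches non-string `actual`/`expected` values to the error (numbers here) used to make the xunit reporter die while generating the diff, truncating the report. A minimal sketch:

```js
const assert = require('assert');

describe('xunit diff regression', function() {
  it('fails with numeric actual/expected', function() {
    // AssertionError carries actual=404 and expected=200 as numbers;
    // run with `mocha --reporter xunit` to exercise the diff generation
    assert.strictEqual(404, 200);
  });
});
```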
2019-10-14T20:55:10Z
6.2
mochajs/mocha
3834
mochajs__mocha-3834
[ "3808" ]
a4f1a442a22e53ad629a5f565d4a17b687afce53
diff --git a/lib/cli/options.js b/lib/cli/options.js --- a/lib/cli/options.js +++ b/lib/cli/options.js @@ -80,11 +80,12 @@ const nargOpts = types.array /** * Wrapper around `yargs-parser` which applies our settings * @param {string|string[]} args - Arguments to parse + * @param {Object} defaultValues - Default values of mocharc.json * @param {...Object} configObjects - `configObjects` for yargs-parser * @private * @ignore */ -const parse = (args = [], ...configObjects) => { +const parse = (args = [], defaultValues = {}, ...configObjects) => { // save node-specific args for special handling. // 1. when these args have a "=" they should be considered to have values // 2. if they don't, they just boolean flags @@ -109,6 +110,7 @@ const parse = (args = [], ...configObjects) => { const result = yargsParser.detailed(args, { configuration, configObjects, + default: defaultValues, coerce: coerceOpts, narg: nargOpts, alias: aliases, @@ -324,11 +326,11 @@ const loadOptions = (argv = []) => { args = parse( args._, + mocharc, args, rcConfig || {}, pkgConfig || {}, - optsConfig || {}, - mocharc + optsConfig || {} ); // recombine positional arguments and "spec" diff --git a/lib/cli/run-helpers.js b/lib/cli/run-helpers.js --- a/lib/cli/run-helpers.js +++ b/lib/cli/run-helpers.js @@ -219,7 +219,7 @@ exports.singleRun = (mocha, {files = [], exit = false} = {}) => { */ exports.watchRun = ( mocha, - {extension = ['js'], grep = '', ui = 'bdd', files = []} = {} + {extension = [], grep = '', ui = 'bdd', files = []} = {} ) => { let runner; @@ -291,7 +291,7 @@ exports.watchRun = ( */ exports.runMocha = ( mocha, - {watch = false, extension = ['js'], grep = '', ui = 'bdd', exit = false} = {}, + {watch = false, extension = [], grep = '', ui = 'bdd', exit = false} = {}, files = [] ) => { if (watch) { diff --git a/lib/utils.js b/lib/utils.js --- a/lib/utils.js +++ b/lib/utils.js @@ -562,7 +562,6 @@ function isHiddenOnUnix(pathname) { * * @public * @memberof Mocha.utils - * @todo Fix extension handling * @param {string} filepath - Base path to start searching from. * @param {string[]} extensions - File extensions to look for. * @param {boolean} recursive - Whether to recurse into subdirectories. @@ -571,13 +570,18 @@ function isHiddenOnUnix(pathname) { * @throws {TypeError} if `filepath` is directory and `extensions` not provided. */ exports.lookupFiles = function lookupFiles(filepath, extensions, recursive) { + extensions = extensions || []; var files = []; var stat; if (!fs.existsSync(filepath)) { - if (fs.existsSync(filepath + '.js')) { - filepath += '.js'; - } else { + // check all extensions + extensions.forEach(function(ext) { + if (fs.existsSync(filepath + '.' + ext)) { + files.push(filepath + '.' + ext); + } + }); + if (!files.length) { // Handle glob files = glob.sync(filepath); if (!files.length) { @@ -586,8 +590,8 @@ exports.lookupFiles = function lookupFiles(filepath, extensions, recursive) { filepath ); } - return files; } + return files; } // Handle file @@ -618,7 +622,7 @@ exports.lookupFiles = function lookupFiles(filepath, extensions, recursive) { // ignore error return; } - if (!extensions) { + if (!extensions.length) { throw createMissingArgumentError( util.format( 'Argument %s required when argument %s is a directory',
diff --git a/test/integration/file-utils.spec.js b/test/integration/file-utils.spec.js --- a/test/integration/file-utils.spec.js +++ b/test/integration/file-utils.spec.js @@ -58,7 +58,7 @@ describe('file utils', function() { ex.and('to have length', expectedLength); }); - it('should parse extensions from extnsions parameter', function() { + it('should parse extensions from extensions parameter', function() { var nonJsFile = tmpFile('mocha-utils-text.txt'); fs.writeFileSync(nonJsFile, 'yippy skippy ying yang yow'); @@ -66,9 +66,14 @@ describe('file utils', function() { expect(res, 'to contain', nonJsFile).and('to have length', 1); }); - it('should not require the extensions parameter when looking up a file', function() { - var res = utils.lookupFiles(tmpFile('mocha-utils'), undefined, false); - expect(res, 'to be', tmpFile('mocha-utils.js')); + it('should require the extensions parameter when looking up a file', function() { + var dirLookup = function() { + return utils.lookupFiles(tmpFile('mocha-utils'), undefined, false); + }; + expect(dirLookup, 'to throw', { + name: 'Error', + code: 'ERR_MOCHA_NO_FILES_MATCH_PATTERN' + }); }); it('should require the extensions parameter when looking up a directory', function() { diff --git a/test/node-unit/cli/options.spec.js b/test/node-unit/cli/options.spec.js --- a/test/node-unit/cli/options.spec.js +++ b/test/node-unit/cli/options.spec.js @@ -27,7 +27,9 @@ const defaults = { timeout: 1000, timeouts: 1000, t: 1000, - opts: '/default/path/to/mocha.opts' + opts: '/default/path/to/mocha.opts', + extension: ['js'], + 'watch-extensions': ['js'] }; describe('options', function() { @@ -59,6 +61,7 @@ describe('options', function() { describe('loadOptions()', function() { describe('when no parameter provided', function() { beforeEach(function() { + this.timeout(500); readFileSync = sandbox.stub(); readFileSync.onFirstCall().returns('{}'); readFileSync.onSecondCall().returns('--retries 3'); @@ -497,8 +500,8 @@ describe('options', function() { beforeEach(function() { readFileSync = sandbox.stub(); config = '/some/.mocharc.json'; - readFileSync.onFirstCall().returns('--retries 3'); - readFileSync.onSecondCall().returns('{}'); + readFileSync.onFirstCall().returns('{}'); + readFileSync.onSecondCall().returns('--retries 3'); findConfig = sandbox.stub(); loadConfig = sandbox.stub().throws('Error', 'failed to parse'); findupSync = sandbox.stub().returns('/some/package.json'); @@ -542,8 +545,8 @@ describe('options', function() { beforeEach(function() { readFileSync = sandbox.stub(); - readFileSync.onFirstCall().returns('--retries 3'); - readFileSync.onSecondCall().returns('{}'); + readFileSync.onFirstCall().returns('{}'); + readFileSync.onSecondCall().throws(); findConfig = sandbox.stub().returns('/some/.mocharc.json'); loadConfig = sandbox.stub().returns({}); findupSync = sandbox.stub().returns('/some/package.json'); @@ -578,8 +581,8 @@ describe('options', function() { beforeEach(function() { readFileSync = sandbox.stub(); - readFileSync.onFirstCall().returns('--retries 3'); - readFileSync.onSecondCall().returns('{}'); + readFileSync.onFirstCall().returns('{}'); + readFileSync.onSecondCall().throws(); findConfig = sandbox.stub().returns(null); loadConfig = sandbox.stub().returns({}); findupSync = sandbox.stub().returns('/some/package.json'); @@ -716,5 +719,78 @@ describe('options', function() { }); }); }); + + describe('"extension" handling', function() { + describe('when user supplies "extension" option', function() { + let result; + + beforeEach(function() { + 
readFileSync = sandbox.stub(); + readFileSync.onFirstCall().throws(); + findConfig = sandbox.stub().returns('/some/.mocharc.json'); + loadConfig = sandbox.stub().returns({extension: ['tsx']}); + findupSync = sandbox.stub(); + loadOptions = proxyLoadOptions({ + readFileSync, + findConfig, + loadConfig, + findupSync + }); + result = loadOptions(['--extension', 'ts']); + }); + + it('should not concatenate the default value', function() { + expect(result, 'to have property', 'extension', ['ts', 'tsx']); + }); + }); + + describe('when user does not supply "extension" option', function() { + let result; + + beforeEach(function() { + readFileSync = sandbox.stub(); + readFileSync.onFirstCall().throws(); + findConfig = sandbox.stub().returns('/some/.mocharc.json'); + loadConfig = sandbox.stub().returns({}); + findupSync = sandbox.stub(); + loadOptions = proxyLoadOptions({ + readFileSync, + findConfig, + loadConfig, + findupSync + }); + result = loadOptions(); + }); + + it('should retain the default', function() { + expect(result, 'to have property', 'extension', ['js']); + }); + }); + }); + + describe('"spec" handling', function() { + describe('when user supplies "spec" in config and positional arguments', function() { + let result; + + beforeEach(function() { + readFileSync = sandbox.stub(); + readFileSync.onFirstCall().throws(); + findConfig = sandbox.stub().returns('/some/.mocharc.json'); + loadConfig = sandbox.stub().returns({spec: '*.spec.js'}); + findupSync = sandbox.stub(); + loadOptions = proxyLoadOptions({ + readFileSync, + findConfig, + loadConfig, + findupSync + }); + result = loadOptions(['*.test.js']); + }); + + it('should place both into the positional arguments array', function() { + expect(result, 'to have property', '_', ['*.test.js', '*.spec.js']); + }); + }); + }); }); });
Extension option does not clear the default .js ### Prerequisites - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [ x Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend that you _not_ install Mocha globally. ### Description According to [Mocha's own docs](https://mochajs.org/#-extension-ext-watch-extensions-ext) one should be able to specify which extensions mocha will automatically load and that by specifying an extension the default of `*.js` should be cleared. _This does not happen in version 6_ (and I have never seen it happen earlier either). ### Steps to Reproduce ``` git clone fatso83/test-driven-learning cd javascript/mocha-recursive-extensions-test npm i npm run test-bar ``` **Expected behavior:** Only this output should be shown, by Mocha loading only the file with the `.bar` extension ``` mocha --extension bar normal.bar ✓ should do nothing but register the test ``` **Actual behavior:** It loads the `normal.bar` - _in addition to_ all the files ending in `.js`. ``` mocha --extension bar This is not a test: no-test.js normal.bar ✓ should do nothing but register the test normal.bar.js ✓ should do nothing but register the test normal.js ✓ should do nothing but register the test ``` **Reproduces how often:** Always ### Versions 6.0.2
As far as I can tell this particular feature hasn't worked since v6. Tried on `6.0.0-0` and includes `.js` files. When run with debugger ``` DEBUG=mocha:cli:* mocha --extension bar ``` Gives initial cli "loaded opts" as `extension: [ 'bar', 'js' ]` Suggesting its appending a default early on. Perhaps feature was lost in migration to yargs. I came across this issue while finding out why the latest version of Mocha was also including all .js files in the watchlist of files. #3820 seems to fix it. However the ci build seems to fail, I'll see if I can find why and fix it.
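The fix above moves Mocha's built-in defaults into yargs-parser's `default` option instead of treating them as just another config object; a rough sketch of the difference (option names and values assumed for illustration):

```js
const parser = require('yargs-parser');

// values passed via `default` only apply when the user did not set the option,
// so `--extension bar` should yield ['bar'] rather than ['bar', 'js']
const argv = parser(['--extension', 'bar'], {
  array: ['extension'],
  default: {extension: ['js']}
});
console.log(argv.extension); // expected: ['bar']
```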
2019-03-14T00:11:30Z
6.1
mochajs/mocha
3816
mochajs__mocha-3816
[ "3813" ]
e6542538aa10c6137babf96d9ddc851fc6595ad3
diff --git a/lib/mocha.js b/lib/mocha.js --- a/lib/mocha.js +++ b/lib/mocha.js @@ -120,12 +120,15 @@ function Mocha(options) { utils.deprecate( 'enableTimeouts is DEPRECATED and will be removed from a future version of Mocha. Instead, use "timeout: false" to disable timeouts.' ); + if (options.enableTimeouts === false) { + this.timeout(0); + } + } + + // this guard exists because Suite#timeout does not consider `undefined` to be valid input + if (typeof options.timeout !== 'undefined') { + this.timeout(options.timeout === false ? 0 : options.timeout); } - this.timeout( - options.enableTimeouts === false || options.timeout === false - ? 0 - : options.timeout - ); if ('retries' in options) { this.retries(options.retries); diff --git a/lib/suite.js b/lib/suite.js --- a/lib/suite.js +++ b/lib/suite.js @@ -114,6 +114,7 @@ Suite.prototype.clone = function() { * Set or get timeout `ms` or short-hand such as "2s". * * @private + * @todo Do not attempt to set value if `ms` is undefined * @param {number|string} ms * @return {Suite|number} for chaining */
diff --git a/test/unit/mocha.spec.js b/test/unit/mocha.spec.js --- a/test/unit/mocha.spec.js +++ b/test/unit/mocha.spec.js @@ -20,24 +20,49 @@ describe('Mocha', function() { beforeEach(function() { sandbox.stub(Mocha.prototype, 'useColors').returnsThis(); sandbox.stub(utils, 'deprecate'); + sandbox.stub(Mocha.prototype, 'timeout').returnsThis(); }); - it('should prefer "color" over "useColors"', function() { - // eslint-disable-next-line no-new - new Mocha({useColors: true, color: false}); - expect(Mocha.prototype.useColors, 'to have a call satisfying', [false]); + describe('when "useColors" option is defined', function() { + it('should prefer "color" over "useColors"', function() { + // eslint-disable-next-line no-new + new Mocha({useColors: true, color: false}); + expect(Mocha.prototype.useColors, 'to have a call satisfying', [ + false + ]).and('was called once'); + }); + + it('should assign "useColors" to "color"', function() { + // eslint-disable-next-line no-new + new Mocha({useColors: true}); + expect(Mocha.prototype.useColors, 'to have a call satisfying', [ + true + ]).and('was called once'); + }); + + it('should call utils.deprecate()', function() { + // eslint-disable-next-line no-new + new Mocha({useColors: true}); + expect(utils.deprecate, 'was called once'); + }); }); - it('should assign "useColors" to "color"', function() { - // eslint-disable-next-line no-new - new Mocha({useColors: true}); - expect(Mocha.prototype.useColors, 'to have a call satisfying', [true]); + describe('when "timeout" option is `undefined`', function() { + it('should not attempt to set timeout', function() { + // eslint-disable-next-line no-new + new Mocha({timeout: undefined}); + expect(Mocha.prototype.timeout, 'was not called'); + }); }); - it('should call utils.deprecate()', function() { - // eslint-disable-next-line no-new - new Mocha({useColors: true}); - expect(utils.deprecate, 'was called'); + describe('when "timeout" option is `false`', function() { + it('should set a timeout of 0', function() { + // eslint-disable-next-line no-new + new Mocha({timeout: false}); + expect(Mocha.prototype.timeout, 'to have a call satisfying', [0]).and( + 'was called once' + ); + }); }); });
Regression caused by the deprecation of `enableTimeouts` ### Description There is a regression caused by the deprecation of `enableTimeouts` As you cas see here: https://github.com/mochajs/mocha/pull/3556/files#diff-aa849a970cef551664c12f04a4209f6fR121 `enableTimeouts` was marked as deprecated and the use of `timeout: false` should replace it. But in the same time, we removed: ```js if (typeof options.timeout !== 'undefined' && options.timeout !== null) { this.timeout(options.timeout); ``` The bug is present with `grunt-mocha-test`. As you can see, `grunt-mocha-test` parse all options as undefined: ```js { grep: undefined, reporter: 'spec', bail: false, ui: undefined, timeout: undefined, invert: undefined, ignoreLeaks: undefined, growl: undefined, globals: undefined, require: undefined, colors: undefined, slow: undefined } ``` The bug is not present with `grunt-simple-mocha` Should we kept it while `enableTimeouts` is marked as deprecated ? ### Steps to Reproduce ```sh npm install grunt grunt-mocha-test ``` in `Gruntfile.js` ```js module.exports = function(grunt) { grunt.loadNpmTasks('grunt-mocha-test'); grunt.initConfig({ mochaTest: { test: { src: ['test/**/*.js'] } } }); grunt.registerTask('default', 'mochaTest'); }; ``` Then launch test ```sh grunt mochaTest ``` ### Expected behavior Mocha v6 should work with options like {timeout: undefined} because there is only a deprecation warning in v6 for `enableTimeouts` option. ### Actual behavior: TypeError: Cannot read property 'toString' of undefined at Suite.timeout (/home/fwadmin-server/smc/node_modules/mocha/lib/suite.js:124:10) at Mocha.timeout (/home/fwadmin-server/smc/node_modules/mocha/lib/mocha.js:625:14) at new Mocha (/home/fwadmin-server/smc/node_modules/mocha/lib/mocha.js:124:8) at new MochaWrapper (/home/fwadmin-server/smc/node_modules/grunt-mocha-test/tasks/lib/MochaWrapper.js:30:15) ### Reproduces how often: Always ### Versions 6.0.2
Thanks. Looks like I obliterated this guard.
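The failing call path is easy to state: plugins such as grunt-mocha-test forward every option key whether or not the user set it, so the constructor has to tolerate an explicit `undefined`. With the guard restored, the sketch below no longer throws:

```js
const Mocha = require('mocha');

// previously crashed in Suite#timeout with
// "TypeError: Cannot read property 'toString' of undefined"
const mocha = new Mocha({timeout: undefined, reporter: 'spec'});
```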
2019-03-07T01:48:43Z
6
mochajs/mocha
3,767
mochajs__mocha-3767
[ "3761" ]
6535965e8655a66de54fc0ad9465c2eb825f13f8
diff --git a/lib/cli/node-flags.js b/lib/cli/node-flags.js --- a/lib/cli/node-flags.js +++ b/lib/cli/node-flags.js @@ -29,16 +29,32 @@ const debugFlags = new Set(['debug', 'debug-brk', 'inspect', 'inspect-brk']); * - `--v8-*` (but *not* `--v8-options`) * @summary Whether or not to pass a flag along to the `node` executable. * @param {string} flag - Flag to test - * @returns {boolean} + * @param {boolean} [bareword=true] - If `false`, we expect `flag` to have one or two leading dashes. + * @returns {boolean} If the flag is considered a "Node" flag. * @private */ -exports.isNodeFlag = flag => - !/^(?:require|r)$/.test(flag) && - (nodeFlags.has(flag) || - debugFlags.has(flag) || - /(?:preserve-symlinks(?:-main)?|harmony(?:[_-]|$)|(?:trace[_-].+$)|gc(?:[_-]global)?$|es[_-]staging$|use[_-]strict$|v8[_-](?!options).+?$)/.test( - flag - )); +exports.isNodeFlag = (flag, bareword = true) => { + if (!bareword) { + // check if the flag begins with dashes; if not, not a node flag. + if (!/^--?/.test(flag)) { + return false; + } + // strip the leading dashes to match against subsequent checks + flag = flag.replace(/^--?/, ''); + } + return ( + // treat --require/-r as Mocha flag even though it's also a node flag + !(flag === 'require' || flag === 'r') && + // check actual node flags from `process.allowedNodeEnvironmentFlags`, + // then historical support for various V8 and non-`NODE_OPTIONS` flags + // and also any V8 flags with `--v8-` prefix + (nodeFlags.has(flag) || + debugFlags.has(flag) || + /(?:preserve-symlinks(?:-main)?|harmony(?:[_-]|$)|(?:trace[_-].+$)|gc(?:[_-]global)?$|es[_-]staging$|use[_-]strict$|v8[_-](?!options).+?$)/.test( + flag + )) + ); +}; /** * Returns `true` if the flag is a "debug-like" flag. These require timeouts diff --git a/lib/cli/options.js b/lib/cli/options.js --- a/lib/cli/options.js +++ b/lib/cli/options.js @@ -85,8 +85,9 @@ const parse = (args = [], ...configObjects) => { const nodeArgs = (Array.isArray(args) ? args : args.split(' ')).reduce( (acc, arg) => { const pair = arg.split('='); - const flag = pair[0].replace(/^--?/, ''); - if (isNodeFlag(flag)) { + let flag = pair[0]; + if (isNodeFlag(flag, false)) { + flag = flag.replace(/^--?/, ''); return arg.includes('=') ? acc.concat([[flag, pair[1]]]) : acc.concat([[flag, true]]);
diff --git a/test/integration/options/node-flags.spec.js b/test/integration/options/node-flags.spec.js new file mode 100644 --- /dev/null +++ b/test/integration/options/node-flags.spec.js @@ -0,0 +1,19 @@ +'use strict'; + +var invokeMocha = require('../helpers').invokeMocha; + +describe('node flags', function() { + it('should not consider argument values to be node flags', function(done) { + invokeMocha( + ['--require', 'trace-dependency'], + function(err, res) { + if (err) { + return done(err); + } + expect(res, 'not to have failed with output', /bad option/i); + done(); + }, + 'pipe' + ); + }); +}); diff --git a/test/node-unit/cli/node-flags.spec.js b/test/node-unit/cli/node-flags.spec.js --- a/test/node-unit/cli/node-flags.spec.js +++ b/test/node-unit/cli/node-flags.spec.js @@ -15,6 +15,18 @@ describe('node-flags', function() { }); }); + describe('when expecting leading dashes', function() { + it('should require leading dashes', function() { + expect(isNodeFlag('throw-deprecation', false), 'to be false'); + expect(isNodeFlag('--throw-deprecation', false), 'to be true'); + }); + + it('should return false for --require/-r', function() { + expect(isNodeFlag('--require', false), 'to be false'); + expect(isNodeFlag('-r', false), 'to be false'); + }); + }); + describe('special cases', function() { it('should return true for flags starting with "preserve-symlinks"', function() { expect(isNodeFlag('preserve-symlinks'), 'to be true');
cli option `--require trace-something` causes `bad option`-error ### Prerequisites - [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20) - [x] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code. - [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself - [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version`(Local) and `mocha --version`(Global). We recommend avoiding the use of globally installed Mocha. ### Description When executing mocha with the option `--require trace-something`, the error `bad-option --trace-something` is output. In my case. it is `trace-and-clarify-if-possible`, but it is reproducible with other package names. This happens in `mocha@6.0.0` and `mocha@6.0.1`, but not in `mocha@6.0.0-1`, and only if the required package-name starts with "trace-" ### Steps to Reproduce ``` # Setup dependency with empty index.js-file mkdir trace-dependency cd trace-dependency/ npm init -y touch index.js cd .. # Setup project and add dependency mkdir test-project cd test-project npm init -y npm install ../trace-dependency mkdir test touch test/dash-spec.js ``` Then install mocha and run the tests: ``` npm install mocha@6.0.0 npx mocha --require trace-dependency # Error "bad option --trace-dependency" npm install mocha@6.0.1 npx mocha --require trace-dependency # Error "bad option --trace-dependency" npm install mocha@6.0.0-1 npx mocha --require trace-dependency # No error, "0 passing" ``` **Expected behavior:** [What you expect to happen] I would have expected the same result for each mocha-version, i.e. the output "0 passing". **Actual behavior:** [What actually happens] * `mocha@6.0.0` and `mocha@6.0.1` have the output "bad option --trace-dependency" * `mocha@6.0.0-1` has the expected output **Reproduces how often:** [What percentage of the time does it reproduce?] 100% ### Versions - The output of `mocha --version` and `node node_modules/.bin/mocha --version`: 6.0.0, 6.0.1 - The output of `node --version`: v10.15.1 - The version and architecture of your operating system: Xubuntu 18.04 - Your shell (bash, zsh, PowerShell, cmd, etc.): bash - Any other third party Mocha related modules (with versions): none - The code transpiler being used: none ### Additional Information <!-- Any additional information, configuration or data that might be necessary to reproduce the issue. -->
also happens when ``` mocha --trace-warnings --inspect /usr/local/bin/node: bad option: --trace-warnings --inspect ``` but not when ``` mocha --inspect ``` or ``` mocha --trace-warnings ``` Reasonably sure this has to do with the Node flag processing going on [here](https://github.com/mochajs/mocha/blob/6535965e8655a66de54fc0ad9465c2eb825f13f8/lib/cli/node-flags.js#L19-L41). The problem here is that its checking for node-flags, when the original `--` or `-` is already removed by yargs or [this little line](https://github.com/mochajs/mocha/blob/6535965e8655a66de54fc0ad9465c2eb825f13f8/lib/cli/options.js#L88). A workaround for my problem is to use `--require=trace-and-clarify-if-possible`, but for @Janpot this will not work. I don't have a good solution to this, other than doing the whole preprocessing before `yargs` gets to parse the arguments...
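The distinction the new `bareword` parameter draws, mirroring the unit tests above (again requiring Mocha's internal module directly):

```js
const {isNodeFlag} = require('mocha/lib/cli/node-flags');

isNodeFlag('throw-deprecation');          // true  - bareword, dashes already stripped
isNodeFlag('--throw-deprecation', false); // true  - raw argv token with dashes
isNodeFlag('trace-dependency', false);    // false - no leading dashes, so it is a value
isNodeFlag('--require', false);           // false - always treated as a Mocha flag
```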
2019-02-25T05:06:20Z
6
mochajs/mocha
3737
mochajs__mocha-3737
[ "3668" ]
5d9d3eb665825ea69435388f5776150f40c844be
diff --git a/lib/mocha.js b/lib/mocha.js
--- a/lib/mocha.js
+++ b/lib/mocha.js
@@ -543,7 +543,9 @@ Mocha.prototype._growl = growl.notify;
  * mocha.globals(['jQuery', 'MyLib']);
  */
 Mocha.prototype.globals = function(globals) {
-  this.options.globals = (this.options.globals || []).concat(globals);
+  this.options.globals = (this.options.globals || [])
+    .concat(globals)
+    .filter(Boolean);
   return this;
 };
diff --git a/test/unit/mocha.spec.js b/test/unit/mocha.spec.js --- a/test/unit/mocha.spec.js +++ b/test/unit/mocha.spec.js @@ -5,7 +5,7 @@ var Mocha = require('../../lib/mocha'); var sinon = require('sinon'); describe('Mocha', function() { - var opts = {reporter: function() {}}; // no output + var opts = {reporter: utils.noop}; // no output var sandbox; beforeEach(function() { @@ -41,88 +41,59 @@ describe('Mocha', function() { }); }); - describe('.run(fn)', function() { - it('should not raise errors if callback was not provided', function() { - sandbox.stub(Mocha.Runner.prototype, 'run'); + describe('#allowUncaught()', function() { + it('should set the allowUncaught option to true', function() { var mocha = new Mocha(opts); - expect(function() { - mocha.run(); - }, 'not to throw'); + mocha.allowUncaught(); + expect(mocha.options, 'to have property', 'allowUncaught', true); }); - it('should execute the callback when complete', function(done) { + it('should be chainable', function() { var mocha = new Mocha(opts); - sandbox.stub(Mocha.Runner.prototype, 'run').callsArg(0); - mocha.run(done); - }); - }); - - describe('.reporter("xunit").run(fn)', function() { - it('should not raise errors if callback was not provided', function() { - var mocha = new Mocha(); - expect(function() { - try { - mocha.reporter('xunit').run(); - } catch (e) { - console.log(e); - expect.fail(e.message); - } - }, 'not to throw'); + expect(mocha.allowUncaught(), 'to be', mocha); }); }); - describe('.invert()', function() { - it('should set the invert option to true', function() { + describe('#bail()', function() { + it('should set the suite._bail to true if there is no arguments', function() { var mocha = new Mocha(opts); - mocha.invert(); - expect(mocha.options, 'to have property', 'invert', true); + mocha.bail(); + expect(mocha.suite._bail, 'to be', true); }); it('should be chainable', function() { var mocha = new Mocha(opts); - expect(mocha.invert(), 'to be', mocha); + expect(mocha.bail(), 'to be', mocha); }); }); - describe('.ignoreLeaks()', function() { - it('should set the ignoreLeaks option to true when param equals true', function() { - var mocha = new Mocha(opts); - mocha.ignoreLeaks(true); - expect(mocha.options, 'to have property', 'ignoreLeaks', true); - }); - - it('should set the ignoreLeaks option to false when param equals false', function() { - var mocha = new Mocha(opts); - mocha.ignoreLeaks(false); - expect(mocha.options, 'to have property', 'ignoreLeaks', false); - }); - - it('should set the ignoreLeaks option to false when the param is undefined', function() { + describe('#checkLeaks()', function() { + it('should set the ignoreLeaks option to false', function() { var mocha = new Mocha(opts); - mocha.ignoreLeaks(); + mocha.checkLeaks(); expect(mocha.options, 'to have property', 'ignoreLeaks', false); }); it('should be chainable', function() { var mocha = new Mocha(opts); - expect(mocha.ignoreLeaks(), 'to be', mocha); + expect(mocha.checkLeaks(), 'to be', mocha); }); }); - describe('.checkLeaks()', function() { - it('should set the ignoreLeaks option to false', function() { + describe('#delay()', function() { + it('should set the delay option to true', function() { var mocha = new Mocha(opts); - mocha.checkLeaks(); - expect(mocha.options, 'to have property', 'ignoreLeaks', false); + mocha.delay(); + expect(mocha.options, 'to have property', 'delay', true); }); it('should be chainable', function() { var mocha = new Mocha(opts); - expect(mocha.checkLeaks(), 'to be', mocha); + expect(mocha.delay(), 'to 
be', mocha); }); }); - describe('.fullTrace()', function() { + describe('#fullTrace()', function() { it('should set the fullStackTrace option to true', function() { var mocha = new Mocha(opts); mocha.fullTrace(); @@ -135,7 +106,53 @@ describe('Mocha', function() { }); }); - describe('.growl()', function() { + describe('#globals()', function() { + it('should be an empty array initially', function() { + var mocha = new Mocha(); + expect(mocha.options.globals, 'to be empty'); + }); + + it('should be chainable', function() { + var mocha = new Mocha(opts); + expect(mocha.globals(), 'to be', mocha); + }); + + describe('when argument is invalid', function() { + it('should not modify the whitelist when given empty string', function() { + var mocha = new Mocha(opts); + mocha.globals(''); + expect(mocha.options.globals, 'to be empty'); + }); + + it('should not modify the whitelist when given empty array', function() { + var mocha = new Mocha(opts); + mocha.globals([]); + expect(mocha.options.globals, 'to be empty'); + }); + }); + + describe('when argument is valid', function() { + var elem = 'foo'; + var elem2 = 'bar'; + + it('should add string to the whitelist', function() { + var mocha = new Mocha(opts); + mocha.globals(elem); + expect(mocha.options.globals, 'to contain', elem); + expect(mocha.options.globals, 'to have length', 1); + }); + + it('should add contents of string array to the whitelist', function() { + var mocha = new Mocha(opts); + var elems = [elem, elem2]; + mocha.globals(elems); + expect(mocha.options.globals, 'to contain', elem, elem2); + expect(mocha.options.globals, 'to have length', elems.length); + }); + }); + }); + + describe('#growl()', function() { describe('if capable of notifications', function() { it('should set the growl option to true', function() { var mocha = new Mocha(opts); @@ -164,32 +181,45 @@ describe('Mocha', function() { }); }); - describe('.useInlineDiffs()', function() { - it('should set the useInlineDiffs option to true when param equals true', function() { + describe('#ignoreLeaks()', function() { + it('should set the ignoreLeaks option to true when param equals true', function() { var mocha = new Mocha(opts); - mocha.useInlineDiffs(true); - expect(mocha.options, 'to have property', 'useInlineDiffs', true); + mocha.ignoreLeaks(true); + expect(mocha.options, 'to have property', 'ignoreLeaks', true); }); - it('should set the useInlineDiffs option to false when param equals false', function() { + it('should set the ignoreLeaks option to false when param equals false', function() { var mocha = new Mocha(opts); - mocha.useInlineDiffs(false); - expect(mocha.options, 'to have property', 'useInlineDiffs', false); + mocha.ignoreLeaks(false); + expect(mocha.options, 'to have property', 'ignoreLeaks', false); }); - it('should set the useInlineDiffs option to false when the param is undefined', function() { + it('should set the ignoreLeaks option to false when the param is undefined', function() { var mocha = new Mocha(opts); - mocha.useInlineDiffs(); - expect(mocha.options, 'to have property', 'useInlineDiffs', false); + mocha.ignoreLeaks(); + expect(mocha.options, 'to have property', 'ignoreLeaks', false); }); it('should be chainable', function() { var mocha = new Mocha(opts); - expect(mocha.useInlineDiffs(), 'to be', mocha); + expect(mocha.ignoreLeaks(), 'to be', mocha); + }); + }); + + describe('#invert()', function() { + it('should set the invert option to true', function() { + var mocha = new Mocha(opts); + mocha.invert(); + expect(mocha.options, 'to have 
property', 'invert', true); + }); + + it('should be chainable', function() { + var mocha = new Mocha(opts); + expect(mocha.invert(), 'to be', mocha); }); }); - describe('.noHighlighting()', function() { + describe('#noHighlighting()', function() { // :NOTE: Browser-only option... it('should set the noHighlighting option to true', function() { var mocha = new Mocha(opts); @@ -203,57 +233,79 @@ describe('Mocha', function() { }); }); - describe('.allowUncaught()', function() { - it('should set the allowUncaught option to true', function() { - var mocha = new Mocha(opts); - mocha.allowUncaught(); - expect(mocha.options, 'to have property', 'allowUncaught', true); + describe('#reporter()', function() { + it('should throw reporter error if an invalid reporter is given', function() { + var updatedOpts = {reporter: 'invalidReporter', reporterOptions: {}}; + var throwError = function() { + // eslint-disable-next-line no-new + new Mocha(updatedOpts); + }; + expect(throwError, 'to throw', { + message: "invalid reporter 'invalidReporter'", + code: 'ERR_MOCHA_INVALID_REPORTER', + reporter: 'invalidReporter' + }); }); it('should be chainable', function() { var mocha = new Mocha(opts); - expect(mocha.allowUncaught(), 'to be', mocha); + expect(mocha.reporter(), 'to be', mocha); }); }); - describe('.delay()', function() { - it('should set the delay option to true', function() { + describe('#run(fn)', function() { + it('should execute the callback when complete', function(done) { var mocha = new Mocha(opts); - mocha.delay(); - expect(mocha.options, 'to have property', 'delay', true); + sandbox.stub(Mocha.Runner.prototype, 'run').callsArg(0); + mocha.run(done); }); - it('should be chainable', function() { + it('should not raise errors if callback was not provided', function() { + sandbox.stub(Mocha.Runner.prototype, 'run'); var mocha = new Mocha(opts); - expect(mocha.delay(), 'to be', mocha); + expect(function() { + mocha.run(); + }, 'not to throw'); + }); + + describe('#reporter("xunit")#run(fn)', function() { + // :TBD: Why does specifying reporter differentiate this test from preceding one + it('should not raise errors if callback was not provided', function() { + var mocha = new Mocha(); + expect(function() { + try { + mocha.reporter('xunit').run(); + } catch (e) { + console.log(e); + expect.fail(e.message); + } + }, 'not to throw'); + }); }); }); - describe('.bail()', function() { - it('should set the suite._bail to true if there is no arguments', function() { + describe('#useInlineDiffs()', function() { + it('should set the useInlineDiffs option to true when param equals true', function() { var mocha = new Mocha(opts); - mocha.bail(); - expect(mocha.suite._bail, 'to be', true); + mocha.useInlineDiffs(true); + expect(mocha.options, 'to have property', 'useInlineDiffs', true); }); - it('should be chainable', function() { + it('should set the useInlineDiffs option to false when param equals false', function() { var mocha = new Mocha(opts); - expect(mocha.bail(), 'to be', mocha); + mocha.useInlineDiffs(false); + expect(mocha.options, 'to have property', 'useInlineDiffs', false); }); - }); - describe('error handling', function() { - it('should throw reporter error if an invalid reporter is given', function() { - var updatedOpts = {reporter: 'invalidReporter', reporterOptions: {}}; - var throwError = function() { - // eslint-disable-next-line no-new - new Mocha(updatedOpts); - }; - expect(throwError, 'to throw', { - message: "invalid reporter 'invalidReporter'", - code: 'ERR_MOCHA_INVALID_REPORTER', - 
reporter: 'invalidReporter' - }); + it('should set the useInlineDiffs option to false when the param is undefined', function() { + var mocha = new Mocha(opts); + mocha.useInlineDiffs(); + expect(mocha.options, 'to have property', 'useInlineDiffs', false); + }); + + it('should be chainable', function() { + var mocha = new Mocha(opts); + expect(mocha.useInlineDiffs(), 'to be', mocha); }); }); });
bug fix: concat(undefined) returns [undefined]

### Description of the Change

This line:

`this.options.globals = (this.options.globals || []).concat(globals);`

has the potential to produce arrays that contain the `undefined` element. The result of `concat(undefined)` is `[undefined]`, an array of length 1 that contains `undefined`. Later in `lib/runner` there is a filter that iterates over the globals list and raises an error when trying to reference `undefined`.

### Alternate Designs

There is different syntax that could be used to achieve the same validation and fallback. If there is a preferred style I'm happy to adjust to it.

### Why should this be in core?

It is fixing an edge case within the core libs.

### Benefits

Protects users from an edge case where `globals` has a falsy value.

### Possible Drawbacks

The impact is trivial; it increases the setup overhead by a little bit (if at all).

### Applicable issues

patch release
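For readers unfamiliar with this `Array` quirk, a short REPL-style illustration of the behaviour described above and of the `.filter(Boolean)` guard used in the patch:

```js
// concat(undefined) does not no-op: it appends a single undefined element.
const globals = [].concat(undefined);
console.log(globals);        // [ undefined ]
console.log(globals.length); // 1

// The guard used in the patch drops falsy entries such as undefined or ''.
console.log([].concat(undefined).filter(Boolean));            // []
console.log(['okGlobalA'].concat(undefined).filter(Boolean)); // [ 'okGlobalA' ]
```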
[![Coverage Status](https://coveralls.io/builds/21038001/badge)](https://coveralls.io/builds/21038001)

Coverage increased (+0.04%) to 90.814% when pulling **3efcc84c8b5f7e7ee7c85f93d62931748f7fffdd on givanse:concat-undefined** into **0a86e6f4a7d1724782d2cc4695ac6422bae94f37 on mochajs:master**.

@givanse Seems plausible, but how did you encounter this?

I encountered this by using Mocha programmatically with Testem. Right now I don't have access to the codebase. So it is going to take me a few days (maybe Wednesday) to get back to you with more details.

Maybe the fallback could be moved up, to: https://github.com/mochajs/mocha/blob/6b5a7855110d9c493dc41aec9fb2cea15aaa42aa/lib/mocha.js#L112 and use `.globals(options.globals || []);`
2019-02-16T15:05:36Z
6
mochajs/mocha
3699
mochajs__mocha-3699
[ "3681" ]
52b9a5fb97bc3a6581dc6538aa0092276e71ea41
diff --git a/lib/cli/node-flags.js b/lib/cli/node-flags.js --- a/lib/cli/node-flags.js +++ b/lib/cli/node-flags.js @@ -7,6 +7,7 @@ */ const nodeFlags = require('node-environment-flags'); +const unparse = require('yargs-unparser'); /** * These flags are considered "debug" flags. @@ -34,6 +35,7 @@ const debugFlags = new Set(['debug', 'debug-brk', 'inspect', 'inspect-brk']); exports.isNodeFlag = flag => !/^(?:require|r)$/.test(flag) && (nodeFlags.has(flag) || + debugFlags.has(flag) || /(?:preserve-symlinks(?:-main)?|harmony(?:[_-]|$)|(?:trace[_-].+$)|gc(?:[_-]global)?$|es[_-]staging$|use[_-]strict$|v8[_-](?!options).+?$)/.test( flag )); @@ -46,3 +48,22 @@ exports.isNodeFlag = flag => * @private */ exports.impliesNoTimeouts = flag => debugFlags.has(flag); + +/** + * All non-strictly-boolean arguments to node--those with values--must specify those values using `=`, e.g., `--inspect=0.0.0.0`. + * Unparse these arguments using `yargs-unparser` (which would result in `--inspect 0.0.0.0`), then supply `=` where we have values. + * There's probably an easier or more robust way to do this; fixes welcome + * @param {Object} opts - Arguments object + * @returns {string[]} Unparsed arguments using `=` to specify values + * @private + */ +exports.unparseNodeFlags = opts => { + var args = unparse(opts); + return args.length + ? args + .join(' ') + .split(/\b/) + .map(arg => (arg === ' ' ? '=' : arg)) + .join('') + : []; +}; diff --git a/lib/cli/options.js b/lib/cli/options.js --- a/lib/cli/options.js +++ b/lib/cli/options.js @@ -18,6 +18,7 @@ const findup = require('findup-sync'); const {deprecate} = require('../utils'); const debug = require('debug')('mocha:cli:options'); const {createMissingArgumentError} = require('../errors'); +const {isNodeFlag} = require('./node-flags'); /** * The `yargs-parser` namespace @@ -75,22 +76,46 @@ const nargOpts = types.array * @ignore */ const parse = (args = [], ...configObjects) => { - const result = yargsParser.detailed( - args, - Object.assign( - { - configuration, - configObjects, - coerce: coerceOpts, - narg: nargOpts, - alias: aliases - }, - types - ) + // save node-specific args for special handling. + // 1. when these args have a "=" they should be considered to have values + // 2. if they don't, they just boolean flags + // 3. to avoid explicitly defining the set of them, we tell yargs-parser they + // are ALL boolean flags. + // 4. we can then reapply the values after yargs-parser is done. + const nodeArgs = (Array.isArray(args) ? args : args.split(' ')).reduce( + (acc, arg) => { + const pair = arg.split('='); + const flag = pair[0].replace(/^--?/, ''); + if (isNodeFlag(flag)) { + return arg.includes('=') + ? acc.concat([[flag, pair[1]]]) + : acc.concat([[flag, true]]); + } + return acc; + }, + [] ); + + const result = yargsParser.detailed(args, { + configuration, + configObjects, + coerce: coerceOpts, + narg: nargOpts, + alias: aliases, + string: types.string, + array: types.array, + number: types.number, + boolean: types.boolean.concat(nodeArgs.map(pair => pair[0])) + }); if (result.error) { throw createMissingArgumentError(result.error.message); } + + // reapply "=" arg values from above + nodeArgs.forEach(([key, value]) => { + result.argv[key] = value; + }); + return result.argv; };
diff --git a/test/assertions.js b/test/assertions.js --- a/test/assertions.js +++ b/test/assertions.js @@ -9,7 +9,7 @@ exports.mixinMochaAssertions = function(expect) { return ( Object.prototype.toString.call(v) === '[object Object]' && typeof v.output === 'string' && - typeof v.code === 'number' && + 'code' in v && // may be null Array.isArray(v.args) ); } @@ -59,9 +59,9 @@ exports.mixinMochaAssertions = function(expect) { } ) .addAssertion( - '<RawResult|RawRunResult|JSONRunResult> [not] to have [completed with] [exit] code <number>', + '<RawRunResult|JSONRunResult> [not] to have completed with [exit] code <number>', function(expect, result, code) { - expect(result, '[not] to have property', 'code', code); + expect(result.code, '[not] to be', code); } ) .addAssertion( @@ -295,5 +295,11 @@ exports.mixinMochaAssertions = function(expect) { function(expect, result, output) { expect(result.output, '[not] to satisfy', output); } + ) + .addAssertion( + '<RawRunResult|JSONRunResult> to have [exit] code <number>', + function(expect, result, code) { + expect(result.code, 'to be', code); + } ); }; diff --git a/test/integration/helpers.js b/test/integration/helpers.js --- a/test/integration/helpers.js +++ b/test/integration/helpers.js @@ -81,10 +81,10 @@ module.exports = { var path; path = resolveFixturePath(fixturePath); - args = args || []; + args = (args || []).concat('--reporter', 'json', path); return invokeMocha( - args.concat(['--reporter', 'json', path]), + args, function(err, res) { if (err) return fn(err); @@ -95,8 +95,8 @@ module.exports = { fn( new Error( format( - 'Failed to parse JSON reporter output.\nArgs: %O\nResult:\n\n%O', - args, + 'Failed to parse JSON reporter output. Error:\n%O\nResponse:\n%O', + err, res ) ) diff --git a/test/integration/options/debug.spec.js b/test/integration/options/debug.spec.js --- a/test/integration/options/debug.spec.js +++ b/test/integration/options/debug.spec.js @@ -14,19 +14,117 @@ describe('--debug', function() { it('should invoke --inspect', function(done) { invokeMocha( - ['--debug', '--file', DEFAULT_FIXTURE], + ['--debug', DEFAULT_FIXTURE], function(err, res) { if (err) { return done(err); } - expect(res, 'to have passed').and( + expect(res, 'to contain output', /Debugger listening/i); + done(); + }, + 'pipe' + ); + }); + + it('should invoke --inspect-brk', function(done) { + var proc = invokeMocha( + ['--debug-brk', DEFAULT_FIXTURE], + function(err, res) { + if (err) { + return done(err); + } + expect(res, 'to contain output', /Debugger listening/i); + done(); + }, + 'pipe' + ); + + // debugger must be manually killed + setTimeout(function() { + process.kill(proc.pid, 'SIGINT'); + }, 2000); + }); + + it('should respect custom host/port', function(done) { + invokeMocha( + ['--debug=127.0.0.1:9229', DEFAULT_FIXTURE], + function(err, res) { + if (err) { + return done(err); + } + expect( + res, + 'to contain output', + /Debugger listening on .*127.0.0.1:9229/i + ); + done(); + }, + 'pipe' + ); + }); + + it('should warn about incorrect usage for version', function(done) { + invokeMocha( + ['--debug=127.0.0.1:9229', DEFAULT_FIXTURE], + function(err, res) { + if (err) { + return done(err); + } + expect(res, 'to contain output', /"--debug" is not available/i); + done(); + }, + 'pipe' + ); + }); + }); + + describe('Node.js v6', function() { + // note that v6.3.0 and newer supports --inspect but still supports --debug. 
+ before(function() { + if (process.version.substring(0, 2) !== 'v6') { + this.skip(); + } + }); + + it('should start debugger', function(done) { + var proc = invokeMocha( + ['--debug', DEFAULT_FIXTURE], + function(err, res) { + if (err) { + return done(err); + } + expect(res, 'to contain output', /Debugger listening/i); + done(); + }, + 'pipe' + ); + + // debugger must be manually killed + setTimeout(function() { + process.kill(proc.pid, 'SIGINT'); + }, 2000); + }); + + it('should respect custom host/port', function(done) { + var proc = invokeMocha( + ['--debug=127.0.0.1:9229', DEFAULT_FIXTURE], + function(err, res) { + if (err) { + return done(err); + } + expect( + res, 'to contain output', - /Debugger listening/i + /Debugger listening on .*127.0.0.1:9229/i ); done(); }, - {stdio: 'pipe'} + 'pipe' ); + + setTimeout(function() { + process.kill(proc.pid, 'SIGINT'); + }, 2000); }); }); });
`--inspect` flag doesn't work

When I tried to debug Mocha with the `--inspect` option, a `ReferenceError: describe is not defined` error occurred. It seems the file is not run in the `mocha` context.

```sh
$ ./node_modules/.bin/mocha --version
6.0.0-1
$ ./node_modules/.bin/mocha --inspect ex.test.js
Debugger listening on ws://127.0.0.1:9229/aca2ad85-76ab-42ac-99bc-6acb84bd1bc2
For help, see: https://nodejs.org/en/docs/inspector
/Users/outsider/temp/mocha-next/6/ex.test.js:1
(function (exports, require, module, __filename, __dirname) { describe('Suite', () => {
                                                               ^

ReferenceError: describe is not defined
    at Object.<anonymous> (/Users/outsider/temp/mocha-next/6/ex.test.js:1:63)
    at Module._compile (internal/modules/cjs/loader.js:689:30)
    at Object.Module._extensions..js (internal/modules/cjs/loader.js:700:10)
    at Module.load (internal/modules/cjs/loader.js:599:32)
    at tryModuleLoad (internal/modules/cjs/loader.js:538:12)
    at Function.Module._load (internal/modules/cjs/loader.js:530:3)
    at Function.Module.runMain (internal/modules/cjs/loader.js:742:12)
    at startup (internal/bootstrap/node.js:283:19)
    at bootstrapNodeJSCore (internal/bootstrap/node.js:743:3)
```

With `mocha@5.2.0`, it is fine.

```sh
$ ./node_modules/.bin/mocha --version
5.2.0
$ ./node_modules/.bin/mocha --inspect ex.test.js
Debugger listening on ws://127.0.0.1:9229/4b56feea-1711-43bd-82a3-5a587171122f
For help, see: https://nodejs.org/en/docs/inspector

  Suite
    ✓ test

  1 passing (5ms)
```
I tried to fix this issue, but I couldn't find a good solution. Any idea? /cc @mochajs/core

```sh
$ ./bin/mocha --inspect example.js
mocha:cli exec /Users/outsider/bin/lib/node.js/node-v10.15.0-darwin-x64/bin/node w/ args: [ '--inspect', 'example.js', '/Users/outsider/mocha/bin/_mocha', '--timeout', '200', '--no-config', '--no-package', '--no-opts', '--require', 'test/setup', '--ui', 'bdd', '--global', 'okGlobalA', '--global', 'okGlobalB', '--global', 'okGlobalC', '--global', 'callback*', '--diff', '--extension', 'js', '--reporter', 'spec', '--slow', '75' ] +2ms
Debugger listening on ws://127.0.0.1:9229/296e0480-0771-4977-b4cd-539b3795ad71
For help, see: https://nodejs.org/en/docs/inspector
ReferenceError: describe is not defined
```

In [here](https://github.com/mochajs/mocha/blob/master/bin/mocha#L24), [yargs-parser](https://github.com/yargs/yargs-parser) parsed `example.js` as `--inspect`'s value. Other [Node flags](https://nodejs.org/api/cli.html#cli_options) behave the same. So the following works:

```sh
$ ./bin/mocha --inspect --exit example.js
mocha:cli exec /Users/outsider/bin/lib/node.js/node-v10.15.0-darwin-x64/bin/node w/ args: [ '--inspect', '/Users/outsider/mocha/bin/_mocha', 'example.js', '--timeout', '200', '--exit', '--no-config', '--no-package', '--no-opts', '--require', 'test/setup', '--ui', 'bdd', '--global', 'okGlobalA', '--global', 'okGlobalB', '--global', 'okGlobalC', '--global', 'callback*', '--diff', '--extension', 'js', '--reporter', 'spec', '--slow', '75' ] +2ms
Debugger listening on ws://127.0.0.1:9229/a2e27603-7321-4eb3-97dd-f0e64f224f2a
For help, see: https://nodejs.org/en/docs/inspector
1 passing (3ms)
```

And this also works:

```sh
$ ./bin/mocha example.js --inspector
mocha:cli exec /Users/outsider/bin/lib/node.js/node-v10.15.0-darwin-x64/bin/node w/ args: [ '--inspect', '/Users/outsider/mocha/bin/_mocha', 'example.js', '--timeout', '200', '--no-config', '--no-package', '--no-opts', '--require', 'test/setup', '--ui', 'bdd', '--global', 'okGlobalA', '--global', 'okGlobalB', '--global', 'okGlobalC', '--global', 'callback*', '--diff', '--extension', 'js', '--reporter', 'spec', '--slow', '75' ] +2ms
Debugger listening on ws://127.0.0.1:9229/a09fc498-5626-4023-94bd-e7eb1c8352e7
For help, see: https://nodejs.org/en/docs/inspector
1 passing (2ms)
```

However, this doesn't work, because [yargs-unparser](https://github.com/yargs/yargs-unparser) unparses `--inspect: '127.0.0.1:1234'` as `--inspect 127.0.0.1:1234`, not `--inspect=127.0.0.1:1234`, in [here](https://github.com/mochajs/mocha/blob/master/bin/mocha#L90):

```sh
$ ./bin/mocha example.js --inspect=127.0.0.1:1234
mocha:cli exec /Users/outsider/bin/lib/node.js/node-v10.15.0-darwin-x64/bin/node w/ args: [ '--inspect', '127.0.0.1:1234', '/Users/outsider/mocha/bin/_mocha', 'example.js', '--timeout', '200', '--no-config', '--no-package', '--no-opts', '--require', 'test/setup', '--ui', 'bdd', '--global', 'okGlobalA', '--global', 'okGlobalB', '--global', 'okGlobalC', '--global', 'callback*', '--diff', '--extension', 'js', '--reporter', 'spec', '--slow', '75' ] +2ms
Debugger listening on ws://127.0.0.1:9229/a77b7761-4aad-42fb-ba61-a96ab2c4490b
For help, see: https://nodejs.org/en/docs/inspector
internal/modules/cjs/loader.js:583
    throw err;
    ^
Error: Cannot find module '/Users/outsider/mocha/127.0.0.1:1234'
```

@outsideris see here https://github.com/mochajs/mocha/blob/0f95a7d73355ce9ab299c9f8dfa983b897ecf2fc/lib/cli/options.js#L290-L294 we may want to do the opposite--take everything from `_` and put it into `--spec`. (`--spec` is an alias for the default positional arguments; the "files" in other words.) Does that help?

Have a look at https://github.com/mochajs/mocha/issues/3683#issuecomment-457184246 Maybe your issue has the same cause as mine.

Looks like we may just need to special-case this. Basically that involves doing something like what v5.2.0 does for some node flags that optionally accept an argument. FWIW, `--inspect 0.0.0.0:12345 foo.spec.js` does *not* work on Mocha v5.2.0 either, and I wouldn't expect it to; the `=` is required.

We could require users to distinguish between mocha flags and node flags. All node flags have to be attached at the end of the command line, separated by the end-of-parsing sign "--". Mocha would just pass these node flags to Node, without any checks or whatever. Node would do the parsing for us. Like "npm" running a script: `npm run server -- --port=1337`

@juergba That'd be easier to implement, but also it'd break backwards-compat, so we should probably just make it work.

I didn't find a proper approach yet, even though I'm working on it.
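The patch for this record adds an `unparseNodeFlags` helper that re-joins a flag and its value with `=` before handing them back to Node. A simplified sketch of why that matters (only the helper's purpose is taken from the patch; the code below is illustrative, not Mocha's exact implementation):

```js
// Node treats a separate "127.0.0.1:1234" argument as a script path, so
// flags with values must be passed back as a single "--flag=value" token.
function joinNodeFlag(flag, value) {
  return value === true ? '--' + flag : '--' + flag + '=' + value;
}

console.log(joinNodeFlag('inspect', true));             // --inspect
console.log(joinNodeFlag('inspect', '127.0.0.1:1234')); // --inspect=127.0.0.1:1234
// Passing ['--inspect', '127.0.0.1:1234'] instead makes Node fail with
// "Cannot find module '.../127.0.0.1:1234'", as shown in the session above.
```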
2019-01-28T21:06:18Z
6
mochajs/mocha
3632
mochajs__mocha-3632
[ "2755" ]
f223298b869e1a7f333678b37e3d2772efecc63d
diff --git a/lib/mocha.js b/lib/mocha.js --- a/lib/mocha.js +++ b/lib/mocha.js @@ -92,7 +92,7 @@ function Mocha(options) { this.files = []; this.options = options; // root suite - this.suite = new exports.Suite('', new exports.Context()); + this.suite = new exports.Suite('', new exports.Context(), true); if ('useColors' in options) { utils.deprecate( diff --git a/lib/suite.js b/lib/suite.js --- a/lib/suite.js +++ b/lib/suite.js @@ -41,15 +41,18 @@ exports.create = function(parent, title) { }; /** - * Initialize a new `Suite` with the given `title` and `ctx`. Derived from [EventEmitter](https://nodejs.org/api/events.html#events_class_eventemitter) + * Constructs a new `Suite` instance with the given `title`, `ctx`, and `isRoot`. * - * @memberof Mocha * @public * @class - * @param {string} title - * @param {Context} parentContext + * @extends EventEmitter + * @memberof Mocha + * @see {@link https://nodejs.org/api/events.html#events_class_eventemitter|EventEmitter} + * @param {string} title - Suite title. + * @param {Context} parentContext - Parent context instance. + * @param {boolean} [isRoot=false] - Whether this is the root suite. */ -function Suite(title, parentContext) { +function Suite(title, parentContext, isRoot) { if (!utils.isString(title)) { throw createInvalidArgumentTypeError( 'Suite argument "title" must be a string. Received type "' + @@ -70,7 +73,7 @@ function Suite(title, parentContext) { this._beforeAll = []; this._afterEach = []; this._afterAll = []; - this.root = !title; + this.root = isRoot === true; this._timeout = 2000; this._enableTimeouts = true; this._slow = 75; @@ -326,6 +329,7 @@ Suite.prototype.afterEach = function(title, fn) { */ Suite.prototype.addSuite = function(suite) { suite.parent = this; + suite.root = false; suite.timeout(this.timeout()); suite.retries(this.retries()); suite.enableTimeouts(this.enableTimeouts());
diff --git a/test/unit/suite.spec.js b/test/unit/suite.spec.js --- a/test/unit/suite.spec.js +++ b/test/unit/suite.spec.js @@ -302,6 +302,27 @@ describe('Suite', function() { }); }); + describe('.create()', function() { + before(function() { + this.first = new Suite('Root suite', {}, true); + this.second = new Suite('RottenRoot suite', {}, true); + this.first.addSuite(this.second); + }); + + it('does not create a second root suite', function() { + expect(this.second.parent, 'to be', this.first); + expect(this.first.root, 'to be', true); + expect(this.second.root, 'to be', false); + }); + + it('does not denote the root suite by being titleless', function() { + var emptyTitleSuite = Suite.create(this.second, ''); + expect(emptyTitleSuite.parent, 'to be', this.second); + expect(emptyTitleSuite.root, 'to be', false); + expect(this.second.root, 'to be', false); + }); + }); + describe('.addSuite()', function() { beforeEach(function() { this.first = new Suite('First suite'); @@ -390,8 +411,8 @@ describe('Suite', function() { describe('when there is a parent', function() { describe('the parent is the root suite', function() { it('returns the suite title', function() { - var parentSuite = new Suite(''); - parentSuite.addSuite(this.suite); + var rootSuite = new Suite('', {}, true); + rootSuite.addSuite(this.suite); expect(this.suite.titlePath(), 'to equal', ['A Suite']); }); });
Decouple the "root" property on Suite from title length.

Right now, Mocha deems a `Suite` to be a `root` if its title is empty, as can be seen [here](https://github.com/mochajs/mocha/blob/2bb2b9fa35818db7a02e5068364b0c417436b1af/lib/suite.js#L60):

```
function Suite (title, parentContext) {
  if (!utils.isString(title)) {
    throw new Error('Suite `title` should be a "string" but "' + typeof title + '" was given instead.');
  }
  this.title = title;
  // skipping non-pertinent stuff...
  this.root = !title;
```

Mocha creates a first `Suite` with `''` for title, which marks that suite as the root suite. This is a problem because, it seems to me, when the user uses an empty string to name a suite, this should not result in Mocha building a structurally deficient tree of tests. (A tree with two "root" suites.)

Being able to name suites with the empty string (without at the same time marking them as "root") is useful. Sometimes I have suites where testing what is logically a single piece of functionality is best done by dividing the tests into two groups: simple tests that can be performed with super fast setup code, and more complex tests that need a costlier setup. Oh, I could use the setup code for the 2nd group for all tests, but that would increase the total run time. I end up with something like:

```
// Testing the method foo on class Bar.
describe("#foo", () => {
  describe("", () => {
    beforeEach(() => {
      // create a trivial data structure that is quick to create and sufficient
      // for the tests in this group.
    });

    // tests...
  });

  describe("", () => {
    beforeEach(() => {
      // create a complex structure that is costlier to create but needed for these tests.
    });

    // tests...
  });
});
```

The fact that the suite uses different setups for the two groups of tests is an internal detail that is not useful to know in test reports. The need for different test setups does not always correlate with divisions that are meaningful from the point of view of reporting successes or failures. It *often does*, but not always.

So I would suggest that the code be changed to determine the value of `root` through some other test than the length of the suite's title. There could be a unique object that serves as a marker to indicate "this suite I'm building is a root suite". It could be "statically" added to `Suite` (e.g. `Suite.Root`, so doing `new Suite(Suite.Root, ...)` would result in a root suite).

Ultimately, though, the `root` property seems redundant to me: a `Suite` which has no `parent` is a root suite, no? So "root"-ness should correlate with the absence of a set `parent`. But maybe there's some scenario I'm missing? At any rate, removing `root` cannot be done without breakage. The `karma-mocha` plugin, for instance, relies on it to produce reports (which is how I discovered the problem).
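A tiny illustration of the behaviour being objected to, run against a Mocha version from before this change (the `require` path and the use of `Suite.create` mirror the test patch above; everything else is illustrative):

```js
// With `this.root = !title`, any empty-titled suite is flagged as a root
// suite, even though it clearly has a parent.
var Suite = require('mocha/lib/suite');

var root = new Suite('');           // the real root suite Mocha creates
var child = Suite.create(root, ''); // a user's suite that merely has an empty title

console.log(root.root);      // true
console.log(child.root);     // true  <-- two "root" suites in one tree
console.log(!!child.parent); // true, so it is clearly not a root
```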
2018-12-21T22:27:32Z
6
mochajs/mocha
3375
mochajs__mocha-3375
[ "3370" ]
7613521a8d38fc51f50f70a83ae83a6557fbd36c
diff --git a/bin/options.js b/bin/options.js
--- a/bin/options.js
+++ b/bin/options.js
@@ -32,6 +32,7 @@ function getOptions() {
   try {
     const opts = fs
       .readFileSync(optsPath, 'utf8')
+      .replace(/^#.*$/gm, '')
       .replace(/\\\s/g, '%20')
       .split(/\s/)
       .filter(Boolean)
diff --git a/test/integration/regression.spec.js b/test/integration/regression.spec.js --- a/test/integration/regression.spec.js +++ b/test/integration/regression.spec.js @@ -32,6 +32,7 @@ describe('regressions', function() { var processArgv = process.argv.join(''); var mochaOpts = fs .readFileSync(path.join(__dirname, '..', 'mocha.opts'), 'utf-8') + .replace(/^#.*$/gm, '') .split(/[\s]+/) .join(''); assert.notEqual( diff --git a/test/mocha.opts b/test/mocha.opts --- a/test/mocha.opts +++ b/test/mocha.opts @@ -1,6 +1,11 @@ +### +### mocha.opts +### + --require ./test/setup --ui bdd --globals okGlobalA,okGlobalB --globals okGlobalC --globals callback* --timeout 200 +
Add support for comment lines in Mocha options file

### Prerequisites

- [x] Checked that your issue hasn't already been filed by cross-referencing [issues with the `faq` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Afaq%20)

Related proposals: #2587 #2870 #2963

### Description

Add support for Beginning-of-Line (BOL) comments to "mocha.opts". As the file has far more in common with shell scripts (as it contains various Mocha cmdline arguments) than JavaScript tests, the comment character would be the hash ('#') character. Inline comments will **not** be supported.

### Rationale

No current support for comment lines exists for "mocha.opts". With other related upcoming proposals for processing this file, the ability to document this file becomes more important than ever.

I believe the other proposals got bogged down trying to do too much, arguing over more advanced features rather than providing the basics. As Larry would say, ["Git 'Er Done"](https://www.youtube.com/watch?v=xfbQ81SJn8s).
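The patch above implements this with a single `replace()` applied before the existing whitespace handling in `getOptions()`. A small standalone illustration of that stripping step (the sample mocha.opts content is made up for the example):

```js
// Strip full-line '#' comments from a mocha.opts-style string, as the patch
// does, before splitting the remaining text into individual arguments.
const opts = [
  '### mocha.opts ###',
  '--require ./test/setup',
  '# use the BDD interface',
  '--ui bdd',
  '--timeout 200'
].join('\n');

const args = opts
  .replace(/^#.*$/gm, '') // drop beginning-of-line comments
  .split(/\s/)
  .filter(Boolean);

console.log(args); // [ '--require', './test/setup', '--ui', 'bdd', '--timeout', '200' ]
```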
2018-05-03T22:44:01Z
5
mochajs/mocha
3222
mochajs__mocha-3222
[ "3119" ]
3509029e5da38daca0d650094117600b6617a862
diff --git a/lib/runnable.js b/lib/runnable.js
--- a/lib/runnable.js
+++ b/lib/runnable.js
@@ -53,7 +53,6 @@ function Runnable (title, fn) {
   this._slow = 75;
   this._enableTimeouts = true;
   this.timedOut = false;
-  this._trace = new Error('done() called multiple times');
   this._retries = -1;
   this._currentRetry = 0;
   this.pending = false;
@@ -278,7 +277,13 @@ Runnable.prototype.run = function (fn) {
       return;
     }
     emitted = true;
-    self.emit('error', err || new Error('done() called multiple times; stacktrace may be inaccurate'));
+    var msg = 'done() called multiple times';
+    if (err && err.message) {
+      err.message += " (and Mocha's " + msg + ')';
+      self.emit('error', err);
+    } else {
+      self.emit('error', new Error(msg));
+    }
   }

   // finished
@@ -287,8 +292,9 @@
     if (self.timedOut) {
       return;
     }
+
     if (finished) {
-      return multiple(err || self._trace);
+      return multiple(err);
     }
     self.clearTimeout();
diff --git a/test/integration/fixtures/multiple-done-with-error.fixture.js b/test/integration/fixtures/multiple-done-with-error.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/multiple-done-with-error.fixture.js @@ -0,0 +1,8 @@ +'use strict'; + +it('should fail in a test-case', function (done) { + process.nextTick(function () { + done(); + done(new Error('second error')); + }); +}); diff --git a/test/integration/multiple-done.spec.js b/test/integration/multiple-done.spec.js --- a/test/integration/multiple-done.spec.js +++ b/test/integration/multiple-done.spec.js @@ -15,15 +15,35 @@ describe('multiple calls to done()', function () { }); it('results in failures', function () { - assert.equal(res.stats.pending, 0); - assert.equal(res.stats.passes, 1); - assert.equal(res.stats.failures, 1); - assert.equal(res.code, 1); + assert.equal(res.stats.pending, 0, 'wrong "pending" count'); + assert.equal(res.stats.passes, 1, 'wrong "passes" count'); + assert.equal(res.stats.failures, 1, 'wrong "failures" count'); }); it('throws a descriptive error', function () { - assert.equal(res.failures[0].err.message, - 'done() called multiple times'); + assert.equal(res.failures[0].err.message, 'done() called multiple times'); + }); + }); + + describe('with error passed on second call', function () { + before(function (done) { + run('multiple-done-with-error.fixture.js', args, function (err, result) { + res = result; + done(err); + }); + }); + + it('results in failures', function () { + assert.equal(res.stats.pending, 0, 'wrong "pending" count'); + assert.equal(res.stats.passes, 1, 'wrong "passes" count'); + assert.equal(res.stats.failures, 1, 'wrong "failures" count'); + }); + + it('should throw a descriptive error', function () { + assert.equal( + res.failures[0].err.message, + "second error (and Mocha's done() called multiple times)" + ); }); }); @@ -44,8 +64,7 @@ describe('multiple calls to done()', function () { it('correctly attributes the error', function () { assert.equal(res.failures[0].fullTitle, 'suite test1'); - assert.equal(res.failures[0].err.message, - 'done() called multiple times'); + assert.equal(res.failures[0].err.message, 'done() called multiple times'); }); }); @@ -66,8 +85,7 @@ describe('multiple calls to done()', function () { it('correctly attributes the error', function () { assert.equal(res.failures[0].fullTitle, 'suite "before all" hook'); - assert.equal(res.failures[0].err.message, - 'done() called multiple times'); + assert.equal(res.failures[0].err.message, 'done() called multiple times'); }); }); @@ -90,8 +108,7 @@ describe('multiple calls to done()', function () { assert.equal(res.failures.length, 2); res.failures.forEach(function (failure) { assert.equal(failure.fullTitle, 'suite "before each" hook'); - assert.equal(failure.err.message, - 'done() called multiple times'); + assert.equal(failure.err.message, 'done() called multiple times'); }); }); }); diff --git a/test/unit/runnable.spec.js b/test/unit/runnable.spec.js --- a/test/unit/runnable.spec.js +++ b/test/unit/runnable.spec.js @@ -270,7 +270,8 @@ describe('Runnable(title, fn)', function () { test.on('error', function (err) { ++errCalls; - expect(err.message).to.equal('fail'); + expect(err.message).to.equal( + "fail (and Mocha's done() called multiple times)"); expect(calls).to.equal(1); expect(errCalls).to.equal(1); done();
Error object stored on Runnable._trace leaks memory

FYI: In V8, Error objects keep closures alive until the `err.stack` property is accessed, which prevents collection of the closure (and associated objects) until the Error objects die. Mocha creates a long-living Error for each Runnable: https://github.com/mochajs/mocha/blob/2303c669ef1b47fef7cf86f5ef486d537033d443/lib/runnable.js#L56.

Would it make sense to either

1. store a string message instead of an Error, or
2. clear Error.stack, or
3. format Error.stack eagerly instead?

See also: https://crbug.com/v8/7142 and https://twitter.com/wSokra/status/935790750188625920.
@schuay Is this a bug in V8? [Webpack's workaround](https://github.com/webpack/webpack/commit/be2477bac7324af5ee0ad9abe899df9639fed263)

I don't really think Mocha should be changing any code unless there are widespread problems with this. If it's a handful of projects having the problem, they should be able to work around it.

If it turns out that this is a "feature" and not a "bug" in V8, then we should modify Mocha, and this seems reasonable:

```js
this._trace = new Error().stack;
```

then we can just `console.error(this._trace)` if it triggers.

I wouldn't necessarily call it a bug on the V8 side. Our expectation is that in general Error objects are not long-lived, that not a huge number of them are live at once, and that for most Error objects the `stack` property will not be accessed. Lazy stack formatting exploits that to skip expensive work. But that leads to the 'leak' in this case since we need to keep a few objects around in order to be able to format the stack later.

Your proposed solution above will basically format `stack` eagerly, and depending on how often it's called, may lead to significant slowdown. So I'm not sure there's currently a perfect solution that will be ideal in all use-cases. In V8, we may be able to avoid keeping the closure alive, which might already improve things significantly. Would it be an option to avoid exposing stack traces entirely?

@schuay Yes, you're right about the slowdown. The point of storing the `Error` is the stack itself. It's used if someone does this (and yes, code like this gets written; we just get fewer bug reports about it now that we provide a stack):

```js
describe('test suite', function () {
  it('is an async test which is bad', function (done) {
    setTimeout(() => {
      done(); // this test passes
      setTimeout(done); // but this is a problem.
    });
  });

  it('is an async test just minding its own business', function (done) {
    setTimeout(() => {
      // the previous test's error appears somewhere around here,
      // even though this test also passes!
      done();
    });
  });
});
```

IMO, the "real" solution is one of:

1. native language support for "zones" or "async hooks" or whatever, because it's painful to connect the dots otherwise, OR
2. tail call support so that we can run tests in a recursive manner w/o having to break the stack via calls to `setImmediate()` due to "Maximum call stack exceeded" problems

Mocha could address this by basically implementing @sokra's hack behind a flag, I guess.

ugh, and the `Error` never goes out of scope because we have to call `setImmediate()` to avoid maximum call stack problems, right? maybe someone can spend an afternoon and rewrite the test runner as a `for` loop 😝

Hmm, I'm not familiar with Mocha at all. Could you elaborate what the problem is in your previous code example?

@schuay So, you call `done` to tell Mocha your test is finished. This allows you to do async things (in lieu of `Promise`s) within your test, e.g. `setTimeout()` or HTTP requests or whathaveyou. When that async execution is complete, the user executes the `done` callback, and the test is complete. But this doesn't necessarily halt execution of the test. Users can do weird stuff (and they do) like trying to call that `done` callback multiple times. This is indicative of a Problem in the user's code.

Given an example similar to the above:

```js
describe('test suite', function () {
  it('is a good async test', function (done) {
    setTimeout(done);
  });

  it('is a good synchronous test', function () {
    // omitting the parameter means the test is not expected to be asynchronous
    // unless a Promise is returned
  });

  it('is an async test which is bad', function (done) {
    setTimeout(() => {
      done(); // this test passes
      setTimeout(done); // but this is a problem.
    });
  });

  it('is an async test just minding its own business', function (done) {
    setTimeout(() => {
      // the previous test's error appears somewhere around here,
      // even though this test also passes!
      done();
    });
  });
});
```

We will see output from Mocha like this (roughly):

```
- test suite
  - is a good async test - **OK**
  - is a good synchronous test - **OK**
  - is an async test which is bad - **OK**
  - is an async test just minding its own business
    **ERROR**: done() callback called twice
    <insert stack trace here>
    - **OK**
```

Now, where did we call `done()` twice? This is confusing for the user. We won't know without that stack as saved via `this._trace`; each test is run in its own `setImmediate()` callback.

Does that help?

I see, thanks. If you don't necessarily need the full stack trace, another option would be V8's `prepareStackTrace` [API](https://github.com/v8/v8/wiki/Stack-Trace-API#customizing-stack-traces). Using that, you could fetch e.g. the line/column numbers and the file name of the top frame and skip full stack trace formatting. Once the stack is formatted that way, all internal information (e.g. the closure and receiver of each frame) is freed. Of course, ensuring that Error objects go out of scope at some point would be the best fix :)

And just FYI, there's also support for async stacks in DevTools / the [inspector protocol](https://chromedevtools.github.io/devtools-protocol/tot/Debugger/#event-paused), although I suppose that won't be relevant for you.

> I see, thanks. If you don't necessarily need the full stack trace, another option would be V8's prepareStackTrace API.

Mocha doesn't *just* run in V8, so we can't use runtime-specific APIs.

> And just FYI, there's also support for async stacks in DevTools / the inspector protocol, although I suppose that won't be relevant for you.

It is relevant, actually. While it means the user will need to open a separate tool to track down what's going on, at least such a tool now exists. That means it may be OK to simply drop the stack and tell the user to open an inspector to figure it out. 😛

@schuay In what version of V8 was this behavior introduced (specifically, which Node.js version does it correspond to)?

> Mocha doesn't just run in V8, so we can't use runtime-specific APIs.

In theory you could check for the existence of `Error.prepareStackTrace`, but I agree it's not a nice solution.

> @schuay In what version of V8 was this behavior introduced (specifically, which Node.js version does it correspond to)?

Hmm, not sure, but it's been roughly one year from looking at `git blame`. cc @ak239 and @hashseed who will know exactly when async stack traces were released.

Latest Node.js supports inspector bindings, so in theory you can enable async stacks using the Debugger domain and then somehow fetch this stack trace. But I believe that we should provide capabilities in the JavaScript runtime which allow enabling / disabling async stacks. Something like `Error.captureAsyncStacks(true)`, and then `Error.stack` will contain async stacks as well.

```js
this._trace.stack = this._trace.stack;
```

This is a workaround without functional changes. It forces V8 to "calculate" the stack trace as a string.

> This is a workaround without functional changes. It forces v8 to "calculate" the stack trace as string.

Accessing `stack` is enough to force eager formatting and clear internal data, no need to write to it. But as discussed above, eager formatting is probably too slow to be a viable fix.

I think `this._trace` should just be removed and debugging async stacks delegated to proper tools. "Fixing" this bug will cause loss of functionality, but it's only "exceptional" functionality which should not be directly relied on, so I'm calling it `semver-patch`.

Could the stack trace be stored only when a command line flag (something like `--debug-multiple-done-calls`) is set? Then if `done()` is called twice, Mocha could instruct the user to run the test again, this time with the flag set. The slowdown of eager formatting would be acceptable because it would only occur when a user was trying to find where done was called twice. Just my two cents :)

@HarrySarson Not a bad idea, but it's also kind of an obscure thing to add a command-line flag for. AFAIK this also affects browser (Chrome) users. I think I'd like to just rip it out for now and see how it goes.
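For reference, a minimal sketch of the two stop-gap ideas mentioned in this thread — forcing eager stack formatting versus storing only the formatted string (illustrative only; the merged patch removes `_trace` altogether instead):

```js
// Workaround discussed above: reading (or re-assigning) .stack forces V8 to
// format the trace now, releasing the closures the Error would otherwise keep alive.
var trace = new Error('done() called multiple times');
void trace.stack;

// Option 1 from the issue: keep only the formatted string, not the Error object.
var traceString = new Error('done() called multiple times').stack;
console.error(traceString);
```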
2018-01-28T21:34:15Z
5
mochajs/mocha
3268
mochajs__mocha-3268
[ "3265" ]
aaaa5abdd72e6d9db446c3c0d414947241ce6042
diff --git a/lib/utils.js b/lib/utils.js --- a/lib/utils.js +++ b/lib/utils.js @@ -8,8 +8,15 @@ var debug = require('debug')('mocha:watch'); var fs = require('fs'); var glob = require('glob'); var path = require('path'); +var join = path.join; var he = require('he'); +/** + * Ignored directories. + */ + +var ignore = ['node_modules', '.git']; + exports.inherits = require('util').inherits; /** @@ -54,6 +61,46 @@ exports.watch = function (files, fn) { }); }; +/** + * Ignored files. + * + * @api private + * @param {string} path + * @return {boolean} + */ +function ignored (path) { + return !~ignore.indexOf(path); +} + +/** + * Lookup files in the given `dir`. + * + * @api private + * @param {string} dir + * @param {string[]} [ext=['.js']] + * @param {Array} [ret=[]] + * @return {Array} + */ +exports.files = function (dir, ext, ret) { + ret = ret || []; + ext = ext || ['js']; + + var re = new RegExp('\\.(' + ext.join('|') + ')$'); + + fs.readdirSync(dir) + .filter(ignored) + .forEach(function (path) { + path = join(dir, path); + if (fs.lstatSync(path).isDirectory()) { + exports.files(path, ext, ret); + } else if (path.match(re)) { + ret.push(path); + } + }); + + return ret; +}; + /** * Compute a slug from the given `str`. *
diff --git a/test/node-unit/file-utils.spec.js b/test/node-unit/file-utils.spec.js --- a/test/node-unit/file-utils.spec.js +++ b/test/node-unit/file-utils.spec.js @@ -105,6 +105,26 @@ describe('file utils', function () { }); }); + describe('.files', function () { + (symlinkSupported ? it : it.skip)('should return broken symlink file path', function () { + expect(utils.files(tmpDir, ['js'])) + .to.contain(tmpFile('mocha-utils-link.js')) + .and.contain(tmpFile('mocha-utils.js')) + .and.have.length(2); + + expect(existsSync(tmpFile('mocha-utils-link.js'))) + .to.be(true); + + fs.renameSync(tmpFile('mocha-utils.js'), tmpFile('bob')); + + expect(existsSync(tmpFile('mocha-utils-link.js'))) + .to.be(false); + + expect(utils.files(tmpDir, ['js'])) + .to.eql([tmpFile('mocha-utils-link.js')]); + }); + }); + afterEach(removeTempDir); function makeTempDir () {
"utils.files is not a function" (mocha: 5.0.2) ### Description Hey, i've just upgraded to the latest mocha version `5.0.2` and I now start to see a `"utils.files is not a function"` error when running in watch mode with passing a glob path. Seems this commit ec8901a23c5194b6f7e6eee9c2568e5020c944ce removed the function which is still used in https://github.com/mochajs/mocha/blob/master/bin/_mocha#L532 ### Steps to Reproduce `NODE_ENV=test mocha './test/**/*.js'` **Expected behavior:** it should execute my tests as before. **Actual behavior:** it crashes all the time ```bash > NODE_ENV=test mocha './test/**/*.js' "--watch" /opt/MY-PACKAGE/node_modules/mocha/bin/_mocha:532 const watchFiles = utils.files(cwd, [ 'js' ].concat(program.watchExtensions)); ^ TypeError: utils.files is not a function at Object.<anonymous> (/opt/MY-PACKAGE/node_modules/mocha/bin/_mocha:532:28) at Module._compile (module.js:643:30) at Object.Module._extensions..js (module.js:654:10) at Module.load (module.js:556:32) at tryModuleLoad (module.js:499:12) at Function.Module._load (module.js:491:3) at Function.Module.runMain (module.js:684:10) at startup (bootstrap_node.js:187:16) at bootstrap_node.js:608:3 ``` ### Versions - `5.0.2` - shell: zsh - The code transpiler being used: babel
Same here, looks like `utils.files` was removed in https://github.com/mochajs/mocha/commit/ec8901a23c5194b6f7e6eee9c2568e5020c944ce but is still in use at https://github.com/mochajs/mocha/blob/master/bin/_mocha#L532.

This exists in 5.0.1 as well, it seems.

Confirmed this is still an issue. Downgraded to 5.0.1, it works.
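For context, the failing call quoted in the stack trace above lives in `bin/_mocha`'s watch-mode setup; restoring `utils.files`, as the patch for this record does, makes it resolve again. A hedged sketch of that usage (simplified, not a verbatim copy of `_mocha`; the extension value is an example):

```js
// Watch mode collects the files to watch from the current working directory,
// filtered by the configured extensions.
var utils = require('mocha/lib/utils');

var watchExtensions = ['coffee']; // example value for --watch-extensions
var watchFiles = utils.files(process.cwd(), ['js'].concat(watchExtensions));

console.log(watchFiles); // e.g. [ '/project/test/dash-spec.js', ... ]
```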
2018-03-07T05:02:59Z
5
mochajs/mocha
3024
mochajs__mocha-3024
[ "3018" ]
228dc4758dfebf0430b9295c6b75221c3390aee4
diff --git a/browser-entry.js b/browser-entry.js --- a/browser-entry.js +++ b/browser-entry.js @@ -45,7 +45,7 @@ process.removeListener = function (e, fn) { } else { global.onerror = function () {}; } - var i = Mocha.utils.indexOf(uncaughtExceptionHandlers, fn); + var i = uncaughtExceptionHandlers.indexOf(fn); if (i !== -1) { uncaughtExceptionHandlers.splice(i, 1); } @@ -103,7 +103,7 @@ Mocha.Runner.immediately = function (callback) { * only receive the 'message' attribute of the Error. */ mocha.throwError = function (err) { - Mocha.utils.forEach(uncaughtExceptionHandlers, function (fn) { + uncaughtExceptionHandlers.forEach(function (fn) { fn(err); }); throw err; diff --git a/karma.conf.js b/karma.conf.js --- a/karma.conf.js +++ b/karma.conf.js @@ -4,7 +4,6 @@ var fs = require('fs'); var path = require('path'); var mkdirp = require('mkdirp'); var baseBundleDirpath = path.join(__dirname, '.karma'); -var osName = require('os-name'); var browserPlatformPairs = { 'chrome@latest': 'Windows 8', @@ -24,15 +23,6 @@ module.exports = function (config) { 'expect', 'mocha' ], - plugins: [ - 'karma-browserify', - 'karma-chrome-launcher', - 'karma-phantomjs-launcher', - 'karma-expect', - 'karma-mocha', - 'karma-spec-reporter', - require('@coderbyheart/karma-sauce-launcher') - ], files: [ // we use the BDD interface for all of the tests that // aren't interface-specific. @@ -49,6 +39,7 @@ module.exports = function (config) { .ignore('fs') .ignore('path') .ignore('supports-color') + .require(path.join(__dirname, 'node_modules', 'buffer'), {expose: 'buffer'}) .on('bundled', function (err, content) { if (!err && bundleDirpath) { // write bundle to directory for debugging @@ -58,14 +49,17 @@ module.exports = function (config) { }); } }, - reporters: ['spec'], + reporters: ['mocha'], colors: true, - browsers: [osName() === 'macOS Sierra' ? 'Chrome' : 'PhantomJS'], // This is the default browser to run, locally - logLevel: config.LOG_DEBUG, + browsers: ['PhantomJS'], + logLevel: config.LOG_INFO, client: { mocha: { reporter: 'html' } + }, + mochaReporter: { + showDiff: true } }; diff --git a/lib/context.js b/lib/context.js --- a/lib/context.js +++ b/lib/context.js @@ -1,11 +1,5 @@ 'use strict'; -/** - * Module dependencies. - */ - -var JSON = require('json3'); - /** * Expose `Context`. */ @@ -77,11 +71,10 @@ Context.prototype.slow = function (ms) { * Mark a test as skipped. * * @api private - * @return {Context} self + * @throws Pending */ Context.prototype.skip = function () { this.runnable().skip(); - return this; }; /** diff --git a/lib/reporters/json-stream.js b/lib/reporters/json-stream.js --- a/lib/reporters/json-stream.js +++ b/lib/reporters/json-stream.js @@ -5,7 +5,6 @@ */ var Base = require('./base'); -var JSON = require('json3'); /** * Expose `List`. diff --git a/lib/runnable.js b/lib/runnable.js --- a/lib/runnable.js +++ b/lib/runnable.js @@ -5,12 +5,10 @@ */ var EventEmitter = require('events').EventEmitter; -var JSON = require('json3'); var Pending = require('./pending'); var debug = require('debug')('mocha:runnable'); var milliseconds = require('./ms'); var utils = require('./utils'); -var create = require('lodash.create'); /** * Save timer references to avoid Sinon interfering (see GH-237). @@ -64,9 +62,7 @@ function Runnable (title, fn) { /** * Inherit from `EventEmitter.prototype`. */ -Runnable.prototype = create(EventEmitter.prototype, { - constructor: Runnable -}); +utils.inherits(Runnable, EventEmitter); /** * Set & get timeout `ms`. 
diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -10,15 +10,10 @@ var utils = require('./utils'); var inherits = utils.inherits; var debug = require('debug')('mocha:runner'); var Runnable = require('./runnable'); -var filter = utils.filter; -var indexOf = utils.indexOf; -var some = utils.some; -var keys = utils.keys; var stackFilter = utils.stackTraceFilter(); var stringify = utils.stringify; var type = utils.type; var undefinedError = utils.undefinedError; -var isArray = utils.isArray; /** * Non-enumerable globals. @@ -150,11 +145,11 @@ Runner.prototype.grepTotal = function (suite) { * @api private */ Runner.prototype.globalProps = function () { - var props = keys(global); + var props = Object.keys(global); // non-enumerables for (var i = 0; i < globals.length; ++i) { - if (~indexOf(props, globals[i])) { + if (~props.indexOf(globals[i])) { continue; } props.push(globals[i]); @@ -316,7 +311,7 @@ Runner.prototype.hook = function (name, fn) { if (name === 'beforeEach' || name === 'afterEach') { self.test.pending = true; } else { - utils.forEach(suite.tests, function (test) { + suite.tests.forEach(function (test) { test.pending = true; }); // a pending hook won't be executed twice. @@ -780,19 +775,19 @@ function cleanSuiteReferences (suite) { } } - if (isArray(suite._beforeAll)) { + if (Array.isArray(suite._beforeAll)) { cleanArrReferences(suite._beforeAll); } - if (isArray(suite._beforeEach)) { + if (Array.isArray(suite._beforeEach)) { cleanArrReferences(suite._beforeEach); } - if (isArray(suite._afterAll)) { + if (Array.isArray(suite._afterAll)) { cleanArrReferences(suite._afterAll); } - if (isArray(suite._afterEach)) { + if (Array.isArray(suite._afterEach)) { cleanArrReferences(suite._afterEach); } @@ -890,7 +885,7 @@ function filterOnly (suite) { } else { // Otherwise, do not run any of the tests in this suite. suite.tests = []; - utils.forEach(suite._onlySuites, function (onlySuite) { + suite._onlySuites.forEach(function (onlySuite) { // If there are other `only` tests/suites nested in the current `only` suite, then filter that `only` suite. // Otherwise, all of the tests on this `only` suite should be run, so don't filter it. if (hasOnly(onlySuite)) { @@ -898,8 +893,8 @@ function filterOnly (suite) { } }); // Run the `only` suites, as well as any other suites that have `only` tests/suites as descendants. 
- suite.suites = filter(suite.suites, function (childSuite) { - return indexOf(suite._onlySuites, childSuite) !== -1 || filterOnly(childSuite); + suite.suites = suite.suites.filter(function (childSuite) { + return suite._onlySuites.indexOf(childSuite) !== -1 || filterOnly(childSuite); }); } // Keep the suite only if there is something to run @@ -914,7 +909,7 @@ function filterOnly (suite) { * @api private */ function hasOnly (suite) { - return suite._onlyTests.length || suite._onlySuites.length || some(suite.suites, hasOnly); + return suite._onlyTests.length || suite._onlySuites.length || suite.suites.some(hasOnly); } /** @@ -926,7 +921,7 @@ function hasOnly (suite) { * @return {Array} */ function filterLeaks (ok, globals) { - return filter(globals, function (key) { + return globals.filter(function (key) { // Firefox and Chrome exposes iframes as index inside the window object if (/^\d+/.test(key)) { return false; @@ -950,7 +945,7 @@ function filterLeaks (ok, globals) { return false; } - var matched = filter(ok, function (ok) { + var matched = ok.filter(function (ok) { if (~ok.indexOf('*')) { return key.indexOf(ok.split('*')[0]) === 0; } @@ -969,7 +964,7 @@ function filterLeaks (ok, globals) { function extraGlobals () { if (typeof process === 'object' && typeof process.version === 'string') { var parts = process.version.split('.'); - var nodeVersion = utils.reduce(parts, function (a, v) { + var nodeVersion = parts.reduce(function (a, v) { return a << 8 | v; }); diff --git a/lib/suite.js b/lib/suite.js --- a/lib/suite.js +++ b/lib/suite.js @@ -383,7 +383,7 @@ Suite.prototype.titlePath = function () { * @return {number} */ Suite.prototype.total = function () { - return utils.reduce(this.suites, function (sum, suite) { + return this.suites.reduce(function (sum, suite) { return sum + suite.total(); }, 0) + this.tests.length; }; @@ -397,8 +397,8 @@ Suite.prototype.total = function () { * @return {Suite} */ Suite.prototype.eachTest = function (fn) { - utils.forEach(this.tests, fn); - utils.forEach(this.suites, function (suite) { + this.tests.forEach(fn); + this.suites.forEach(function (suite) { suite.eachTest(fn); }); return this; diff --git a/lib/utils.js b/lib/utils.js --- a/lib/utils.js +++ b/lib/utils.js @@ -6,10 +6,9 @@ * Module dependencies. */ -var JSON = require('json3'); var basename = require('path').basename; var debug = require('debug')('mocha:watch'); -var exists = require('fs').existsSync || require('path').existsSync; +var exists = require('fs').existsSync; var glob = require('glob'); var path = require('path'); var join = path.join; @@ -39,20 +38,6 @@ exports.escape = function (html) { return he.encode(String(html), { useNamedReferences: false }); }; -/** - * Array#forEach (<=IE8) - * - * @api private - * @param {Array} arr - * @param {Function} fn - * @param {Object} scope - */ -exports.forEach = function (arr, fn, scope) { - for (var i = 0, l = arr.length; i < l; i++) { - fn.call(scope, arr[i], i); - } -}; - /** * Test if the given obj is type of string. 
* @@ -64,118 +49,6 @@ exports.isString = function (obj) { return typeof obj === 'string'; }; -/** - * Array#map (<=IE8) - * - * @api private - * @param {Array} arr - * @param {Function} fn - * @param {Object} scope - * @return {Array} - */ -exports.map = function (arr, fn, scope) { - var result = []; - for (var i = 0, l = arr.length; i < l; i++) { - result.push(fn.call(scope, arr[i], i, arr)); - } - return result; -}; - -/** - * Array#indexOf (<=IE8) - * - * @api private - * @param {Array} arr - * @param {Object} obj to find index of - * @param {number} start - * @return {number} - */ -var indexOf = exports.indexOf = function (arr, obj, start) { - for (var i = start || 0, l = arr.length; i < l; i++) { - if (arr[i] === obj) { - return i; - } - } - return -1; -}; - -/** - * Array#reduce (<=IE8) - * - * @api private - * @param {Array} arr - * @param {Function} fn - * @param {Object} val Initial value. - * @return {*} - */ -var reduce = exports.reduce = function (arr, fn, val) { - var rval = val; - - for (var i = 0, l = arr.length; i < l; i++) { - rval = fn(rval, arr[i], i, arr); - } - - return rval; -}; - -/** - * Array#filter (<=IE8) - * - * @api private - * @param {Array} arr - * @param {Function} fn - * @return {Array} - */ -exports.filter = function (arr, fn) { - var ret = []; - - for (var i = 0, l = arr.length; i < l; i++) { - var val = arr[i]; - if (fn(val, i, arr)) { - ret.push(val); - } - } - - return ret; -}; - -/** - * Array#some (<=IE8) - * - * @api private - * @param {Array} arr - * @param {Function} fn - * @return {Array} - */ -exports.some = function (arr, fn) { - for (var i = 0, l = arr.length; i < l; i++) { - if (fn(arr[i])) { - return true; - } - } - return false; -}; - -/** - * Object.keys (<=IE8) - * - * @api private - * @param {Object} obj - * @return {Array} keys - */ -exports.keys = typeof Object.keys === 'function' ? Object.keys : function (obj) { - var keys = []; - var has = Object.prototype.hasOwnProperty; // for `window` on <=IE8 - - for (var key in obj) { - if (has.call(obj, key)) { - keys.push(key); - } - } - - return keys; -}; - /** * Watch the given `files` for changes * and invoke `fn(file)` on modification. @@ -196,30 +69,6 @@ exports.watch = function (files, fn) { }); }; -/** - * Array.isArray (<=IE8) - * - * @api private - * @param {Object} obj - * @return {Boolean} - */ -var isArray = typeof Array.isArray === 'function' ? Array.isArray : function (obj) { - return Object.prototype.toString.call(obj) === '[object Array]'; -}; - -exports.isArray = isArray; - -/** - * Buffer.prototype.toJSON polyfill. - * - * @type {Function} - */ -if (typeof Buffer !== 'undefined' && Buffer.prototype) { - Buffer.prototype.toJSON = Buffer.prototype.toJSON || function () { - return Array.prototype.slice.call(this, 0); - }; -} - /** * Ignored files. * @@ -292,18 +141,7 @@ exports.clean = function (str) { str = str.replace(re, ''); - return exports.trim(str); -}; - -/** - * Trim the given `str`. 
- * - * @api private - * @param {string} str - * @return {string} - */ -exports.trim = function (str) { - return str.replace(/^\s+|\s+$/g, ''); + return str.trim(); }; /** @@ -314,7 +152,7 @@ exports.trim = function (str) { * @return {Object} */ exports.parseQuery = function (qs) { - return reduce(qs.replace('?', '').split('&'), function (obj, pair) { + return qs.replace('?', '').split('&').reduce(function (obj, pair) { var i = pair.indexOf('='); var key = pair.slice(0, i); var val = pair.slice(++i); @@ -411,7 +249,7 @@ var type = exports.type = function type (value) { return 'undefined'; } else if (value === null) { return 'null'; - } else if (typeof Buffer !== 'undefined' && Buffer.isBuffer(value)) { + } else if (Buffer.isBuffer(value)) { return 'buffer'; } return Object.prototype.toString.call(value) @@ -437,9 +275,9 @@ var type = exports.type = function type (value) { exports.stringify = function (value) { var typeHint = type(value); - if (!~indexOf(['object', 'array', 'function'], typeHint)) { + if (!~['object', 'array', 'function'].indexOf(typeHint)) { if (typeHint === 'buffer') { - var json = value.toJSON(); + var json = Buffer.prototype.toJSON.call(value); // Based on the toJSON result return jsonStringify(json.data && json.type ? json.data : json, 2) .replace(/,(\n|$)/g, '$1'); @@ -448,7 +286,7 @@ exports.stringify = function (value) { // IE7/IE8 has a bizarre String constructor; needs to be coerced // into an array and back to obj. if (typeHint === 'string' && typeof value === 'object') { - value = reduce(value.split(''), function (acc, char, idx) { + value = value.split('').reduce(function (acc, char, idx) { acc[idx] = char; return acc; }, {}); @@ -484,9 +322,9 @@ function jsonStringify (object, spaces, depth) { depth = depth || 1; var space = spaces * depth; - var str = isArray(object) ? '[' : '{'; - var end = isArray(object) ? ']' : '}'; - var length = typeof object.length === 'number' ? object.length : exports.keys(object).length; + var str = Array.isArray(object) ? '[' : '{'; + var end = Array.isArray(object) ? ']' : '}'; + var length = typeof object.length === 'number' ? object.length : Object.keys(object).length; // `.repeat()` polyfill function repeat (s, n) { return new Array(n).join(s); @@ -539,7 +377,7 @@ function jsonStringify (object, spaces, depth) { } --length; str += '\n ' + repeat(' ', space) + - (isArray(object) ? '' : '"' + i + '": ') + // key + (Array.isArray(object) ? '' : '"' + i + '": ') + // key _stringify(object[i]) + // value (length ? ',' : ''); // comma } @@ -549,17 +387,6 @@ function jsonStringify (object, spaces, depth) { (str.length !== 1 ? '\n' + repeat(' ', --space) + end : end); } -/** - * Test if a value is a buffer. - * - * @api private - * @param {*} value The value to test. - * @return {boolean} True if `value` is a buffer, otherwise false - */ -exports.isBuffer = function (value) { - return typeof Buffer !== 'undefined' && Buffer.isBuffer(value); -}; - /** * Return a new Thing that has the keys in sorted order. Recursive. 
* @@ -593,7 +420,7 @@ exports.canonicalize = function canonicalize (value, stack, typeHint) { stack = stack || []; - if (indexOf(stack, value) !== -1) { + if (stack.indexOf(value) !== -1) { return '[Circular]'; } @@ -605,7 +432,7 @@ exports.canonicalize = function canonicalize (value, stack, typeHint) { break; case 'array': withStack(value, function () { - canonicalizedObj = exports.map(value, function (item) { + canonicalizedObj = value.map(function (item) { return exports.canonicalize(item, stack); }); }); @@ -625,7 +452,7 @@ exports.canonicalize = function canonicalize (value, stack, typeHint) { case 'object': canonicalizedObj = canonicalizedObj || {}; withStack(value, function () { - exports.forEach(exports.keys(value).sort(), function (key) { + Object.keys(value).sort().forEach(function (key) { canonicalizedObj[key] = exports.canonicalize(value[key], stack); }); }); @@ -765,7 +592,7 @@ exports.stackTraceFilter = function () { return function (stack) { stack = stack.split('\n'); - stack = reduce(stack, function (list, line) { + stack = stack.reduce(function (list, line) { if (isMochaInternal(line)) { return list; }
diff --git a/lib/test.js b/lib/test.js --- a/lib/test.js +++ b/lib/test.js @@ -5,8 +5,8 @@ */ var Runnable = require('./runnable'); -var create = require('lodash.create'); -var isString = require('./utils').isString; +var utils = require('./utils'); +var isString = utils.isString; /** * Expose `Test`. @@ -33,9 +33,7 @@ function Test (title, fn) { /** * Inherit from `Runnable.prototype`. */ -Test.prototype = create(Runnable.prototype, { - constructor: Test -}); +utils.inherits(Test, Runnable); Test.prototype.clone = function () { var test = new Test(this.title, this.fn); diff --git a/test/unit/utils.spec.js b/test/unit/utils.spec.js --- a/test/unit/utils.spec.js +++ b/test/unit/utils.spec.js @@ -2,7 +2,6 @@ var utils = require('../../lib/utils'); var toISOString = require('../../lib/to-iso-string'); -var JSON = require('json3'); describe('lib/utils', function () { describe('clean', function () { @@ -495,91 +494,6 @@ describe('lib/utils', function () { }); }); - describe('isBuffer()', function () { - var isBuffer = utils.isBuffer; - it('should test if object is a Buffer', function () { - expect(isBuffer(Buffer.from([0x01]))) - .to - .equal(true); - expect(isBuffer({})) - .to - .equal(false); - }); - }); - - describe('map()', function () { - var map = utils.map; - it('should behave same as Array.prototype.map', function () { - if (!Array.prototype.map) { - this.skip(); - return; - } - - var arr = [ - 1, - 2, - 3 - ]; - expect(map(arr, JSON.stringify)) - .to - .eql(arr.map(JSON.stringify)); - }); - - it('should call the callback with 3 arguments[currentValue, index, array]', - function () { - var index = 0; - map([ - 1, - 2, - 3 - ], function (e, i, arr) { - expect(e).to.equal(arr[index]); - expect(i).to.equal(index++); - }); - }); - - it('should apply with the given scope', function () { - var scope = {}; - map([ - 'a', - 'b', - 'c' - ], function () { - expect(this).to.equal(scope); - }, scope); - }); - }); - - describe('some()', function () { - var some = utils.some; - - it( - 'should return true when some array elements pass the check of the fn parameter', - function () { - var result = some([ - 'a', - 'b', - 'c' - ], function (e) { - return e === 'b'; - }); - expect(result).to.eql(true); - }); - - it( - 'should return false when none of the array elements pass the check of the fn parameter', - function () { - var result = some([ - 'a', - 'b', - 'c' - ], function (e) { - return e === 'd'; - }); - expect(result).to.eql(false); - }); - }); - describe('parseQuery()', function () { var parseQuery = utils.parseQuery; it('should get queryString and return key-value object', function () {
upgrade (mostly) production deps

after #3016 but before v4.0.0, these production deps should be upgraded to the latest version, if not too painful:

- `commander`
- `debug`
- `diff`
- `glob`
- `lodash.create`
- `readable-stream`
- `supports-color`

the following dev deps should be upgraded as well, with caveats:

- `karma-phantomjs-launcher`: this should be part of #2979
- `karma`: depends on previous item
- `browserify`: unknown effect on browser bundle
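For orientation, here is a minimal before/after sketch (not taken from the PR) of the call-shape change the patch above makes when it drops the old `lib/utils.js` array shims in favor of native methods; the `names` array and sample values are made up purely for illustration:

```js
// Hypothetical sketch of the polyfill removal shown in the patch above.
// The commented-out calls mirror the removed lib/utils.js helpers (kept for <=IE8);
// the live calls are the native ES5 equivalents the codebase switches to.
var names = ['spec', 'dot', 'nyan'];

// old: utils.forEach(names, fn); utils.indexOf(names, 'dot');
//      utils.keys(obj);           utils.isArray(names);        utils.trim(str);

names.forEach(function (name) { console.log(name); }); // native forEach
console.log(names.indexOf('dot'));                     // 1
console.log(Object.keys({ a: 1, b: 2 }));              // [ 'a', 'b' ]
console.log(Array.isArray(names));                     // true
console.log('  trim me  '.trim());                     // 'trim me'
```

All of these native methods have been available since ES5, which is why the `<=IE8` shims could go once legacy-browser support was dropped.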
2017-09-28T04:18:03Z
3.5
mochajs/mocha
3143
mochajs__mocha-3143
[ "3142" ]
5fbbce999284f137332ab359be11117d265b6cb1
diff --git a/lib/interfaces/bdd.js b/lib/interfaces/bdd.js --- a/lib/interfaces/bdd.js +++ b/lib/interfaces/bdd.js @@ -102,7 +102,7 @@ module.exports = function (suite) { */ context.xit = context.xspecify = context.it.skip = function (title) { - context.it(title); + return context.it(title); }; /**
diff --git a/test/integration/fixtures/pending/skip-shorthand.fixture.js b/test/integration/fixtures/pending/skip-shorthand.fixture.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/pending/skip-shorthand.fixture.js @@ -0,0 +1,7 @@ +'use strict'; + +describe('pending shorthand', function () { + xit('pending spec', function () {}).timeout(0); + xspecify('pending spec', function () {}).timeout(0); + it.skip('pending spec', function () {}).timeout(0); +}); diff --git a/test/integration/pending.spec.js b/test/integration/pending.spec.js --- a/test/integration/pending.spec.js +++ b/test/integration/pending.spec.js @@ -19,6 +19,19 @@ describe('pending', function () { done(); }); }); + it('should return the test object when used via shorthand methods', function (done) { + run('pending/skip-shorthand.fixture.js', args, function (err, res) { + if (err) { + done(err); + return; + } + assert.equal(res.stats.pending, 3); + assert.equal(res.stats.passes, 0); + assert.equal(res.stats.failures, 0); + assert.equal(res.code, 0); + done(); + }); + }); }); describe('synchronous skip()', function () { diff --git a/test/interfaces/bdd.spec.js b/test/interfaces/bdd.spec.js --- a/test/interfaces/bdd.spec.js +++ b/test/interfaces/bdd.spec.js @@ -40,3 +40,27 @@ describe('pending suite', function () { }); }); }); + +describe('pending tests', function () { + it.skip('should not run', function () { + expect(1 + 1).to.equal(3); + }); +}); + +describe('setting timeout by appending it to test', function () { + var runningTest = it('enables users to call timeout on active tests', function () { + expect(1 + 1).to.equal(2); + }).timeout(1003); + + var skippedTest = xit('enables users to call timeout on pending tests', function () { + expect(1 + 1).to.equal(3); + }).timeout(1002); + + it('sets timeout on pending tests', function () { + expect(skippedTest._timeout).to.equal(1002); + }); + + it('sets timeout on running tests', function () { + expect(runningTest._timeout).to.equal(1003); + }); +});
Add timeout option to xits using arrow functions

<!--
Have you read Mocha's Code of Conduct? By filing an Issue, you are expected to comply with it, including treating everyone with respect: https://github.com/mochajs/mocha/blob/master/.github/CODE_OF_CONDUCT.md

For more, check out the Mocha Gitter chat room: https://gitter.im/mochajs/mocha
-->

### Prerequisites

<!-- Place an `x` between the square brackets on the lines below for every satisfied prerequisite. -->

* [x] Checked that your issue isn't already filed by cross referencing [issues with the `common mistake` label](https://github.com/mochajs/mocha/issues?utf8=%E2%9C%93&q=is%3Aissue%20label%3Acommon-mistake%20)
* [x] Checked next-gen ES issues and syntax problems by using the same environment and/or transpiler configuration without Mocha to ensure it isn't just a feature that actually isn't supported in the environment in question or a bug in your code.
* [x] 'Smoke tested' the code to be tested by running it outside the real test suite to get a better sense of whether the problem is in the code under test, your usage of Mocha, or Mocha itself
* [x] Ensured that there is no discrepancy between the locally and globally installed versions of Mocha. You can find them with: `node node_modules/.bin/mocha --version` (Local) and `mocha --version` (Global). We recommend avoiding the use of globally installed Mocha.

### Description

When using the arrow function syntax, you override the default timeout by calling it on the return value of the `it` function:

    it("should override the timeout", () => { }).timeout(100);

But if we want to ignore the test by adding an `x`, we get an exception, which is annoying.

### Steps to Reproduce

    // Throws
    xit("should override the timeout", () => { }).timeout(100);

<!-- Please add a series of steps to reproduce the problem. See https://stackoverflow.com/help/mcve for in-depth information on how to create a minimal, complete, and verifiable example. -->

**Expected behavior:** The test should be ignored.

**Actual behavior:** We get an exception:

    >> Mocha exploded!
    >> TypeError: Cannot read property 'timeout' of undefined

**Reproduces how often:** 100%

### Versions

mocha 4.0.1
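For clarity, a minimal sketch of the chaining pattern the report asks for, assuming mocha's BDD globals (`describe`, `it`, `xit`) are in scope; it shows the post-patch behavior, where the pending shorthands return the `Test` object (the same pattern the `skip-shorthand` fixture above exercises):

```js
// Minimal sketch, assuming mocha's BDD interface is loaded.
// After the one-line fix above, the pending shorthands return the Test object,
// so .timeout() can be chained on them just like on a regular it().
describe('timeout chaining', function () {
  it('runs and allows chaining', function () {}).timeout(100);

  // Before the fix this line threw:
  //   TypeError: Cannot read property 'timeout' of undefined
  // because xit() returned undefined instead of the Test.
  xit('is pending but still chainable', function () {}).timeout(100);
});
```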
Is this a documented API? I don't recall ever seeing anyone use this before. anyway, this looks pretty trivial to address. I'm going to call it a bug even though it's more of an "internal consistency" issue, since we don't necessarily expect consumers to use the object returned from `it` or `xit` for anything. @boneskull this is a very interesting side effect of returning the test object from inside the `it` function. I have a PR coming in a few.
2017-12-11T20:22:43Z
4
mochajs/mocha
2746
mochajs__mocha-2746
[ "2745" ]
2d7e2526b84d536c14279761a6637ed7ce2b10a8
diff --git a/bin/options.js b/bin/options.js --- a/bin/options.js +++ b/bin/options.js @@ -17,6 +17,10 @@ module.exports = getOptions; */ function getOptions () { + if (process.argv.length === 3 && (process.argv[2] === '-h' || process.argv[2] === '--help')) { + return; + } + var optsPath = process.argv.indexOf('--opts') === -1 ? 'test/mocha.opts' : process.argv[process.argv.indexOf('--opts') + 1];
diff --git a/test/integration/fixtures/options/help/test/mocha.opts b/test/integration/fixtures/options/help/test/mocha.opts new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/options/help/test/mocha.opts @@ -0,0 +1 @@ +foo diff --git a/test/integration/helpers.js b/test/integration/helpers.js --- a/test/integration/helpers.js +++ b/test/integration/helpers.js @@ -110,15 +110,34 @@ module.exports = { /** * regular expression used for splitting lines based on new line / dot symbol. */ - splitRegExp: new RegExp('[\\n' + baseReporter.symbols.dot + ']+') + splitRegExp: new RegExp('[\\n' + baseReporter.symbols.dot + ']+'), + + /** + * Invokes the mocha binary. Accepts an array of additional command line args + * to pass. The callback is invoked with the exit code and output. Optional + * current working directory as final parameter. + * + * In most cases runMocha should be used instead. + * + * Example response: + * { + * code: 1, + * output: '...' + * } + * + * @param {Array<string>} args - Extra args to mocha executable + * @param {Function} done - Callback + * @param {string} cwd - Current working directory for mocha run, optional + */ + invokeMocha: invokeMocha }; -function invokeMocha (args, fn) { +function invokeMocha (args, fn, cwd) { var output, mocha, listener; output = ''; - args = [path.join('bin', 'mocha')].concat(args); - mocha = spawn(process.execPath, args); + args = [path.join(__dirname, '..', '..', 'bin', 'mocha')].concat(args); + mocha = spawn(process.execPath, args, { cwd: cwd }); listener = function (data) { output += data; diff --git a/test/integration/options.spec.js b/test/integration/options.spec.js --- a/test/integration/options.spec.js +++ b/test/integration/options.spec.js @@ -1,7 +1,9 @@ 'use strict'; +var path = require('path'); var assert = require('assert'); var run = require('./helpers').runMochaJSON; +var directInvoke = require('./helpers').invokeMocha; var args = []; describe('options', function () { @@ -386,4 +388,14 @@ describe('options', function () { it('should not force exit after root suite completion', runExit(false, 'disabled')); }); }); + + describe('--help', function () { + it('works despite the presence of mocha.opts', function (done) { + directInvoke(['-h'], function (error, result) { + if (error) { return done(error); } + expect(result.output).to.contain('Usage:'); + done(); + }, path.join(__dirname, 'fixtures', 'options', 'help')); + }); + }); });
`mocha -h` doesn't always display help

Pretty straightforward testcase:

```
zarel@Phenylalanine ~> mocha -h
Usage: mocha [debug] [options] [files] [...]
zarel@Phenylalanine ~> mkdir test
zarel@Phenylalanine ~> echo "test" > test/mocha.opts
zarel@Phenylalanine ~> mocha -h
No test files found
```

In other words, if `test/mocha.opts` exists, `mocha -h` won't display help.
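A minimal sketch of the guard the patch adds in `bin/options.js`, pulled out into a standalone function for illustration; `shouldSkipOptsFile` is a hypothetical name, the real change is inlined at the top of `getOptions()`:

```js
// Sketch of the guard: when the only argument is -h/--help, skip reading
// test/mocha.opts entirely so help can be printed even if the opts file exists.
function shouldSkipOptsFile (argv) {
  return argv.length === 3 && (argv[2] === '-h' || argv[2] === '--help');
}

console.log(shouldSkipOptsFile(['node', 'mocha', '-h']));     // true
console.log(shouldSkipOptsFile(['node', 'mocha', '--help'])); // true
console.log(shouldSkipOptsFile(['node', 'mocha', 'test/']));  // false
```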
2017-03-17T18:17:09Z
4
mochajs/mocha
2696
mochajs__mocha-2696
[ "1818" ]
ac0c1c8382a50963425e00d306e4d158f6bfd9c3
diff --git a/lib/mocha.js b/lib/mocha.js --- a/lib/mocha.js +++ b/lib/mocha.js @@ -483,6 +483,24 @@ Mocha.prototype.delay = function delay () { return this; }; +/** + * Tests marked only fail the suite + * @returns {Mocha} + */ +Mocha.prototype.forbidOnly = function () { + this.options.forbidOnly = true; + return this; +}; + +/** + * Pending tests and tests marked skip fail the suite + * @returns {Mocha} + */ +Mocha.prototype.forbidPending = function () { + this.options.forbidPending = true; + return this; +}; + /** * Run tests and invoke `fn()` when complete. * @@ -504,6 +522,8 @@ Mocha.prototype.run = function (fn) { runner.hasOnly = options.hasOnly; runner.asyncOnly = options.asyncOnly; runner.allowUncaught = options.allowUncaught; + runner.forbidOnly = options.forbidOnly; + runner.forbidPending = options.forbidPending; if (options.grep) { runner.grep(options.grep, options.invert); } diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -820,6 +820,12 @@ Runner.prototype.run = function (fn) { // callback this.on('end', function () { + if (self.forbidOnly && self.hasOnly) { + self.failures += self.stats.tests; + } + if (self.forbidPending) { + self.failures += self.stats.pending; + } debug('end'); process.removeListener('uncaughtException', uncaught); fn(self.failures);
diff --git a/test/integration/fixtures/options/forbid-only/only.js b/test/integration/fixtures/options/forbid-only/only.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/options/forbid-only/only.js @@ -0,0 +1,7 @@ +'use strict'; + +describe('forbid only - test marked with only', function () { + it('test1', function () {}); + it.only('test2', function () {}); + it('test3', function () {}); +}); diff --git a/test/integration/fixtures/options/forbid-only/passed.js b/test/integration/fixtures/options/forbid-only/passed.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/options/forbid-only/passed.js @@ -0,0 +1,7 @@ +'use strict'; + +describe('forbid only - `.only` is not used', function () { + it('test1', function () {}); + it('test2', function () {}); + it('test3', function () {}); +}); diff --git a/test/integration/fixtures/options/forbid-pending/passed.js b/test/integration/fixtures/options/forbid-pending/passed.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/options/forbid-pending/passed.js @@ -0,0 +1,7 @@ +'use strict'; + +describe('forbid pending - all test pass', function () { + it('test1', function () {}); + it('test2', function () {}); + it('test3', function () {}); +}); diff --git a/test/integration/fixtures/options/forbid-pending/pending.js b/test/integration/fixtures/options/forbid-pending/pending.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/options/forbid-pending/pending.js @@ -0,0 +1,7 @@ +'use strict'; + +describe('forbid pending - test without function', function () { + it('test1', function () {}); + it('test2'); + it('test3', function () {}); +}); diff --git a/test/integration/fixtures/options/forbid-pending/skip.js b/test/integration/fixtures/options/forbid-pending/skip.js new file mode 100644 --- /dev/null +++ b/test/integration/fixtures/options/forbid-pending/skip.js @@ -0,0 +1,7 @@ +'use strict'; + +describe('forbid pending - test marked with skip', function () { + it('test1', function () {}); + it.skip('test2', function () {}); + it('test3', function () {}); +}); diff --git a/test/integration/options.spec.js b/test/integration/options.spec.js --- a/test/integration/options.spec.js +++ b/test/integration/options.spec.js @@ -180,4 +180,56 @@ describe('options', function () { }); }); }); + + describe('--forbid-only', function () { + before(function () { + args = ['--forbid-only']; + }); + + it('succeeds if there are only passed tests', function (done) { + run('options/forbid-only/passed.js', args, function (err, res) { + assert(!err); + assert.equal(res.code, 0); + done(); + }); + }); + + it('fails if there are tests marked only', function (done) { + run('options/forbid-only/only.js', args, function (err, res) { + assert(!err); + assert.equal(res.code, 1); + done(); + }); + }); + }); + + describe('--forbid-pending', function () { + before(function () { + args = ['--forbid-pending']; + }); + + it('succeeds if there are only passed tests', function (done) { + run('options/forbid-pending/passed.js', args, function (err, res) { + assert(!err); + assert.equal(res.code, 0); + done(); + }); + }); + + it('fails if there are tests marked skip', function (done) { + run('options/forbid-pending/skip.js', args, function (err, res) { + assert(!err); + assert.equal(res.code, 1); + done(); + }); + }); + + it('fails if there are pending tests', function (done) { + run('options/forbid-pending/pending.js', args, function (err, res) { + assert(!err); + assert.equal(res.code, 1); + done(); + }); + }); + }); });
add --strict flag

I'd like a `--strict` CLI flag that will cause my tests to exit with non-zero status if

- any test is marked as only
- any test is marked as skipped
- any test is marked as pending
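The patch above lands this as two separate switches rather than a single `--strict`. Here is a minimal usage sketch of the programmatic API it adds (`forbidOnly()` and `forbidPending()` on the `Mocha` prototype); the test file path is made up for illustration:

```js
// Minimal usage sketch of the API added by the patch above.
// Both methods are chainable; --forbid-only / --forbid-pending are the CLI forms.
var Mocha = require('mocha');

var mocha = new Mocha()
  .forbidOnly()      // any test or suite marked .only fails the run
  .forbidPending();  // skipped or pending tests fail the run

mocha.addFile('./test/sample.spec.js'); // hypothetical test file path
mocha.run(function (failures) {
  process.exitCode = failures ? 1 : 0;
});
```

On the command line the equivalents are `--forbid-only` and `--forbid-pending`, as exercised by the integration tests above.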
This may be a good solution to the problem of accidentally committing these. @mochajs/mocha any opinions?

Hmm, I thought skipped tests are actually marked as pending?

People do commit these (looking at you `.only`), so I believe this is useful, and I'm looking forward to it. Not so sure about the tests marked as skipped/pending... If I skip tests, it's because I don't want to run them anymore, not even in prod... not sure about you.

I would love to have this! If you do not want a test (even in prod), it's probably because it is a dead/outdated test and needs to be deleted from the source code. cc @mochajs/mocha

Could we use some different terminology so that it's not confused with node/v8's `--use_strict`? How about `--production`?

What is the status of this issue? I've been following the related PRs but they all seem to be closed. I think this is an important configuration and I was surprised that it didn't exist, since some commit could add a `.only` and the test suite would be giving invalid feedback... Thanks!

@IvanGuardado https://github.com/mochajs/mocha/pull/1935#issuecomment-242995238
2017-01-31T06:10:10Z
3.4
mochajs/mocha
2642
mochajs__mocha-2642
[ "2626", "2626" ]
c82c49362971855a3cb4b1244ba7df8b3492b6a4
diff --git a/lib/utils.js b/lib/utils.js --- a/lib/utils.js +++ b/lib/utils.js @@ -322,7 +322,7 @@ exports.parseQuery = function (qs) { var key = pair.slice(0, i); var val = pair.slice(++i); - obj[key] = decodeURIComponent(val); + obj[key] = decodeURIComponent(val.replace(/\+/g, '%20')); return obj; }, {}); };
diff --git a/test/utils.spec.js b/test/utils.spec.js --- a/test/utils.spec.js +++ b/test/utils.spec.js @@ -95,6 +95,10 @@ describe('utils', function () { r3: '^co.*' }); }); + + it('should parse "+" as a space', function () { + parseQuery('?grep=foo+bar').should.eql({grep: 'foo bar'}); + }); }); describe('.stackTraceFilter()', function () {
3.x: url encoded grep not properly unescaped

Hi, running `3.0.2`, in browser tests the `grep` argument won't work properly with a URL like
`http://localhost:3000/test/?grep=CustomAjaxTable+should+update+header+class+on+sortable+header+click`

It will work if the spaces are not replaced by the `+` sign:
`http://localhost:3000/test/?grep=CustomAjaxTable should update header class on sortable header click`

It's a bit of a problem: my tests need to manage URL parameters, and they use the URL / URLSearchParams API (https://developer.mozilla.org/en-US/docs/Web/API/URLSearchParams), which encodes spaces this way, so mocha won't pick the grep up. I've gone through the changelog and it looks like this hasn't been fixed since the reported version.
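The underlying issue is that `decodeURIComponent` does not translate `+` into a space (that is an `application/x-www-form-urlencoded` convention, not percent-encoding). A minimal sketch of the behavior and of the substitution the patch applies, runnable in Node or a browser console:

```js
// decodeURIComponent leaves '+' alone (it only reverses percent-encoding),
// so a form-encoded grep value keeps its plus signs:
console.log(decodeURIComponent('CustomAjaxTable+should+update'));
// -> 'CustomAjaxTable+should+update'

// The one-line fix in lib/utils.js rewrites '+' to '%20' before decoding,
// so the value comes back with real spaces and the grep matches:
var raw = 'CustomAjaxTable+should+update';
console.log(decodeURIComponent(raw.replace(/\+/g, '%20')));
// -> 'CustomAjaxTable should update'
```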
2016-12-19T00:56:54Z
3.2
mochajs/mocha
2513
mochajs__mocha-2513
[ "2490" ]
9915dfbc671f7f5d3cab55acde7356d839c1170c
diff --git a/.eslintrc.js b/.eslintrc.js new file mode 100644 --- /dev/null +++ b/.eslintrc.js @@ -0,0 +1,3 @@ +module.exports = { + "extends": "semistandard" +}; diff --git a/browser-entry.js b/browser-entry.js --- a/browser-entry.js +++ b/browser-entry.js @@ -35,12 +35,12 @@ var originalOnerrorHandler = global.onerror; * Revert to original onerror handler if previously defined. */ -process.removeListener = function(e, fn) { +process.removeListener = function (e, fn) { if (e === 'uncaughtException') { if (originalOnerrorHandler) { global.onerror = originalOnerrorHandler; } else { - global.onerror = function() {}; + global.onerror = function () {}; } var i = Mocha.utils.indexOf(uncaughtExceptionHandlers, fn); if (i !== -1) { @@ -53,9 +53,9 @@ process.removeListener = function(e, fn) { * Implements uncaughtException listener. */ -process.on = function(e, fn) { +process.on = function (e, fn) { if (e === 'uncaughtException') { - global.onerror = function(err, url, line) { + global.onerror = function (err, url, line) { fn(new Error(err + ' (' + url + ':' + line + ')')); return !mocha.allowUncaught; }; @@ -71,7 +71,7 @@ mocha.suite.removeAllListeners('pre-require'); var immediateQueue = []; var immediateTimeout; -function timeslice() { +function timeslice () { var immediateStart = new Date().getTime(); while (immediateQueue.length && (new Date().getTime() - immediateStart) < 100) { immediateQueue.shift()(); @@ -87,7 +87,7 @@ function timeslice() { * High-performance override of Runner.immediately. */ -Mocha.Runner.immediately = function(callback) { +Mocha.Runner.immediately = function (callback) { immediateQueue.push(callback); if (!immediateTimeout) { immediateTimeout = setTimeout(timeslice, 0); @@ -99,8 +99,8 @@ Mocha.Runner.immediately = function(callback) { * This is useful when running tests in a browser because window.onerror will * only receive the 'message' attribute of the Error. */ -mocha.throwError = function(err) { - Mocha.utils.forEach(uncaughtExceptionHandlers, function(fn) { +mocha.throwError = function (err) { + Mocha.utils.forEach(uncaughtExceptionHandlers, function (fn) { fn(err); }); throw err; @@ -111,7 +111,7 @@ mocha.throwError = function(err) { * Normally this would happen in Mocha.prototype.loadFiles. */ -mocha.ui = function(ui) { +mocha.ui = function (ui) { Mocha.prototype.ui.call(this, ui); this.suite.emit('pre-require', global, null, this); return this; @@ -121,7 +121,7 @@ mocha.ui = function(ui) { * Setup mocha with the given setting options. */ -mocha.setup = function(opts) { +mocha.setup = function (opts) { if (typeof opts === 'string') { opts = { ui: opts }; } @@ -137,7 +137,7 @@ mocha.setup = function(opts) { * Run mocha, returning the Runner. */ -mocha.run = function(fn) { +mocha.run = function (fn) { var options = mocha.options; mocha.globals('location'); @@ -152,7 +152,7 @@ mocha.run = function(fn) { mocha.invert(); } - return Mocha.prototype.run.call(mocha, function(err) { + return Mocha.prototype.run.call(mocha, function (err) { // The DOM Document is not available in Web Workers. 
var document = global.document; if (document && document.getElementById('mocha') && options.noHighlighting !== true) { diff --git a/karma.conf.js b/karma.conf.js --- a/karma.conf.js +++ b/karma.conf.js @@ -5,7 +5,7 @@ var path = require('path'); var mkdirp = require('mkdirp'); var baseBundleDirpath = path.join(__dirname, '.karma'); -module.exports = function(config) { +module.exports = function (config) { var bundleDirpath; var cfg = { frameworks: [ @@ -31,12 +31,12 @@ module.exports = function(config) { }, browserify: { debug: true, - configure: function configure(b) { + configure: function configure (b) { b.ignore('glob') .ignore('fs') .ignore('path') .ignore('supports-color') - .on('bundled', function(err, content) { + .on('bundled', function (err, content) { if (!err && bundleDirpath) { // write bundle to directory for debugging fs.writeFileSync(path.join(bundleDirpath, @@ -70,8 +70,8 @@ module.exports = function(config) { if (env.SAUCE_USERNAME && env.SAUCE_ACCESS_KEY) { // correlate build/tunnel with Travis sauceConfig = { - build: 'TRAVIS #' + env.TRAVIS_BUILD_NUMBER - + ' (' + env.TRAVIS_BUILD_ID + ')', + build: 'TRAVIS #' + env.TRAVIS_BUILD_NUMBER + + ' (' + env.TRAVIS_BUILD_ID + ')', tunnelIdentifier: env.TRAVIS_JOB_NUMBER }; console.error('Configured SauceLabs'); @@ -124,7 +124,7 @@ module.exports = function(config) { config.set(cfg); }; -function addSauceTests(cfg) { +function addSauceTests (cfg) { cfg.reporters.push('saucelabs'); cfg.customLaunchers = { diff --git a/lib/browser/debug.js b/lib/browser/debug.js --- a/lib/browser/debug.js +++ b/lib/browser/debug.js @@ -1,4 +1,4 @@ /* eslint-disable no-unused-vars */ -module.exports = function(type) { - return function() {}; +module.exports = function (type) { + return function () {}; }; diff --git a/lib/browser/events.js b/lib/browser/events.js --- a/lib/browser/events.js +++ b/lib/browser/events.js @@ -16,7 +16,7 @@ var objToString = Object.prototype.toString; * @param {*} val The value to test. * @return {boolean} true if the value is an array, otherwise false. */ -function isArray(val) { +function isArray (val) { return objToString.call(val) === '[object Array]'; } @@ -25,7 +25,7 @@ function isArray(val) { * * @api public */ -function EventEmitter() {} +function EventEmitter () {} /** * Add a listener. @@ -35,7 +35,7 @@ function EventEmitter() {} * @param {Function} fn Event handler. * @return {EventEmitter} Emitter instance. */ -EventEmitter.prototype.on = function(name, fn) { +EventEmitter.prototype.on = function (name, fn) { if (!this.$events) { this.$events = {}; } @@ -61,10 +61,10 @@ EventEmitter.prototype.addListener = EventEmitter.prototype.on; * @param {Function} fn Event handler. * @return {EventEmitter} Emitter instance. */ -EventEmitter.prototype.once = function(name, fn) { +EventEmitter.prototype.once = function (name, fn) { var self = this; - function on() { + function on () { self.removeListener(name, on); fn.apply(this, arguments); } @@ -83,7 +83,7 @@ EventEmitter.prototype.once = function(name, fn) { * @param {Function} fn Event handler. * @return {EventEmitter} Emitter instance. */ -EventEmitter.prototype.removeListener = function(name, fn) { +EventEmitter.prototype.removeListener = function (name, fn) { if (this.$events && this.$events[name]) { var list = this.$events[name]; @@ -121,7 +121,7 @@ EventEmitter.prototype.removeListener = function(name, fn) { * @param {string} name Event name. * @return {EventEmitter} Emitter instance. 
*/ -EventEmitter.prototype.removeAllListeners = function(name) { +EventEmitter.prototype.removeAllListeners = function (name) { if (name === undefined) { this.$events = {}; return this; @@ -141,7 +141,7 @@ EventEmitter.prototype.removeAllListeners = function(name) { * @param {string} name Event name. * @return {EventEmitter} Emitter instance. */ -EventEmitter.prototype.listeners = function(name) { +EventEmitter.prototype.listeners = function (name) { if (!this.$events) { this.$events = {}; } @@ -164,7 +164,7 @@ EventEmitter.prototype.listeners = function(name) { * @param {string} name Event name. * @return {boolean} true if at least one handler was invoked, else false. */ -EventEmitter.prototype.emit = function(name) { +EventEmitter.prototype.emit = function (name) { if (!this.$events) { return false; } diff --git a/lib/browser/progress.js b/lib/browser/progress.js --- a/lib/browser/progress.js +++ b/lib/browser/progress.js @@ -7,7 +7,7 @@ module.exports = Progress; /** * Initialize a new `Progress` indicator. */ -function Progress() { +function Progress () { this.percent = 0; this.size(0); this.fontSize(11); @@ -21,7 +21,7 @@ function Progress() { * @param {number} size * @return {Progress} Progress instance. */ -Progress.prototype.size = function(size) { +Progress.prototype.size = function (size) { this._size = size; return this; }; @@ -33,7 +33,7 @@ Progress.prototype.size = function(size) { * @param {string} text * @return {Progress} Progress instance. */ -Progress.prototype.text = function(text) { +Progress.prototype.text = function (text) { this._text = text; return this; }; @@ -45,7 +45,7 @@ Progress.prototype.text = function(text) { * @param {number} size * @return {Progress} Progress instance. */ -Progress.prototype.fontSize = function(size) { +Progress.prototype.fontSize = function (size) { this._fontSize = size; return this; }; @@ -56,7 +56,7 @@ Progress.prototype.fontSize = function(size) { * @param {string} family * @return {Progress} Progress instance. */ -Progress.prototype.font = function(family) { +Progress.prototype.font = function (family) { this._font = family; return this; }; @@ -67,7 +67,7 @@ Progress.prototype.font = function(family) { * @param {number} n * @return {Progress} Progress instance. */ -Progress.prototype.update = function(n) { +Progress.prototype.update = function (n) { this.percent = n; return this; }; @@ -78,7 +78,7 @@ Progress.prototype.update = function(n) { * @param {CanvasRenderingContext2d} ctx * @return {Progress} Progress instance. */ -Progress.prototype.draw = function(ctx) { +Progress.prototype.draw = function (ctx) { try { var percent = Math.min(this.percent, 100); var size = this._size; diff --git a/lib/browser/tty.js b/lib/browser/tty.js --- a/lib/browser/tty.js +++ b/lib/browser/tty.js @@ -1,8 +1,8 @@ -exports.isatty = function isatty() { +exports.isatty = function isatty () { return true; }; -exports.getWindowSize = function getWindowSize() { +exports.getWindowSize = function getWindowSize () { if ('innerHeight' in global) { return [global.innerHeight, global.innerWidth]; } diff --git a/lib/context.js b/lib/context.js --- a/lib/context.js +++ b/lib/context.js @@ -15,7 +15,7 @@ module.exports = Context; * * @api private */ -function Context() {} +function Context () {} /** * Set or get the context `Runnable` to `runnable`. 
@@ -24,7 +24,7 @@ function Context() {} * @param {Runnable} runnable * @return {Context} */ -Context.prototype.runnable = function(runnable) { +Context.prototype.runnable = function (runnable) { if (!arguments.length) { return this._runnable; } @@ -39,7 +39,7 @@ Context.prototype.runnable = function(runnable) { * @param {number} ms * @return {Context} self */ -Context.prototype.timeout = function(ms) { +Context.prototype.timeout = function (ms) { if (!arguments.length) { return this.runnable().timeout(); } @@ -54,7 +54,7 @@ Context.prototype.timeout = function(ms) { * @param {boolean} enabled * @return {Context} self */ -Context.prototype.enableTimeouts = function(enabled) { +Context.prototype.enableTimeouts = function (enabled) { this.runnable().enableTimeouts(enabled); return this; }; @@ -66,7 +66,7 @@ Context.prototype.enableTimeouts = function(enabled) { * @param {number} ms * @return {Context} self */ -Context.prototype.slow = function(ms) { +Context.prototype.slow = function (ms) { this.runnable().slow(ms); return this; }; @@ -77,7 +77,7 @@ Context.prototype.slow = function(ms) { * @api private * @return {Context} self */ -Context.prototype.skip = function() { +Context.prototype.skip = function () { this.runnable().skip(); return this; }; @@ -89,7 +89,7 @@ Context.prototype.skip = function() { * @param {number} n * @return {Context} self */ -Context.prototype.retries = function(n) { +Context.prototype.retries = function (n) { if (!arguments.length) { return this.runnable().retries(); } @@ -103,8 +103,8 @@ Context.prototype.retries = function(n) { * @api private * @return {string} */ -Context.prototype.inspect = function() { - return JSON.stringify(this, function(key, val) { +Context.prototype.inspect = function () { + return JSON.stringify(this, function (key, val) { return key === 'runnable' || key === 'test' ? undefined : val; }, 2); }; diff --git a/lib/hook.js b/lib/hook.js --- a/lib/hook.js +++ b/lib/hook.js @@ -18,7 +18,7 @@ module.exports = Hook; * @param {Function} fn * @api private */ -function Hook(title, fn) { +function Hook (title, fn) { Runnable.call(this, title, fn); this.type = 'hook'; } @@ -35,7 +35,7 @@ inherits(Hook, Runnable); * @return {Error} * @api public */ -Hook.prototype.error = function(err) { +Hook.prototype.error = function (err) { if (!arguments.length) { err = this._error; this._error = null; diff --git a/lib/interfaces/bdd.js b/lib/interfaces/bdd.js --- a/lib/interfaces/bdd.js +++ b/lib/interfaces/bdd.js @@ -21,10 +21,10 @@ var Test = require('../test'); * * @param {Suite} suite Root suite. */ -module.exports = function(suite) { +module.exports = function (suite) { var suites = [suite]; - suite.on('pre-require', function(context, file, mocha) { + suite.on('pre-require', function (context, file, mocha) { var common = require('./common')(suites, context, mocha); context.before = common.before; @@ -38,7 +38,7 @@ module.exports = function(suite) { * and/or tests. */ - context.describe = context.context = function(title, fn) { + context.describe = context.context = function (title, fn) { return common.suite.create({ title: title, file: file, @@ -50,7 +50,7 @@ module.exports = function(suite) { * Pending describe. */ - context.xdescribe = context.xcontext = context.describe.skip = function(title, fn) { + context.xdescribe = context.xcontext = context.describe.skip = function (title, fn) { return common.suite.skip({ title: title, file: file, @@ -62,7 +62,7 @@ module.exports = function(suite) { * Exclusive suite. 
*/ - context.describe.only = function(title, fn) { + context.describe.only = function (title, fn) { return common.suite.only({ title: title, file: file, @@ -76,7 +76,7 @@ module.exports = function(suite) { * acting as a thunk. */ - context.it = context.specify = function(title, fn) { + context.it = context.specify = function (title, fn) { var suite = suites[0]; if (suite.isPending()) { fn = null; @@ -91,7 +91,7 @@ module.exports = function(suite) { * Exclusive test-case. */ - context.it.only = function(title, fn) { + context.it.only = function (title, fn) { return common.test.only(mocha, context.it(title, fn)); }; @@ -99,14 +99,14 @@ module.exports = function(suite) { * Pending test case. */ - context.xit = context.xspecify = context.it.skip = function(title) { + context.xit = context.xspecify = context.it.skip = function (title) { context.it(title); }; /** * Number of attempts to retry. */ - context.it.retries = function(n) { + context.it.retries = function (n) { context.retries(n); }; }); diff --git a/lib/interfaces/common.js b/lib/interfaces/common.js --- a/lib/interfaces/common.js +++ b/lib/interfaces/common.js @@ -10,7 +10,7 @@ var Suite = require('../suite'); * @param {Mocha} mocha * @return {Object} An object containing common functions. */ -module.exports = function(suites, context, mocha) { +module.exports = function (suites, context, mocha) { return { /** * This is only present if flag --delay is passed into Mocha. It triggers @@ -19,8 +19,8 @@ module.exports = function(suites, context, mocha) { * @param {Suite} suite The root wuite. * @return {Function} A function which runs the root suite */ - runWithSuite: function runWithSuite(suite) { - return function run() { + runWithSuite: function runWithSuite (suite) { + return function run () { suite.run(); }; }, @@ -31,7 +31,7 @@ module.exports = function(suites, context, mocha) { * @param {string} name * @param {Function} fn */ - before: function(name, fn) { + before: function (name, fn) { suites[0].beforeAll(name, fn); }, @@ -41,7 +41,7 @@ module.exports = function(suites, context, mocha) { * @param {string} name * @param {Function} fn */ - after: function(name, fn) { + after: function (name, fn) { suites[0].afterAll(name, fn); }, @@ -51,7 +51,7 @@ module.exports = function(suites, context, mocha) { * @param {string} name * @param {Function} fn */ - beforeEach: function(name, fn) { + beforeEach: function (name, fn) { suites[0].beforeEach(name, fn); }, @@ -61,7 +61,7 @@ module.exports = function(suites, context, mocha) { * @param {string} name * @param {Function} fn */ - afterEach: function(name, fn) { + afterEach: function (name, fn) { suites[0].afterEach(name, fn); }, @@ -73,7 +73,7 @@ module.exports = function(suites, context, mocha) { * @param {Object} opts * @returns {Suite} */ - only: function only(opts) { + only: function only (opts) { mocha.options.hasOnly = true; opts.isOnly = true; return this.create(opts); @@ -86,7 +86,7 @@ module.exports = function(suites, context, mocha) { * @param {Object} opts * @returns {Suite} */ - skip: function skip(opts) { + skip: function skip (opts) { opts.pending = true; return this.create(opts); }, @@ -101,7 +101,7 @@ module.exports = function(suites, context, mocha) { * @param {boolean} [opts.isOnly] Is Suite exclusive? 
* @returns {Suite} */ - create: function create(opts) { + create: function create (opts) { var suite = Suite.create(suites[0], opts.title); suite.pending = Boolean(opts.pending); suite.file = opts.file; @@ -130,7 +130,7 @@ module.exports = function(suites, context, mocha) { * @param {Function} test * @returns {*} */ - only: function(mocha, test) { + only: function (mocha, test) { test.parent._onlyTests = test.parent._onlyTests.concat(test); mocha.options.hasOnly = true; return test; @@ -141,7 +141,7 @@ module.exports = function(suites, context, mocha) { * * @param {string} title */ - skip: function(title) { + skip: function (title) { context.test(title); }, @@ -150,7 +150,7 @@ module.exports = function(suites, context, mocha) { * * @param {number} n */ - retries: function(n) { + retries: function (n) { context.retries(n); } } diff --git a/lib/interfaces/exports.js b/lib/interfaces/exports.js --- a/lib/interfaces/exports.js +++ b/lib/interfaces/exports.js @@ -22,12 +22,12 @@ var Test = require('../test'); * * @param {Suite} suite Root suite. */ -module.exports = function(suite) { +module.exports = function (suite) { var suites = [suite]; suite.on('require', visit); - function visit(obj, file) { + function visit (obj, file) { var suite; for (var key in obj) { if (typeof obj[key] === 'function') { diff --git a/lib/interfaces/qunit.js b/lib/interfaces/qunit.js --- a/lib/interfaces/qunit.js +++ b/lib/interfaces/qunit.js @@ -29,10 +29,10 @@ var Test = require('../test'); * * @param {Suite} suite Root suite. */ -module.exports = function(suite) { +module.exports = function (suite) { var suites = [suite]; - suite.on('pre-require', function(context, file, mocha) { + suite.on('pre-require', function (context, file, mocha) { var common = require('./common')(suites, context, mocha); context.before = common.before; @@ -44,7 +44,7 @@ module.exports = function(suite) { * Describe a "suite" with the given `title`. */ - context.suite = function(title) { + context.suite = function (title) { if (suites.length > 1) { suites.shift(); } @@ -59,7 +59,7 @@ module.exports = function(suite) { * Exclusive Suite. */ - context.suite.only = function(title) { + context.suite.only = function (title) { if (suites.length > 1) { suites.shift(); } @@ -76,7 +76,7 @@ module.exports = function(suite) { * acting as a thunk. */ - context.test = function(title, fn) { + context.test = function (title, fn) { var test = new Test(title, fn); test.file = file; suites[0].addTest(test); @@ -87,7 +87,7 @@ module.exports = function(suite) { * Exclusive test-case. */ - context.test.only = function(title, fn) { + context.test.only = function (title, fn) { return common.test.only(mocha, context.test(title, fn)); }; diff --git a/lib/interfaces/tdd.js b/lib/interfaces/tdd.js --- a/lib/interfaces/tdd.js +++ b/lib/interfaces/tdd.js @@ -29,10 +29,10 @@ var Test = require('../test'); * * @param {Suite} suite Root suite. */ -module.exports = function(suite) { +module.exports = function (suite) { var suites = [suite]; - suite.on('pre-require', function(context, file, mocha) { + suite.on('pre-require', function (context, file, mocha) { var common = require('./common')(suites, context, mocha); context.setup = common.beforeEach; @@ -45,7 +45,7 @@ module.exports = function(suite) { * Describe a "suite" with the given `title` and callback `fn` containing * nested suites and/or tests. 
*/ - context.suite = function(title, fn) { + context.suite = function (title, fn) { return common.suite.create({ title: title, file: file, @@ -56,7 +56,7 @@ module.exports = function(suite) { /** * Pending suite. */ - context.suite.skip = function(title, fn) { + context.suite.skip = function (title, fn) { return common.suite.skip({ title: title, file: file, @@ -67,7 +67,7 @@ module.exports = function(suite) { /** * Exclusive test-case. */ - context.suite.only = function(title, fn) { + context.suite.only = function (title, fn) { return common.suite.only({ title: title, file: file, @@ -79,7 +79,7 @@ module.exports = function(suite) { * Describe a specification or test-case with the given `title` and * callback `fn` acting as a thunk. */ - context.test = function(title, fn) { + context.test = function (title, fn) { var suite = suites[0]; if (suite.isPending()) { fn = null; @@ -94,7 +94,7 @@ module.exports = function(suite) { * Exclusive test-case. */ - context.test.only = function(title, fn) { + context.test.only = function (title, fn) { return common.test.only(mocha, context.test(title, fn)); }; diff --git a/lib/mocha.js b/lib/mocha.js --- a/lib/mocha.js +++ b/lib/mocha.js @@ -49,7 +49,7 @@ exports.Test = require('./test'); * @param {string} name * @return {string} */ -function image(name) { +function image (name) { return path.join(__dirname, '../images', name + '.png'); } @@ -72,7 +72,7 @@ function image(name) { * @param {Object} options * @api public */ -function Mocha(options) { +function Mocha (options) { options = options || {}; this.files = []; this.options = options; @@ -107,7 +107,7 @@ function Mocha(options) { * @api public * @param {boolean} [bail] */ -Mocha.prototype.bail = function(bail) { +Mocha.prototype.bail = function (bail) { if (!arguments.length) { bail = true; } @@ -121,7 +121,7 @@ Mocha.prototype.bail = function(bail) { * @api public * @param {string} file */ -Mocha.prototype.addFile = function(file) { +Mocha.prototype.addFile = function (file) { this.files.push(file); return this; }; @@ -135,7 +135,7 @@ Mocha.prototype.addFile = function(file) { * @param {string|Function} reporter name or constructor * @param {Object} reporterOptions optional options */ -Mocha.prototype.reporter = function(reporter, reporterOptions) { +Mocha.prototype.reporter = function (reporter, reporterOptions) { if (typeof reporter === 'function') { this._reporter = reporter; } else { @@ -156,9 +156,9 @@ Mocha.prototype.reporter = function(reporter, reporterOptions) { } } if (!_reporter && reporter === 'teamcity') { - console.warn('The Teamcity reporter was moved to a package named ' - + 'mocha-teamcity-reporter ' - + '(https://npmjs.org/package/mocha-teamcity-reporter).'); + console.warn('The Teamcity reporter was moved to a package named ' + + 'mocha-teamcity-reporter ' + + '(https://npmjs.org/package/mocha-teamcity-reporter).'); } if (!_reporter) { throw new Error('invalid reporter "' + reporter + '"'); @@ -175,7 +175,7 @@ Mocha.prototype.reporter = function(reporter, reporterOptions) { * @api public * @param {string} bdd */ -Mocha.prototype.ui = function(name) { +Mocha.prototype.ui = function (name) { name = name || 'bdd'; this._ui = exports.interfaces[name]; if (!this._ui) { @@ -187,7 +187,7 @@ Mocha.prototype.ui = function(name) { } this._ui = this._ui(this.suite); - this.suite.on('pre-require', function(context) { + this.suite.on('pre-require', function (context) { exports.afterEach = context.afterEach || context.teardown; exports.after = context.after || context.suiteTeardown; 
exports.beforeEach = context.beforeEach || context.setup; @@ -211,10 +211,10 @@ Mocha.prototype.ui = function(name) { * * @api private */ -Mocha.prototype.loadFiles = function(fn) { +Mocha.prototype.loadFiles = function (fn) { var self = this; var suite = this.suite; - this.files.forEach(function(file) { + this.files.forEach(function (file) { file = path.resolve(file); suite.emit('pre-require', global, file, self); suite.emit('require', require(file), file, self); @@ -228,10 +228,10 @@ Mocha.prototype.loadFiles = function(fn) { * * @api private */ -Mocha.prototype._growl = function(runner, reporter) { +Mocha.prototype._growl = function (runner, reporter) { var notify = require('growl'); - runner.on('end', function() { + runner.on('end', function () { var stats = reporter.stats; if (stats.failures) { var msg = stats.failures + ' of ' + runner.total + ' tests failed'; @@ -253,7 +253,7 @@ Mocha.prototype._growl = function(runner, reporter) { * @param str * @returns {Mocha} */ -Mocha.prototype.fgrep = function(str) { +Mocha.prototype.fgrep = function (str) { return this.grep(new RegExp(escapeRe(str))); }; @@ -266,7 +266,7 @@ Mocha.prototype.fgrep = function(str) { * @param {RegExp|string} re * @return {Mocha} */ -Mocha.prototype.grep = function(re) { +Mocha.prototype.grep = function (re) { if (utils.isString(re)) { // extract args if it's regex-like, i.e: [string, pattern, flag] var arg = re.match(/^\/(.*)\/(g|i|)$|.*/); @@ -282,7 +282,7 @@ Mocha.prototype.grep = function(re) { * @return {Mocha} * @api public */ -Mocha.prototype.invert = function() { +Mocha.prototype.invert = function () { this.options.invert = true; return this; }; @@ -296,7 +296,7 @@ Mocha.prototype.invert = function() { * @param {boolean} ignore * @return {Mocha} */ -Mocha.prototype.ignoreLeaks = function(ignore) { +Mocha.prototype.ignoreLeaks = function (ignore) { this.options.ignoreLeaks = Boolean(ignore); return this; }; @@ -307,7 +307,7 @@ Mocha.prototype.ignoreLeaks = function(ignore) { * @return {Mocha} * @api public */ -Mocha.prototype.checkLeaks = function() { +Mocha.prototype.checkLeaks = function () { this.options.ignoreLeaks = false; return this; }; @@ -318,7 +318,7 @@ Mocha.prototype.checkLeaks = function() { * @return {Mocha} * @api public */ -Mocha.prototype.fullTrace = function() { +Mocha.prototype.fullTrace = function () { this.options.fullStackTrace = true; return this; }; @@ -329,7 +329,7 @@ Mocha.prototype.fullTrace = function() { * @return {Mocha} * @api public */ -Mocha.prototype.growl = function() { +Mocha.prototype.growl = function () { this.options.growl = true; return this; }; @@ -343,7 +343,7 @@ Mocha.prototype.growl = function() { * @param {Array|string} globals * @return {Mocha} */ -Mocha.prototype.globals = function(globals) { +Mocha.prototype.globals = function (globals) { this.options.globals = (this.options.globals || []).concat(globals); return this; }; @@ -357,7 +357,7 @@ Mocha.prototype.globals = function(globals) { * @param {boolean} colors * @return {Mocha} */ -Mocha.prototype.useColors = function(colors) { +Mocha.prototype.useColors = function (colors) { if (colors !== undefined) { this.options.useColors = colors; } @@ -373,7 +373,7 @@ Mocha.prototype.useColors = function(colors) { * @param {boolean} inlineDiffs * @return {Mocha} */ -Mocha.prototype.useInlineDiffs = function(inlineDiffs) { +Mocha.prototype.useInlineDiffs = function (inlineDiffs) { this.options.useInlineDiffs = inlineDiffs !== undefined && inlineDiffs; return this; }; @@ -387,7 +387,7 @@ Mocha.prototype.useInlineDiffs = 
function(inlineDiffs) { * @param {number} timeout * @return {Mocha} */ -Mocha.prototype.timeout = function(timeout) { +Mocha.prototype.timeout = function (timeout) { this.suite.timeout(timeout); return this; }; @@ -399,7 +399,7 @@ Mocha.prototype.timeout = function(timeout) { * @return {Mocha} * @api public */ -Mocha.prototype.retries = function(n) { +Mocha.prototype.retries = function (n) { this.suite.retries(n); return this; }; @@ -413,7 +413,7 @@ Mocha.prototype.retries = function(n) { * @param {number} slow * @return {Mocha} */ -Mocha.prototype.slow = function(slow) { +Mocha.prototype.slow = function (slow) { this.suite.slow(slow); return this; }; @@ -427,7 +427,7 @@ Mocha.prototype.slow = function(slow) { * @param {boolean} enabled * @return {Mocha} */ -Mocha.prototype.enableTimeouts = function(enabled) { +Mocha.prototype.enableTimeouts = function (enabled) { this.suite.enableTimeouts(arguments.length && enabled !== undefined ? enabled : true); return this; }; @@ -438,7 +438,7 @@ Mocha.prototype.enableTimeouts = function(enabled) { * @return {Mocha} * @api public */ -Mocha.prototype.asyncOnly = function() { +Mocha.prototype.asyncOnly = function () { this.options.asyncOnly = true; return this; }; @@ -448,7 +448,7 @@ Mocha.prototype.asyncOnly = function() { * * @api public */ -Mocha.prototype.noHighlighting = function() { +Mocha.prototype.noHighlighting = function () { this.options.noHighlighting = true; return this; }; @@ -459,7 +459,7 @@ Mocha.prototype.noHighlighting = function() { * @return {Mocha} * @api public */ -Mocha.prototype.allowUncaught = function() { +Mocha.prototype.allowUncaught = function () { this.options.allowUncaught = true; return this; }; @@ -468,7 +468,7 @@ Mocha.prototype.allowUncaught = function() { * Delay root suite execution. 
* @returns {Mocha} */ -Mocha.prototype.delay = function delay() { +Mocha.prototype.delay = function delay () { this.options.delay = true; return this; }; @@ -480,7 +480,7 @@ Mocha.prototype.delay = function delay() { * @param {Function} fn * @return {Runner} */ -Mocha.prototype.run = function(fn) { +Mocha.prototype.run = function (fn) { if (this.files.length) { this.loadFiles(); } @@ -508,7 +508,7 @@ Mocha.prototype.run = function(fn) { } exports.reporters.Base.inlineDiffs = options.useInlineDiffs; - function done(failures) { + function done (failures) { if (reporter.done) { reporter.done(failures, fn); } else { diff --git a/lib/ms.js b/lib/ms.js --- a/lib/ms.js +++ b/lib/ms.js @@ -20,7 +20,7 @@ var y = d * 365.25; * @param {Object} options * @return {string|number} */ -module.exports = function(val, options) { +module.exports = function (val, options) { options = options || {}; if (typeof val === 'string') { return parse(val); @@ -36,7 +36,7 @@ module.exports = function(val, options) { * @param {string} str * @return {number} */ -function parse(str) { +function parse (str) { var match = (/^((?:\d+)?\.?\d+) *(ms|seconds?|s|minutes?|m|hours?|h|days?|d|years?|y)?$/i).exec(str); if (!match) { return; @@ -78,7 +78,7 @@ function parse(str) { * @param {number} ms * @return {string} */ -function shortFormat(ms) { +function shortFormat (ms) { if (ms >= d) { return Math.round(ms / d) + 'd'; } @@ -101,12 +101,12 @@ function shortFormat(ms) { * @param {number} ms * @return {string} */ -function longFormat(ms) { - return plural(ms, d, 'day') - || plural(ms, h, 'hour') - || plural(ms, m, 'minute') - || plural(ms, s, 'second') - || ms + ' ms'; +function longFormat (ms) { + return plural(ms, d, 'day') || + plural(ms, h, 'hour') || + plural(ms, m, 'minute') || + plural(ms, s, 'second') || + ms + ' ms'; } /** @@ -117,7 +117,7 @@ function longFormat(ms) { * @param {number} n * @param {string} name */ -function plural(ms, n, name) { +function plural (ms, n, name) { if (ms < n) { return; } diff --git a/lib/pending.js b/lib/pending.js --- a/lib/pending.js +++ b/lib/pending.js @@ -10,6 +10,6 @@ module.exports = Pending; * * @param {string} message */ -function Pending(message) { +function Pending (message) { this.message = message; } diff --git a/lib/reporters/base.js b/lib/reporters/base.js --- a/lib/reporters/base.js +++ b/lib/reporters/base.js @@ -101,7 +101,7 @@ if (process.platform === 'win32') { * @return {string} * @api private */ -var color = exports.color = function(type, str) { +var color = exports.color = function (type, str) { if (!exports.useColors) { return String(str); } @@ -127,23 +127,23 @@ if (isatty) { */ exports.cursor = { - hide: function() { + hide: function () { isatty && process.stdout.write('\u001b[?25l'); }, - show: function() { + show: function () { isatty && process.stdout.write('\u001b[?25h'); }, - deleteLine: function() { + deleteLine: function () { isatty && process.stdout.write('\u001b[2K'); }, - beginningOfLine: function() { + beginningOfLine: function () { isatty && process.stdout.write('\u001b[0G'); }, - CR: function() { + CR: function () { if (isatty) { exports.cursor.deleteLine(); exports.cursor.beginningOfLine(); @@ -160,13 +160,13 @@ exports.cursor = { * @api public */ -exports.list = function(failures) { +exports.list = function (failures) { console.log(); - failures.forEach(function(test, i) { + failures.forEach(function (test, i) { // format - var fmt = color('error title', ' %s) %s:\n') - + color('error message', ' %s') - + color('error stack', '\n%s\n'); + var fmt = 
color('error title', ' %s) %s:\n') + + color('error message', ' %s') + + color('error stack', '\n%s\n'); // msg var msg; @@ -236,7 +236,7 @@ exports.list = function(failures) { * @api public */ -function Base(runner) { +function Base (runner) { var stats = this.stats = { suites: 0, tests: 0, passes: 0, pending: 0, failures: 0 }; var failures = this.failures = []; @@ -247,21 +247,21 @@ function Base(runner) { runner.stats = stats; - runner.on('start', function() { + runner.on('start', function () { stats.start = new Date(); }); - runner.on('suite', function(suite) { + runner.on('suite', function (suite) { stats.suites = stats.suites || 0; suite.root || stats.suites++; }); - runner.on('test end', function() { + runner.on('test end', function () { stats.tests = stats.tests || 0; stats.tests++; }); - runner.on('pass', function(test) { + runner.on('pass', function (test) { stats.passes = stats.passes || 0; if (test.duration > test.slow()) { @@ -275,19 +275,19 @@ function Base(runner) { stats.passes++; }); - runner.on('fail', function(test, err) { + runner.on('fail', function (test, err) { stats.failures = stats.failures || 0; stats.failures++; test.err = err; failures.push(test); }); - runner.on('end', function() { + runner.on('end', function () { stats.end = new Date(); stats.duration = new Date() - stats.start; }); - runner.on('pending', function() { + runner.on('pending', function () { stats.pending++; }); } @@ -298,16 +298,16 @@ function Base(runner) { * * @api public */ -Base.prototype.epilogue = function() { +Base.prototype.epilogue = function () { var stats = this.stats; var fmt; console.log(); // passes - fmt = color('bright pass', ' ') - + color('green', ' %d passing') - + color('light', ' (%s)'); + fmt = color('bright pass', ' ') + + color('green', ' %d passing') + + color('light', ' (%s)'); console.log(fmt, stats.passes || 0, @@ -315,8 +315,8 @@ Base.prototype.epilogue = function() { // pending if (stats.pending) { - fmt = color('pending', ' ') - + color('pending', ' %d pending'); + fmt = color('pending', ' ') + + color('pending', ' %d pending'); console.log(fmt, stats.pending); } @@ -342,7 +342,7 @@ Base.prototype.epilogue = function() { * @param {string} len * @return {string} */ -function pad(str, len) { +function pad (str, len) { str = String(str); return Array(len - str.length + 1).join(' ') + str; } @@ -355,26 +355,26 @@ function pad(str, len) { * @param {boolean} escape * @return {string} Diff */ -function inlineDiff(err, escape) { +function inlineDiff (err, escape) { var msg = errorDiff(err, 'WordsWithSpace', escape); // linenos var lines = msg.split('\n'); if (lines.length > 4) { var width = String(lines.length).length; - msg = lines.map(function(str, i) { + msg = lines.map(function (str, i) { return pad(++i, width) + ' |' + ' ' + str; }).join('\n'); } // legend - msg = '\n' - + color('diff removed', 'actual') - + ' ' - + color('diff added', 'expected') - + '\n\n' - + msg - + '\n'; + msg = '\n' + + color('diff removed', 'actual') + + ' ' + + color('diff added', 'expected') + + '\n\n' + + msg + + '\n'; // indent msg = msg.replace(/^/gm, ' '); @@ -389,9 +389,9 @@ function inlineDiff(err, escape) { * @param {boolean} escape * @return {string} The diff. 
*/ -function unifiedDiff(err, escape) { +function unifiedDiff (err, escape) { var indent = ' '; - function cleanUp(line) { + function cleanUp (line) { if (escape) { line = escapeInvisibles(line); } @@ -401,7 +401,7 @@ function unifiedDiff(err, escape) { if (line[0] === '-') { return indent + colorLines('diff removed', line); } - if (line.match(/\@\@/)) { + if (line.match(/@@/)) { return null; } if (line.match(/\\ No newline/)) { @@ -409,16 +409,16 @@ function unifiedDiff(err, escape) { } return indent + line; } - function notBlank(line) { + function notBlank (line) { return typeof line !== 'undefined' && line !== null; } var msg = diff.createPatch('string', err.actual, err.expected); var lines = msg.split('\n').splice(4); - return '\n ' - + colorLines('diff added', '+ expected') + ' ' - + colorLines('diff removed', '- actual') - + '\n\n' - + lines.map(cleanUp).filter(notBlank).join('\n'); + return '\n ' + + colorLines('diff added', '+ expected') + ' ' + + colorLines('diff removed', '- actual') + + '\n\n' + + lines.map(cleanUp).filter(notBlank).join('\n'); } /** @@ -430,10 +430,10 @@ function unifiedDiff(err, escape) { * @param {boolean} escape * @return {string} */ -function errorDiff(err, type, escape) { +function errorDiff (err, type, escape) { var actual = escape ? escapeInvisibles(err.actual) : err.actual; var expected = escape ? escapeInvisibles(err.expected) : err.expected; - return diff['diff' + type](actual, expected).map(function(str) { + return diff['diff' + type](actual, expected).map(function (str) { if (str.added) { return colorLines('diff added', str.value); } @@ -451,7 +451,7 @@ function errorDiff(err, type, escape) { * @param {string} line * @return {string} */ -function escapeInvisibles(line) { +function escapeInvisibles (line) { return line.replace(/\t/g, '<tab>') .replace(/\r/g, '<CR>') .replace(/\n/g, '<LF>\n'); @@ -465,8 +465,8 @@ function escapeInvisibles(line) { * @param {string} str * @return {string} */ -function colorLines(name, str) { - return str.split('\n').map(function(str) { +function colorLines (name, str) { + return str.split('\n').map(function (str) { return color(name, str); }).join('\n'); } @@ -484,6 +484,6 @@ var objToString = Object.prototype.toString; * @param {Object} b * @return {boolean} */ -function sameType(a, b) { +function sameType (a, b) { return objToString.call(a) === objToString.call(b); } diff --git a/lib/reporters/doc.js b/lib/reporters/doc.js --- a/lib/reporters/doc.js +++ b/lib/reporters/doc.js @@ -17,16 +17,16 @@ exports = module.exports = Doc; * @param {Runner} runner * @api public */ -function Doc(runner) { +function Doc (runner) { Base.call(this, runner); var indents = 2; - function indent() { + function indent () { return Array(indents).join(' '); } - runner.on('suite', function(suite) { + runner.on('suite', function (suite) { if (suite.root) { return; } @@ -37,7 +37,7 @@ function Doc(runner) { console.log('%s<dl>', indent()); }); - runner.on('suite end', function(suite) { + runner.on('suite end', function (suite) { if (suite.root) { return; } @@ -47,13 +47,13 @@ function Doc(runner) { --indents; }); - runner.on('pass', function(test) { + runner.on('pass', function (test) { console.log('%s <dt>%s</dt>', indent(), utils.escape(test.title)); var code = utils.escape(utils.clean(test.body)); console.log('%s <dd><pre><code>%s</code></pre></dd>', indent(), code); }); - runner.on('fail', function(test, err) { + runner.on('fail', function (test, err) { console.log('%s <dt class="error">%s</dt>', indent(), utils.escape(test.title)); var 
code = utils.escape(utils.clean(test.body)); console.log('%s <dd class="error"><pre><code>%s</code></pre></dd>', indent(), code); diff --git a/lib/reporters/dot.js b/lib/reporters/dot.js --- a/lib/reporters/dot.js +++ b/lib/reporters/dot.js @@ -18,25 +18,25 @@ exports = module.exports = Dot; * @api public * @param {Runner} runner */ -function Dot(runner) { +function Dot (runner) { Base.call(this, runner); var self = this; - var width = Base.window.width * .75 | 0; + var width = Base.window.width * 0.75 | 0; var n = -1; - runner.on('start', function() { + runner.on('start', function () { process.stdout.write('\n'); }); - runner.on('pending', function() { + runner.on('pending', function () { if (++n % width === 0) { process.stdout.write('\n '); } process.stdout.write(color('pending', Base.symbols.comma)); }); - runner.on('pass', function(test) { + runner.on('pass', function (test) { if (++n % width === 0) { process.stdout.write('\n '); } @@ -47,14 +47,14 @@ function Dot(runner) { } }); - runner.on('fail', function() { + runner.on('fail', function () { if (++n % width === 0) { process.stdout.write('\n '); } process.stdout.write(color('fail', Base.symbols.bang)); }); - runner.on('end', function() { + runner.on('end', function () { console.log(); self.epilogue(); }); diff --git a/lib/reporters/html.js b/lib/reporters/html.js --- a/lib/reporters/html.js +++ b/lib/reporters/html.js @@ -32,12 +32,12 @@ exports = module.exports = HTML; * Stats template. */ -var statsTemplate = '<ul id="mocha-stats">' - + '<li class="progress"><canvas width="40" height="40"></canvas></li>' - + '<li class="passes"><a href="javascript:void(0);">passes:</a> <em>0</em></li>' - + '<li class="failures"><a href="javascript:void(0);">failures:</a> <em>0</em></li>' - + '<li class="duration">duration: <em>0</em>s</li>' - + '</ul>'; +var statsTemplate = '<ul id="mocha-stats">' + + '<li class="progress"><canvas width="40" height="40"></canvas></li>' + + '<li class="passes"><a href="javascript:void(0);">passes:</a> <em>0</em></li>' + + '<li class="failures"><a href="javascript:void(0);">failures:</a> <em>0</em></li>' + + '<li class="duration">duration: <em>0</em>s</li>' + + '</ul>'; /** * Initialize a new `HTML` reporter. @@ -45,7 +45,7 @@ var statsTemplate = '<ul id="mocha-stats">' * @api public * @param {Runner} runner */ -function HTML(runner) { +function HTML (runner) { Base.call(this, runner); var self = this; @@ -80,7 +80,7 @@ function HTML(runner) { } // pass toggle - on(passesLink, 'click', function(evt) { + on(passesLink, 'click', function (evt) { evt.preventDefault(); unhide(); var name = (/pass/).test(report.className) ? '' : ' pass'; @@ -91,7 +91,7 @@ function HTML(runner) { }); // failure toggle - on(failuresLink, 'click', function(evt) { + on(failuresLink, 'click', function (evt) { evt.preventDefault(); unhide(); var name = (/fail/).test(report.className) ? 
'' : ' fail'; @@ -108,7 +108,7 @@ function HTML(runner) { progress.size(40); } - runner.on('suite', function(suite) { + runner.on('suite', function (suite) { if (suite.root) { return; } @@ -123,7 +123,7 @@ function HTML(runner) { el.appendChild(stack[0]); }); - runner.on('suite end', function(suite) { + runner.on('suite end', function (suite) { if (suite.root) { updateStats(); return; @@ -131,17 +131,17 @@ function HTML(runner) { stack.shift(); }); - runner.on('pass', function(test) { + runner.on('pass', function (test) { var url = self.testURL(test); - var markup = '<li class="test pass %e"><h2>%e<span class="duration">%ems</span> ' - + '<a href="%s" class="replay">‣</a></h2></li>'; + var markup = '<li class="test pass %e"><h2>%e<span class="duration">%ems</span> ' + + '<a href="%s" class="replay">‣</a></h2></li>'; var el = fragment(markup, test.speed, test.title, test.duration, url); self.addCodeToggle(el, test.body); appendToStack(el); updateStats(); }); - runner.on('fail', function(test) { + runner.on('fail', function (test) { var el = fragment('<li class="test fail"><h2>%e <a href="%e" class="replay">‣</a></h2></li>', test.title, self.testURL(test)); var stackString; // Note: Includes leading newline @@ -181,20 +181,20 @@ function HTML(runner) { updateStats(); }); - runner.on('pending', function(test) { + runner.on('pending', function (test) { var el = fragment('<li class="test pass pending"><h2>%e</h2></li>', test.title); appendToStack(el); updateStats(); }); - function appendToStack(el) { + function appendToStack (el) { // Don't call .appendChild if #mocha-report was already .shift()'ed off the stack. if (stack[0]) { stack[0].appendChild(el); } } - function updateStats() { + function updateStats () { // TODO: add to stats var percent = stats.tests / runner.total * 100 | 0; if (progress) { @@ -215,7 +215,7 @@ function HTML(runner) { * @param {string} s * @return {string} A new URL. */ -function makeUrl(s) { +function makeUrl (s) { var search = window.location.search; // Remove previous grep query parameter if present @@ -231,7 +231,7 @@ function makeUrl(s) { * * @param {Object} [suite] */ -HTML.prototype.suiteURL = function(suite) { +HTML.prototype.suiteURL = function (suite) { return makeUrl(suite.fullTitle()); }; @@ -240,7 +240,7 @@ HTML.prototype.suiteURL = function(suite) { * * @param {Object} [test] */ -HTML.prototype.testURL = function(test) { +HTML.prototype.testURL = function (test) { return makeUrl(test.fullTitle()); }; @@ -250,10 +250,10 @@ HTML.prototype.testURL = function(test) { * @param {HTMLLIElement} el * @param {string} contents */ -HTML.prototype.addCodeToggle = function(el, contents) { +HTML.prototype.addCodeToggle = function (el, contents) { var h2 = el.getElementsByTagName('h2')[0]; - on(h2, 'click', function() { + on(h2, 'click', function () { pre.style.display = pre.style.display === 'none' ? 
'block' : 'none'; }); @@ -267,7 +267,7 @@ HTML.prototype.addCodeToggle = function(el, contents) { * * @param {string} msg */ -function error(msg) { +function error (msg) { document.body.appendChild(fragment('<div id="mocha-error">%s</div>', msg)); } @@ -276,12 +276,12 @@ function error(msg) { * * @param {string} html */ -function fragment(html) { +function fragment (html) { var args = arguments; var div = document.createElement('div'); var i = 1; - div.innerHTML = html.replace(/%([se])/g, function(_, type) { + div.innerHTML = html.replace(/%([se])/g, function (_, type) { switch (type) { case 's': return String(args[i++]); case 'e': return escape(args[i++]); @@ -298,7 +298,7 @@ function fragment(html) { * * @param {text} classname */ -function hideSuitesWithout(classname) { +function hideSuitesWithout (classname) { var suites = document.getElementsByClassName('suite'); for (var i = 0; i < suites.length; i++) { var els = suites[i].getElementsByClassName(classname); @@ -311,7 +311,7 @@ function hideSuitesWithout(classname) { /** * Unhide .hidden suites. */ -function unhide() { +function unhide () { var els = document.getElementsByClassName('suite hidden'); for (var i = 0; i < els.length; ++i) { els[i].className = els[i].className.replace('suite hidden', 'suite'); @@ -324,7 +324,7 @@ function unhide() { * @param {HTMLElement} el * @param {string} contents */ -function text(el, contents) { +function text (el, contents) { if (el.textContent) { el.textContent = contents; } else { @@ -335,7 +335,7 @@ function text(el, contents) { /** * Listen on `event` with callback `fn`. */ -function on(el, event, fn) { +function on (el, event, fn) { if (el.addEventListener) { el.addEventListener(event, fn, false); } else { diff --git a/lib/reporters/json-stream.js b/lib/reporters/json-stream.js --- a/lib/reporters/json-stream.js +++ b/lib/reporters/json-stream.js @@ -17,28 +17,28 @@ exports = module.exports = List; * @api public * @param {Runner} runner */ -function List(runner) { +function List (runner) { Base.call(this, runner); var self = this; var total = runner.total; - runner.on('start', function() { + runner.on('start', function () { console.log(JSON.stringify(['start', { total: total }])); }); - runner.on('pass', function(test) { + runner.on('pass', function (test) { console.log(JSON.stringify(['pass', clean(test)])); }); - runner.on('fail', function(test, err) { + runner.on('fail', function (test, err) { test = clean(test); test.err = err.message; test.stack = err.stack || null; console.log(JSON.stringify(['fail', test])); }); - runner.on('end', function() { + runner.on('end', function () { process.stdout.write(JSON.stringify(['end', self.stats])); }); } @@ -51,7 +51,7 @@ function List(runner) { * @param {Object} test * @return {Object} */ -function clean(test) { +function clean (test) { return { title: test.title, fullTitle: test.fullTitle(), diff --git a/lib/reporters/json.js b/lib/reporters/json.js --- a/lib/reporters/json.js +++ b/lib/reporters/json.js @@ -16,7 +16,7 @@ exports = module.exports = JSONReporter; * @api public * @param {Runner} runner */ -function JSONReporter(runner) { +function JSONReporter (runner) { Base.call(this, runner); var self = this; @@ -25,23 +25,23 @@ function JSONReporter(runner) { var failures = []; var passes = []; - runner.on('test end', function(test) { + runner.on('test end', function (test) { tests.push(test); }); - runner.on('pass', function(test) { + runner.on('pass', function (test) { passes.push(test); }); - runner.on('fail', function(test) { + 
runner.on('fail', function (test) { failures.push(test); }); - runner.on('pending', function(test) { + runner.on('pending', function (test) { pending.push(test); }); - runner.on('end', function() { + runner.on('end', function () { var obj = { stats: self.stats, tests: tests.map(clean), @@ -64,7 +64,7 @@ function JSONReporter(runner) { * @param {Object} test * @return {Object} */ -function clean(test) { +function clean (test) { return { title: test.title, fullTitle: test.fullTitle(), @@ -81,9 +81,9 @@ function clean(test) { * @param {Error} err * @return {Object} */ -function errorJSON(err) { +function errorJSON (err) { var res = {}; - Object.getOwnPropertyNames(err).forEach(function(key) { + Object.getOwnPropertyNames(err).forEach(function (key) { res[key] = err[key]; }, err); return res; diff --git a/lib/reporters/landing.js b/lib/reporters/landing.js --- a/lib/reporters/landing.js +++ b/lib/reporters/landing.js @@ -37,28 +37,28 @@ Base.colors.runway = 90; * @api public * @param {Runner} runner */ -function Landing(runner) { +function Landing (runner) { Base.call(this, runner); var self = this; - var width = Base.window.width * .75 | 0; + var width = Base.window.width * 0.75 | 0; var total = runner.total; var stream = process.stdout; var plane = color('plane', '✈'); var crashed = -1; var n = 0; - function runway() { + function runway () { var buf = Array(width).join('-'); return ' ' + color('runway', buf); } - runner.on('start', function() { + runner.on('start', function () { stream.write('\n\n\n '); cursor.hide(); }); - runner.on('test end', function(test) { + runner.on('test end', function (test) { // check if the plane crashed var col = crashed === -1 ? width * ++n / total | 0 : crashed; @@ -79,7 +79,7 @@ function Landing(runner) { stream.write('\u001b[0m'); }); - runner.on('end', function() { + runner.on('end', function () { cursor.show(); console.log(); self.epilogue(); diff --git a/lib/reporters/list.js b/lib/reporters/list.js --- a/lib/reporters/list.js +++ b/lib/reporters/list.js @@ -19,35 +19,35 @@ exports = module.exports = List; * @api public * @param {Runner} runner */ -function List(runner) { +function List (runner) { Base.call(this, runner); var self = this; var n = 0; - runner.on('start', function() { + runner.on('start', function () { console.log(); }); - runner.on('test', function(test) { + runner.on('test', function (test) { process.stdout.write(color('pass', ' ' + test.fullTitle() + ': ')); }); - runner.on('pending', function(test) { - var fmt = color('checkmark', ' -') - + color('pending', ' %s'); + runner.on('pending', function (test) { + var fmt = color('checkmark', ' -') + + color('pending', ' %s'); console.log(fmt, test.fullTitle()); }); - runner.on('pass', function(test) { - var fmt = color('checkmark', ' ' + Base.symbols.dot) - + color('pass', ' %s: ') - + color(test.speed, '%dms'); + runner.on('pass', function (test) { + var fmt = color('checkmark', ' ' + Base.symbols.dot) + + color('pass', ' %s: ') + + color(test.speed, '%dms'); cursor.CR(); console.log(fmt, test.fullTitle(), test.duration); }); - runner.on('fail', function(test) { + runner.on('fail', function (test) { cursor.CR(); console.log(color('fail', ' %d) %s'), ++n, test.fullTitle()); }); diff --git a/lib/reporters/markdown.js b/lib/reporters/markdown.js --- a/lib/reporters/markdown.js +++ b/lib/reporters/markdown.js @@ -23,29 +23,29 @@ exports = module.exports = Markdown; * @api public * @param {Runner} runner */ -function Markdown(runner) { +function Markdown (runner) { Base.call(this, runner); var 
level = 0; var buf = ''; - function title(str) { + function title (str) { return Array(level).join('#') + ' ' + str; } - function mapTOC(suite, obj) { + function mapTOC (suite, obj) { var ret = obj; var key = SUITE_PREFIX + suite.title; obj = obj[key] = obj[key] || { suite: suite }; - suite.suites.forEach(function(suite) { + suite.suites.forEach(function (suite) { mapTOC(suite, obj); }); return ret; } - function stringifyTOC(obj, level) { + function stringifyTOC (obj, level) { ++level; var buf = ''; var link; @@ -63,25 +63,25 @@ function Markdown(runner) { return buf; } - function generateTOC(suite) { + function generateTOC (suite) { var obj = mapTOC(suite, {}); return stringifyTOC(obj, 0); } generateTOC(runner.suite); - runner.on('suite', function(suite) { + runner.on('suite', function (suite) { ++level; var slug = utils.slug(suite.fullTitle()); buf += '<a name="' + slug + '"></a>' + '\n'; buf += title(suite.title) + '\n'; }); - runner.on('suite end', function() { + runner.on('suite end', function () { --level; }); - runner.on('pass', function(test) { + runner.on('pass', function (test) { var code = utils.clean(test.body); buf += test.title + '.\n'; buf += '\n```js\n'; @@ -89,7 +89,7 @@ function Markdown(runner) { buf += '```\n\n'; }); - runner.on('end', function() { + runner.on('end', function () { process.stdout.write('# TOC\n'); process.stdout.write(generateTOC(runner.suite)); process.stdout.write(buf); diff --git a/lib/reporters/min.js b/lib/reporters/min.js --- a/lib/reporters/min.js +++ b/lib/reporters/min.js @@ -17,10 +17,10 @@ exports = module.exports = Min; * @api public * @param {Runner} runner */ -function Min(runner) { +function Min (runner) { Base.call(this, runner); - runner.on('start', function() { + runner.on('start', function () { // clear screen process.stdout.write('\u001b[2J'); // set cursor position diff --git a/lib/reporters/nyan.js b/lib/reporters/nyan.js --- a/lib/reporters/nyan.js +++ b/lib/reporters/nyan.js @@ -18,11 +18,11 @@ exports = module.exports = NyanCat; * @api public */ -function NyanCat(runner) { +function NyanCat (runner) { Base.call(this, runner); var self = this; - var width = Base.window.width * .75 | 0; + var width = Base.window.width * 0.75 | 0; var nyanCatWidth = this.nyanCatWidth = 11; this.colorIndex = 0; @@ -33,24 +33,24 @@ function NyanCat(runner) { this.trajectories = [[], [], [], []]; this.trajectoryWidthMax = (width - nyanCatWidth); - runner.on('start', function() { + runner.on('start', function () { Base.cursor.hide(); self.draw(); }); - runner.on('pending', function() { + runner.on('pending', function () { self.draw(); }); - runner.on('pass', function() { + runner.on('pass', function () { self.draw(); }); - runner.on('fail', function() { + runner.on('fail', function () { self.draw(); }); - runner.on('end', function() { + runner.on('end', function () { Base.cursor.show(); for (var i = 0; i < self.numberOfLines; i++) { write('\n'); @@ -70,7 +70,7 @@ inherits(NyanCat, Base); * @api private */ -NyanCat.prototype.draw = function() { +NyanCat.prototype.draw = function () { this.appendRainbow(); this.drawScoreboard(); this.drawRainbow(); @@ -85,10 +85,10 @@ NyanCat.prototype.draw = function() { * @api private */ -NyanCat.prototype.drawScoreboard = function() { +NyanCat.prototype.drawScoreboard = function () { var stats = this.stats; - function draw(type, n) { + function draw (type, n) { write(' '); write(Base.color(type, n)); write('\n'); @@ -108,7 +108,7 @@ NyanCat.prototype.drawScoreboard = function() { * @api private */ 
-NyanCat.prototype.appendRainbow = function() { +NyanCat.prototype.appendRainbow = function () { var segment = this.tick ? '_' : '-'; var rainbowified = this.rainbowify(segment); @@ -127,10 +127,10 @@ NyanCat.prototype.appendRainbow = function() { * @api private */ -NyanCat.prototype.drawRainbow = function() { +NyanCat.prototype.drawRainbow = function () { var self = this; - this.trajectories.forEach(function(line) { + this.trajectories.forEach(function (line) { write('\u001b[' + self.scoreboardWidth + 'C'); write(line.join('')); write('\n'); @@ -144,7 +144,7 @@ NyanCat.prototype.drawRainbow = function() { * * @api private */ -NyanCat.prototype.drawNyanCat = function() { +NyanCat.prototype.drawNyanCat = function () { var self = this; var startWidth = this.scoreboardWidth + this.trajectories[0].length; var dist = '\u001b[' + startWidth + 'C'; @@ -180,7 +180,7 @@ NyanCat.prototype.drawNyanCat = function() { * @return {string} */ -NyanCat.prototype.face = function() { +NyanCat.prototype.face = function () { var stats = this.stats; if (stats.failures) { return '( x .x)'; @@ -199,7 +199,7 @@ NyanCat.prototype.face = function() { * @param {number} n */ -NyanCat.prototype.cursorUp = function(n) { +NyanCat.prototype.cursorUp = function (n) { write('\u001b[' + n + 'A'); }; @@ -210,7 +210,7 @@ NyanCat.prototype.cursorUp = function(n) { * @param {number} n */ -NyanCat.prototype.cursorDown = function(n) { +NyanCat.prototype.cursorDown = function (n) { write('\u001b[' + n + 'B'); }; @@ -220,7 +220,7 @@ NyanCat.prototype.cursorDown = function(n) { * @api private * @return {Array} */ -NyanCat.prototype.generateColors = function() { +NyanCat.prototype.generateColors = function () { var colors = []; for (var i = 0; i < (6 * 7); i++) { @@ -242,7 +242,7 @@ NyanCat.prototype.generateColors = function() { * @param {string} str * @return {string} */ -NyanCat.prototype.rainbowify = function(str) { +NyanCat.prototype.rainbowify = function (str) { if (!Base.useColors) { return str; } @@ -256,6 +256,6 @@ NyanCat.prototype.rainbowify = function(str) { * * @param {string} string A message to write to stdout. 
*/ -function write(string) { +function write (string) { process.stdout.write(string); } diff --git a/lib/reporters/progress.js b/lib/reporters/progress.js --- a/lib/reporters/progress.js +++ b/lib/reporters/progress.js @@ -26,11 +26,11 @@ Base.colors.progress = 90; * @param {Runner} runner * @param {Object} options */ -function Progress(runner, options) { +function Progress (runner, options) { Base.call(this, runner); var self = this; - var width = Base.window.width * .50 | 0; + var width = Base.window.width * 0.50 | 0; var total = runner.total; var complete = 0; var lastN = -1; @@ -44,13 +44,13 @@ function Progress(runner, options) { options.verbose = false; // tests started - runner.on('start', function() { + runner.on('start', function () { console.log(); cursor.hide(); }); // tests complete - runner.on('test end', function() { + runner.on('test end', function () { complete++; var percent = complete / total; @@ -76,7 +76,7 @@ function Progress(runner, options) { // tests are complete, output some stats // and the failures if any - runner.on('end', function() { + runner.on('end', function () { cursor.show(); console.log(); self.epilogue(); diff --git a/lib/reporters/spec.js b/lib/reporters/spec.js --- a/lib/reporters/spec.js +++ b/lib/reporters/spec.js @@ -18,55 +18,55 @@ exports = module.exports = Spec; * @api public * @param {Runner} runner */ -function Spec(runner) { +function Spec (runner) { Base.call(this, runner); var self = this; var indents = 0; var n = 0; - function indent() { + function indent () { return Array(indents).join(' '); } - runner.on('start', function() { + runner.on('start', function () { console.log(); }); - runner.on('suite', function(suite) { + runner.on('suite', function (suite) { ++indents; console.log(color('suite', '%s%s'), indent(), suite.title); }); - runner.on('suite end', function() { + runner.on('suite end', function () { --indents; if (indents === 1) { console.log(); } }); - runner.on('pending', function(test) { + runner.on('pending', function (test) { var fmt = indent() + color('pending', ' - %s'); console.log(fmt, test.title); }); - runner.on('pass', function(test) { + runner.on('pass', function (test) { var fmt; if (test.speed === 'fast') { - fmt = indent() - + color('checkmark', ' ' + Base.symbols.ok) - + color('pass', ' %s'); + fmt = indent() + + color('checkmark', ' ' + Base.symbols.ok) + + color('pass', ' %s'); console.log(fmt, test.title); } else { - fmt = indent() - + color('checkmark', ' ' + Base.symbols.ok) - + color('pass', ' %s') - + color(test.speed, ' (%dms)'); + fmt = indent() + + color('checkmark', ' ' + Base.symbols.ok) + + color('pass', ' %s') + + color(test.speed, ' (%dms)'); console.log(fmt, test.title, test.duration); } }); - runner.on('fail', function(test) { + runner.on('fail', function (test) { console.log(indent() + color('fail', ' %d) %s'), ++n, test.title); }); diff --git a/lib/reporters/tap.js b/lib/reporters/tap.js --- a/lib/reporters/tap.js +++ b/lib/reporters/tap.js @@ -16,32 +16,32 @@ exports = module.exports = TAP; * @api public * @param {Runner} runner */ -function TAP(runner) { +function TAP (runner) { Base.call(this, runner); var n = 1; var passes = 0; var failures = 0; - runner.on('start', function() { + runner.on('start', function () { var total = runner.grepTotal(runner.suite); console.log('%d..%d', 1, total); }); - runner.on('test end', function() { + runner.on('test end', function () { ++n; }); - runner.on('pending', function(test) { + runner.on('pending', function (test) { console.log('ok %d %s # SKIP -', n, 
title(test)); }); - runner.on('pass', function(test) { + runner.on('pass', function (test) { passes++; console.log('ok %d %s', n, title(test)); }); - runner.on('fail', function(test, err) { + runner.on('fail', function (test, err) { failures++; console.log('not ok %d %s', n, title(test)); if (err.stack) { @@ -49,7 +49,7 @@ function TAP(runner) { } }); - runner.on('end', function() { + runner.on('end', function () { console.log('# tests ' + (passes + failures)); console.log('# pass ' + passes); console.log('# fail ' + failures); @@ -63,6 +63,6 @@ function TAP(runner) { * @param {Object} test * @return {String} */ -function title(test) { +function title (test) { return test.fullTitle().replace(/#/g, ''); } diff --git a/lib/reporters/xunit.js b/lib/reporters/xunit.js --- a/lib/reporters/xunit.js +++ b/lib/reporters/xunit.js @@ -34,7 +34,7 @@ exports = module.exports = XUnit; * @api public * @param {Runner} runner */ -function XUnit(runner, options) { +function XUnit (runner, options) { Base.call(this, runner); var stats = this.stats; @@ -49,19 +49,19 @@ function XUnit(runner, options) { self.fileStream = fs.createWriteStream(options.reporterOptions.output); } - runner.on('pending', function(test) { + runner.on('pending', function (test) { tests.push(test); }); - runner.on('pass', function(test) { + runner.on('pass', function (test) { tests.push(test); }); - runner.on('fail', function(test) { + runner.on('fail', function (test) { tests.push(test); }); - runner.on('end', function() { + runner.on('end', function () { self.write(tag('testsuite', { name: 'Mocha Tests', tests: stats.tests, @@ -72,7 +72,7 @@ function XUnit(runner, options) { time: (stats.duration / 1000) || 0 }, false)); - tests.forEach(function(t) { + tests.forEach(function (t) { self.test(t); }); @@ -91,9 +91,9 @@ inherits(XUnit, Base); * @param failures * @param {Function} fn */ -XUnit.prototype.done = function(failures, fn) { +XUnit.prototype.done = function (failures, fn) { if (this.fileStream) { - this.fileStream.end(function() { + this.fileStream.end(function () { fn(failures); }); } else { @@ -106,7 +106,7 @@ XUnit.prototype.done = function(failures, fn) { * * @param {string} line */ -XUnit.prototype.write = function(line) { +XUnit.prototype.write = function (line) { if (this.fileStream) { this.fileStream.write(line + '\n'); } else if (typeof process === 'object' && process.stdout) { @@ -121,7 +121,7 @@ XUnit.prototype.write = function(line) { * * @param {Test} test */ -XUnit.prototype.test = function(test) { +XUnit.prototype.test = function (test) { var attrs = { classname: test.parent.fullTitle(), name: test.title, @@ -147,7 +147,7 @@ XUnit.prototype.test = function(test) { * @param content * @return {string} */ -function tag(name, attrs, close, content) { +function tag (name, attrs, close, content) { var end = close ? '/>' : '>'; var pairs = []; var tag; diff --git a/lib/runnable.js b/lib/runnable.js --- a/lib/runnable.js +++ b/lib/runnable.js @@ -43,7 +43,7 @@ module.exports = Runnable; * @param {string} title * @param {Function} fn */ -function Runnable(title, fn) { +function Runnable (title, fn) { this.title = title; this.fn = fn; this.body = (fn || '').toString(); @@ -73,7 +73,7 @@ Runnable.prototype = create(EventEmitter.prototype, { * @param {number|string} ms * @return {Runnable|number} ms or Runnable instance. 
*/ -Runnable.prototype.timeout = function(ms) { +Runnable.prototype.timeout = function (ms) { if (!arguments.length) { return this._timeout; } @@ -99,7 +99,7 @@ Runnable.prototype.timeout = function(ms) { * @param {number|string} ms * @return {Runnable|number} ms or Runnable instance. */ -Runnable.prototype.slow = function(ms) { +Runnable.prototype.slow = function (ms) { if (typeof ms === 'undefined') { return this._slow; } @@ -118,7 +118,7 @@ Runnable.prototype.slow = function(ms) { * @param {boolean} enabled * @return {Runnable|boolean} enabled or Runnable instance. */ -Runnable.prototype.enableTimeouts = function(enabled) { +Runnable.prototype.enableTimeouts = function (enabled) { if (!arguments.length) { return this._enableTimeouts; } @@ -132,7 +132,7 @@ Runnable.prototype.enableTimeouts = function(enabled) { * * @api public */ -Runnable.prototype.skip = function() { +Runnable.prototype.skip = function () { throw new Pending('sync skip'); }; @@ -141,7 +141,7 @@ Runnable.prototype.skip = function() { * * @api private */ -Runnable.prototype.isPending = function() { +Runnable.prototype.isPending = function () { return this.pending || (this.parent && this.parent.isPending()); }; @@ -150,7 +150,7 @@ Runnable.prototype.isPending = function() { * * @api private */ -Runnable.prototype.retries = function(n) { +Runnable.prototype.retries = function (n) { if (!arguments.length) { return this._retries; } @@ -162,7 +162,7 @@ Runnable.prototype.retries = function(n) { * * @api private */ -Runnable.prototype.currentRetry = function(n) { +Runnable.prototype.currentRetry = function (n) { if (!arguments.length) { return this._currentRetry; } @@ -176,7 +176,7 @@ Runnable.prototype.currentRetry = function(n) { * @api public * @return {string} */ -Runnable.prototype.fullTitle = function() { +Runnable.prototype.fullTitle = function () { return this.parent.fullTitle() + ' ' + this.title; }; @@ -185,7 +185,7 @@ Runnable.prototype.fullTitle = function() { * * @api private */ -Runnable.prototype.clearTimeout = function() { +Runnable.prototype.clearTimeout = function () { clearTimeout(this.timer); }; @@ -195,8 +195,8 @@ Runnable.prototype.clearTimeout = function() { * @api private * @return {string} */ -Runnable.prototype.inspect = function() { - return JSON.stringify(this, function(key, val) { +Runnable.prototype.inspect = function () { + return JSON.stringify(this, function (key, val) { if (key[0] === '_') { return; } @@ -215,7 +215,7 @@ Runnable.prototype.inspect = function() { * * @api private */ -Runnable.prototype.resetTimeout = function() { +Runnable.prototype.resetTimeout = function () { var self = this; var ms = this.timeout() || 1e9; @@ -223,7 +223,7 @@ Runnable.prototype.resetTimeout = function() { return; } this.clearTimeout(); - this.timer = setTimeout(function() { + this.timer = setTimeout(function () { if (!self._enableTimeouts) { return; } @@ -238,7 +238,7 @@ Runnable.prototype.resetTimeout = function() { * @api private * @param {string[]} globals */ -Runnable.prototype.globals = function(globals) { +Runnable.prototype.globals = function (globals) { if (!arguments.length) { return this._allowedGlobals; } @@ -251,7 +251,7 @@ Runnable.prototype.globals = function(globals) { * @param {Function} fn * @api private */ -Runnable.prototype.run = function(fn) { +Runnable.prototype.run = function (fn) { var self = this; var start = new Date(); var ctx = this.ctx; @@ -264,7 +264,7 @@ Runnable.prototype.run = function(fn) { } // called multiple times - function multiple(err) { + function multiple (err) { if 
(emitted) { return; } @@ -273,7 +273,7 @@ Runnable.prototype.run = function(fn) { } // finished - function done(err) { + function done (err) { var ms = self.timeout(); if (self.timedOut) { return; @@ -299,7 +299,7 @@ Runnable.prototype.run = function(fn) { this.resetTimeout(); // allows skip() to be used in an explicit async context - this.skip = function asyncSkip() { + this.skip = function asyncSkip () { done(new Pending('async skip call')); // halt execution. the Runnable will be marked pending // by the previous call, and the uncaught handler will ignore @@ -335,18 +335,18 @@ Runnable.prototype.run = function(fn) { done(utils.getError(err)); } - function callFn(fn) { + function callFn (fn) { var result = fn.call(ctx); if (result && typeof result.then === 'function') { self.resetTimeout(); result - .then(function() { + .then(function () { done(); // Return null so libraries like bluebird do not warn about // subsequently constructed Promises. return null; }, - function(reason) { + function (reason) { done(reason || new Error('Promise rejected with no or falsy reason')); }); } else { @@ -358,15 +358,15 @@ Runnable.prototype.run = function(fn) { } } - function callFnAsync(fn) { - var result = fn.call(ctx, function(err) { + function callFnAsync (fn) { + var result = fn.call(ctx, function (err) { if (err instanceof Error || toString.call(err) === '[object Error]') { return done(err); } if (err) { if (Object.prototype.toString.call(err) === '[object Object]') { - return done(new Error('done() invoked with non-Error: ' - + JSON.stringify(err))); + return done(new Error('done() invoked with non-Error: ' + + JSON.stringify(err))); } return done(new Error('done() invoked with non-Error: ' + err)); } diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -61,7 +61,7 @@ module.exports = Runner; * @param {boolean} [delay] Whether or not to delay execution of root suite * until ready. */ -function Runner(suite, delay) { +function Runner (suite, delay) { var self = this; this._globals = []; this._abort = false; @@ -70,10 +70,10 @@ function Runner(suite, delay) { this.started = false; this.total = suite.total(); this.failures = 0; - this.on('test end', function(test) { + this.on('test end', function (test) { self.checkGlobals(test); }); - this.on('hook end', function(hook) { + this.on('hook end', function (hook) { self.checkGlobals(hook); }); this._defaultGrep = /.*/; @@ -106,7 +106,7 @@ inherits(Runner, EventEmitter); * @param {boolean} invert * @return {Runner} Runner instance. */ -Runner.prototype.grep = function(re, invert) { +Runner.prototype.grep = function (re, invert) { debug('grep %s', re); this._grep = re; this._invert = invert; @@ -124,11 +124,11 @@ Runner.prototype.grep = function(re, invert) { * @param {Suite} suite * @return {number} */ -Runner.prototype.grepTotal = function(suite) { +Runner.prototype.grepTotal = function (suite) { var self = this; var total = 0; - suite.eachTest(function(test) { + suite.eachTest(function (test) { var match = self._grep.test(test.fullTitle()); if (self._invert) { match = !match; @@ -147,7 +147,7 @@ Runner.prototype.grepTotal = function(suite) { * @return {Array} * @api private */ -Runner.prototype.globalProps = function() { +Runner.prototype.globalProps = function () { var props = keys(global); // non-enumerables @@ -170,7 +170,7 @@ Runner.prototype.globalProps = function() { * @param {Array} arr * @return {Runner} Runner instance. 
*/ -Runner.prototype.globals = function(arr) { +Runner.prototype.globals = function (arr) { if (!arguments.length) { return this._globals; } @@ -184,7 +184,7 @@ Runner.prototype.globals = function(arr) { * * @api private */ -Runner.prototype.checkGlobals = function(test) { +Runner.prototype.checkGlobals = function (test) { if (this.ignoreLeaks) { return; } @@ -219,7 +219,7 @@ Runner.prototype.checkGlobals = function(test) { * @param {Test} test * @param {Error} err */ -Runner.prototype.fail = function(test, err) { +Runner.prototype.fail = function (test, err) { if (test.isPending()) { return; } @@ -258,7 +258,7 @@ Runner.prototype.fail = function(test, err) { * @param {Hook} hook * @param {Error} err */ -Runner.prototype.failHook = function(hook, err) { +Runner.prototype.failHook = function (hook, err) { if (hook.ctx && hook.ctx.currentTest) { hook.originalTitle = hook.originalTitle || hook.title; hook.title = hook.originalTitle + ' for "' + hook.ctx.currentTest.title + '"'; @@ -278,12 +278,12 @@ Runner.prototype.failHook = function(hook, err) { * @param {Function} fn */ -Runner.prototype.hook = function(name, fn) { +Runner.prototype.hook = function (name, fn) { var suite = this.suite; var hooks = suite['_' + name]; var self = this; - function next(i) { + function next (i) { var hook = hooks[i]; if (!hook) { return fn(); @@ -295,12 +295,12 @@ Runner.prototype.hook = function(name, fn) { self.emit('hook', hook); if (!hook.listeners('error').length) { - hook.on('error', function(err) { + hook.on('error', function (err) { self.failHook(hook, err); }); } - hook.run(function(err) { + hook.run(function (err) { var testError = hook.error(); if (testError) { self.fail(self.test, testError); @@ -310,7 +310,7 @@ Runner.prototype.hook = function(name, fn) { if (name === 'beforeEach' || name === 'afterEach') { self.test.pending = true; } else { - utils.forEach(suite.tests, function(test) { + utils.forEach(suite.tests, function (test) { test.pending = true; }); // a pending hook won't be executed twice. 
@@ -329,7 +329,7 @@ Runner.prototype.hook = function(name, fn) { }); } - Runner.immediately(function() { + Runner.immediately(function () { next(0); }); }; @@ -343,11 +343,11 @@ Runner.prototype.hook = function(name, fn) { * @param {Array} suites * @param {Function} fn */ -Runner.prototype.hooks = function(name, suites, fn) { +Runner.prototype.hooks = function (name, suites, fn) { var self = this; var orig = this.suite; - function next(suite) { + function next (suite) { self.suite = suite; if (!suite) { @@ -355,7 +355,7 @@ Runner.prototype.hooks = function(name, suites, fn) { return fn(); } - self.hook(name, function(err) { + self.hook(name, function (err) { if (err) { var errSuite = self.suite; self.suite = orig; @@ -376,7 +376,7 @@ Runner.prototype.hooks = function(name, suites, fn) { * @param {Function} fn * @api private */ -Runner.prototype.hookUp = function(name, fn) { +Runner.prototype.hookUp = function (name, fn) { var suites = [this.suite].concat(this.parents()).reverse(); this.hooks(name, suites, fn); }; @@ -388,7 +388,7 @@ Runner.prototype.hookUp = function(name, fn) { * @param {Function} fn * @api private */ -Runner.prototype.hookDown = function(name, fn) { +Runner.prototype.hookDown = function (name, fn) { var suites = [this.suite].concat(this.parents()); this.hooks(name, suites, fn); }; @@ -400,7 +400,7 @@ Runner.prototype.hookDown = function(name, fn) { * @return {Array} * @api private */ -Runner.prototype.parents = function() { +Runner.prototype.parents = function () { var suite = this.suite; var suites = []; while (suite.parent) { @@ -416,7 +416,7 @@ Runner.prototype.parents = function() { * @param {Function} fn * @api private */ -Runner.prototype.runTest = function(fn) { +Runner.prototype.runTest = function (fn) { var self = this; var test = this.test; @@ -432,7 +432,7 @@ Runner.prototype.runTest = function(fn) { return test.run(fn); } try { - test.on('error', function(err) { + test.on('error', function (err) { self.fail(test, err); }); test.run(fn); @@ -448,12 +448,12 @@ Runner.prototype.runTest = function(fn) { * @param {Suite} suite * @param {Function} fn */ -Runner.prototype.runTests = function(suite, fn) { +Runner.prototype.runTests = function (suite, fn) { var self = this; var tests = suite.tests.slice(); var test; - function hookErr(_, errSuite, after) { + function hookErr (_, errSuite, after) { // before/after Each hook for errSuite failed: var orig = self.suite; @@ -463,7 +463,7 @@ Runner.prototype.runTests = function(suite, fn) { if (self.suite) { // call hookUp afterEach - self.hookUp('afterEach', function(err2, errSuite2) { + self.hookUp('afterEach', function (err2, errSuite2) { self.suite = orig; // some hooks may fail even now if (err2) { @@ -479,7 +479,7 @@ Runner.prototype.runTests = function(suite, fn) { } } - function next(err, errSuite) { + function next (err, errSuite) { // if we bail after first err if (self.failures && suite._bail) { return fn(); @@ -531,7 +531,7 @@ Runner.prototype.runTests = function(suite, fn) { // execute test and hook(s) self.emit('test', self.test = test); - self.hookDown('beforeEach', function(err, errSuite) { + self.hookDown('beforeEach', function (err, errSuite) { if (test.isPending()) { self.emit('pending', test); self.emit('test end', test); @@ -541,7 +541,7 @@ Runner.prototype.runTests = function(suite, fn) { return hookErr(err, errSuite, false); } self.currentRunnable = self.test; - self.runTest(function(err) { + self.runTest(function (err) { test = self.test; if (err) { var retry = test.currentRetry(); @@ -588,7 +588,7 
@@ Runner.prototype.runTests = function(suite, fn) { * @param {Suite} suite * @param {Function} fn */ -Runner.prototype.runSuite = function(suite, fn) { +Runner.prototype.runSuite = function (suite, fn) { var i = 0; var self = this; var total = this.grepTotal(suite); @@ -602,7 +602,7 @@ Runner.prototype.runSuite = function(suite, fn) { this.emit('suite', this.suite = suite); - function next(errSuite) { + function next (errSuite) { if (errSuite) { // current suite failed on a hook from errSuite if (errSuite === suite) { @@ -628,7 +628,7 @@ Runner.prototype.runSuite = function(suite, fn) { // huge recursive loop and thus a maximum call stack error. // See comment in `this.runTests()` for more information. if (self._grep !== self._defaultGrep) { - Runner.immediately(function() { + Runner.immediately(function () { self.runSuite(curr, next); }); } else { @@ -636,7 +636,7 @@ Runner.prototype.runSuite = function(suite, fn) { } } - function done(errSuite) { + function done (errSuite) { self.suite = suite; self.nextSuite = next; @@ -650,7 +650,7 @@ Runner.prototype.runSuite = function(suite, fn) { // remove reference to test delete self.test; - self.hook('afterAll', function() { + self.hook('afterAll', function () { self.emit('suite end', suite); fn(errSuite); }); @@ -659,7 +659,7 @@ Runner.prototype.runSuite = function(suite, fn) { this.nextSuite = next; - this.hook('beforeAll', function(err) { + this.hook('beforeAll', function (err) { if (err) { return done(); } @@ -673,9 +673,9 @@ Runner.prototype.runSuite = function(suite, fn) { * @param {Error} err * @api private */ -Runner.prototype.uncaught = function(err) { +Runner.prototype.uncaught = function (err) { if (err) { - debug('uncaught exception %s', err !== function() { + debug('uncaught exception %s', err !== function () { return this; }.call(err) ? err : (err.message || err)); } else { @@ -746,8 +746,8 @@ Runner.prototype.uncaught = function(err) { * * @param {Suite} suite */ -function cleanSuiteReferences(suite) { - function cleanArrReferences(arr) { +function cleanSuiteReferences (suite) { + function cleanArrReferences (arr) { for (var i = 0; i < arr.length; i++) { delete arr[i].fn; } @@ -784,7 +784,7 @@ function cleanSuiteReferences(suite) { * @param {Function} fn * @return {Runner} Runner instance. */ -Runner.prototype.run = function(fn) { +Runner.prototype.run = function (fn) { var self = this; var rootSuite = this.suite; @@ -793,16 +793,16 @@ Runner.prototype.run = function(fn) { filterOnly(rootSuite); } - fn = fn || function() {}; + fn = fn || function () {}; - function uncaught(err) { + function uncaught (err) { self.uncaught(err); } - function start() { + function start () { self.started = true; self.emit('start'); - self.runSuite(rootSuite, function() { + self.runSuite(rootSuite, function () { debug('finished running'); self.emit('end'); }); @@ -814,7 +814,7 @@ Runner.prototype.run = function(fn) { this.on('suite end', cleanSuiteReferences); // callback - this.on('end', function() { + this.on('end', function () { debug('end'); process.removeListener('uncaughtException', uncaught); fn(self.failures); @@ -841,7 +841,7 @@ Runner.prototype.run = function(fn) { * @api public * @return {Runner} Runner instance. 
*/ -Runner.prototype.abort = function() { +Runner.prototype.abort = function () { debug('aborting'); this._abort = true; @@ -855,7 +855,7 @@ Runner.prototype.abort = function() { * @returns {Boolean} * @api private */ -function filterOnly(suite) { +function filterOnly (suite) { if (suite._onlyTests.length) { // If the suite contains `only` tests, run those and ignore any nested suites. suite.tests = suite._onlyTests; @@ -863,7 +863,7 @@ function filterOnly(suite) { } else { // Otherwise, do not run any of the tests in this suite. suite.tests = []; - utils.forEach(suite._onlySuites, function(onlySuite) { + utils.forEach(suite._onlySuites, function (onlySuite) { // If there are other `only` tests/suites nested in the current `only` suite, then filter that `only` suite. // Otherwise, all of the tests on this `only` suite should be run, so don't filter it. if (hasOnly(onlySuite)) { @@ -871,7 +871,7 @@ function filterOnly(suite) { } }); // Run the `only` suites, as well as any other suites that have `only` tests/suites as descendants. - suite.suites = filter(suite.suites, function(childSuite) { + suite.suites = filter(suite.suites, function (childSuite) { return indexOf(suite._onlySuites, childSuite) !== -1 || filterOnly(childSuite); }); } @@ -886,7 +886,7 @@ function filterOnly(suite) { * @returns {Boolean} * @api private */ -function hasOnly(suite) { +function hasOnly (suite) { return suite._onlyTests.length || suite._onlySuites.length || some(suite.suites, hasOnly); } @@ -898,8 +898,8 @@ function hasOnly(suite) { * @param {Array} globals * @return {Array} */ -function filterLeaks(ok, globals) { - return filter(globals, function(key) { +function filterLeaks (ok, globals) { + return filter(globals, function (key) { // Firefox and Chrome exposes iframes as index inside the window object if (/^\d+/.test(key)) { return false; @@ -923,7 +923,7 @@ function filterLeaks(ok, globals) { return false; } - var matched = filter(ok, function(ok) { + var matched = filter(ok, function (ok) { if (~ok.indexOf('*')) { return key.indexOf(ok.split('*')[0]) === 0; } @@ -939,10 +939,10 @@ function filterLeaks(ok, globals) { * @return {Array} * @api private */ -function extraGlobals() { +function extraGlobals () { if (typeof process === 'object' && typeof process.version === 'string') { var parts = process.version.split('.'); - var nodeVersion = utils.reduce(parts, function(a, v) { + var nodeVersion = utils.reduce(parts, function (a, v) { return a << 8 | v; }); diff --git a/lib/suite.js b/lib/suite.js --- a/lib/suite.js +++ b/lib/suite.js @@ -25,7 +25,7 @@ exports = module.exports = Suite; * @param {string} title * @return {Suite} */ -exports.create = function(parent, title) { +exports.create = function (parent, title) { var suite = new Suite(title, parent.ctx); suite.parent = parent; title = suite.fullTitle(); @@ -40,12 +40,12 @@ exports.create = function(parent, title) { * @param {string} title * @param {Context} parentContext */ -function Suite(title, parentContext) { +function Suite (title, parentContext) { if (!utils.isString(title)) { throw new Error('Suite `title` should be a "string" but "' + typeof title + '" was given instead.'); } this.title = title; - function Context() {} + function Context () {} Context.prototype = parentContext; this.ctx = new Context(); this.suites = []; @@ -77,7 +77,7 @@ inherits(Suite, EventEmitter); * @api private * @return {Suite} */ -Suite.prototype.clone = function() { +Suite.prototype.clone = function () { var suite = new Suite(this.title); debug('clone'); suite.ctx = 
this.ctx; @@ -96,7 +96,7 @@ Suite.prototype.clone = function() { * @param {number|string} ms * @return {Suite|number} for chaining */ -Suite.prototype.timeout = function(ms) { +Suite.prototype.timeout = function (ms) { if (!arguments.length) { return this._timeout; } @@ -118,7 +118,7 @@ Suite.prototype.timeout = function(ms) { * @param {number|string} n * @return {Suite|number} for chaining */ -Suite.prototype.retries = function(n) { +Suite.prototype.retries = function (n) { if (!arguments.length) { return this._retries; } @@ -134,7 +134,7 @@ Suite.prototype.retries = function(n) { * @param {boolean} enabled * @return {Suite|boolean} self or enabled */ -Suite.prototype.enableTimeouts = function(enabled) { +Suite.prototype.enableTimeouts = function (enabled) { if (!arguments.length) { return this._enableTimeouts; } @@ -150,7 +150,7 @@ Suite.prototype.enableTimeouts = function(enabled) { * @param {number|string} ms * @return {Suite|number} for chaining */ -Suite.prototype.slow = function(ms) { +Suite.prototype.slow = function (ms) { if (!arguments.length) { return this._slow; } @@ -169,7 +169,7 @@ Suite.prototype.slow = function(ms) { * @param {boolean} bail * @return {Suite|number} for chaining */ -Suite.prototype.bail = function(bail) { +Suite.prototype.bail = function (bail) { if (!arguments.length) { return this._bail; } @@ -183,7 +183,7 @@ Suite.prototype.bail = function(bail) { * * @api private */ -Suite.prototype.isPending = function() { +Suite.prototype.isPending = function () { return this.pending || (this.parent && this.parent.isPending()); }; @@ -195,7 +195,7 @@ Suite.prototype.isPending = function() { * @param {Function} fn * @return {Suite} for chaining */ -Suite.prototype.beforeAll = function(title, fn) { +Suite.prototype.beforeAll = function (title, fn) { if (this.isPending()) { return this; } @@ -225,7 +225,7 @@ Suite.prototype.beforeAll = function(title, fn) { * @param {Function} fn * @return {Suite} for chaining */ -Suite.prototype.afterAll = function(title, fn) { +Suite.prototype.afterAll = function (title, fn) { if (this.isPending()) { return this; } @@ -255,7 +255,7 @@ Suite.prototype.afterAll = function(title, fn) { * @param {Function} fn * @return {Suite} for chaining */ -Suite.prototype.beforeEach = function(title, fn) { +Suite.prototype.beforeEach = function (title, fn) { if (this.isPending()) { return this; } @@ -285,7 +285,7 @@ Suite.prototype.beforeEach = function(title, fn) { * @param {Function} fn * @return {Suite} for chaining */ -Suite.prototype.afterEach = function(title, fn) { +Suite.prototype.afterEach = function (title, fn) { if (this.isPending()) { return this; } @@ -314,7 +314,7 @@ Suite.prototype.afterEach = function(title, fn) { * @param {Suite} suite * @return {Suite} for chaining */ -Suite.prototype.addSuite = function(suite) { +Suite.prototype.addSuite = function (suite) { suite.parent = this; suite.timeout(this.timeout()); suite.retries(this.retries()); @@ -333,7 +333,7 @@ Suite.prototype.addSuite = function(suite) { * @param {Test} test * @return {Suite} for chaining */ -Suite.prototype.addTest = function(test) { +Suite.prototype.addTest = function (test) { test.parent = this; test.timeout(this.timeout()); test.retries(this.retries()); @@ -352,7 +352,7 @@ Suite.prototype.addTest = function(test) { * @api public * @return {string} */ -Suite.prototype.fullTitle = function() { +Suite.prototype.fullTitle = function () { if (this.parent) { var full = this.parent.fullTitle(); if (full) { @@ -368,8 +368,8 @@ Suite.prototype.fullTitle = function() { * 
@api public * @return {number} */ -Suite.prototype.total = function() { - return utils.reduce(this.suites, function(sum, suite) { +Suite.prototype.total = function () { + return utils.reduce(this.suites, function (sum, suite) { return sum + suite.total(); }, 0) + this.tests.length; }; @@ -382,9 +382,9 @@ Suite.prototype.total = function() { * @param {Function} fn * @return {Suite} */ -Suite.prototype.eachTest = function(fn) { +Suite.prototype.eachTest = function (fn) { utils.forEach(this.tests, fn); - utils.forEach(this.suites, function(suite) { + utils.forEach(this.suites, function (suite) { suite.eachTest(fn); }); return this; @@ -393,7 +393,7 @@ Suite.prototype.eachTest = function(fn) { /** * This will run the root suite if we happen to be running in delayed mode. */ -Suite.prototype.run = function run() { +Suite.prototype.run = function run () { if (this.root) { this.emit('run'); } diff --git a/lib/utils.js b/lib/utils.js --- a/lib/utils.js +++ b/lib/utils.js @@ -31,7 +31,7 @@ exports.inherits = require('util').inherits; * @param {string} html * @return {string} */ -exports.escape = function(html) { +exports.escape = function (html) { return String(html) .replace(/&/g, '&amp;') .replace(/"/g, '&quot;') @@ -47,7 +47,7 @@ exports.escape = function(html) { * @param {Function} fn * @param {Object} scope */ -exports.forEach = function(arr, fn, scope) { +exports.forEach = function (arr, fn, scope) { for (var i = 0, l = arr.length; i < l; i++) { fn.call(scope, arr[i], i); } @@ -60,7 +60,7 @@ exports.forEach = function(arr, fn, scope) { * @param {Object} obj * @return {boolean} */ -exports.isString = function(obj) { +exports.isString = function (obj) { return typeof obj === 'string'; }; @@ -73,7 +73,7 @@ exports.isString = function(obj) { * @param {Object} scope * @return {Array} */ -exports.map = function(arr, fn, scope) { +exports.map = function (arr, fn, scope) { var result = []; for (var i = 0, l = arr.length; i < l; i++) { result.push(fn.call(scope, arr[i], i, arr)); @@ -90,7 +90,7 @@ exports.map = function(arr, fn, scope) { * @param {number} start * @return {number} */ -var indexOf = exports.indexOf = function(arr, obj, start) { +var indexOf = exports.indexOf = function (arr, obj, start) { for (var i = start || 0, l = arr.length; i < l; i++) { if (arr[i] === obj) { return i; @@ -108,7 +108,7 @@ var indexOf = exports.indexOf = function(arr, obj, start) { * @param {Object} val Initial value. * @return {*} */ -var reduce = exports.reduce = function(arr, fn, val) { +var reduce = exports.reduce = function (arr, fn, val) { var rval = val; for (var i = 0, l = arr.length; i < l; i++) { @@ -126,7 +126,7 @@ var reduce = exports.reduce = function(arr, fn, val) { * @param {Function} fn * @return {Array} */ -exports.filter = function(arr, fn) { +exports.filter = function (arr, fn) { var ret = []; for (var i = 0, l = arr.length; i < l; i++) { @@ -147,7 +147,7 @@ exports.filter = function(arr, fn) { * @param {Function} fn * @return {Array} */ -exports.some = function(arr, fn) { +exports.some = function (arr, fn) { for (var i = 0, l = arr.length; i < l; i++) { if (fn(arr[i])) { return true; @@ -163,7 +163,7 @@ exports.some = function(arr, fn) { * @param {Object} obj * @return {Array} keys */ -exports.keys = typeof Object.keys === 'function' ? Object.keys : function(obj) { +exports.keys = typeof Object.keys === 'function' ? 
Object.keys : function (obj) { var keys = []; var has = Object.prototype.hasOwnProperty; // for `window` on <=IE8 @@ -184,11 +184,11 @@ exports.keys = typeof Object.keys === 'function' ? Object.keys : function(obj) { * @param {Array} files * @param {Function} fn */ -exports.watch = function(files, fn) { +exports.watch = function (files, fn) { var options = { interval: 100 }; - files.forEach(function(file) { + files.forEach(function (file) { debug('file %s', file); - watchFile(file, options, function(curr, prev) { + watchFile(file, options, function (curr, prev) { if (prev.mtime < curr.mtime) { fn(file); } @@ -203,7 +203,7 @@ exports.watch = function(files, fn) { * @param {Object} obj * @return {Boolean} */ -var isArray = typeof Array.isArray === 'function' ? Array.isArray : function(obj) { +var isArray = typeof Array.isArray === 'function' ? Array.isArray : function (obj) { return Object.prototype.toString.call(obj) === '[object Array]'; }; @@ -215,7 +215,7 @@ exports.isArray = isArray; * @type {Function} */ if (typeof Buffer !== 'undefined' && Buffer.prototype) { - Buffer.prototype.toJSON = Buffer.prototype.toJSON || function() { + Buffer.prototype.toJSON = Buffer.prototype.toJSON || function () { return Array.prototype.slice.call(this, 0); }; } @@ -227,7 +227,7 @@ if (typeof Buffer !== 'undefined' && Buffer.prototype) { * @param {string} path * @return {boolean} */ -function ignored(path) { +function ignored (path) { return !~ignore.indexOf(path); } @@ -240,7 +240,7 @@ function ignored(path) { * @param {Array} [ret=[]] * @return {Array} */ -exports.files = function(dir, ext, ret) { +exports.files = function (dir, ext, ret) { ret = ret || []; ext = ext || ['js']; @@ -248,7 +248,7 @@ exports.files = function(dir, ext, ret) { readdirSync(dir) .filter(ignored) - .forEach(function(path) { + .forEach(function (path) { path = join(dir, path); if (statSync(path).isDirectory()) { exports.files(path, ext, ret); @@ -267,7 +267,7 @@ exports.files = function(dir, ext, ret) { * @param {string} str * @return {string} */ -exports.slug = function(str) { +exports.slug = function (str) { return str .toLowerCase() .replace(/ +/g, '-') @@ -280,7 +280,7 @@ exports.slug = function(str) { * @param {string} str * @return {string} */ -exports.clean = function(str) { +exports.clean = function (str) { str = str .replace(/\r\n?|[\n\u2028\u2029]/g, '\n').replace(/^\uFEFF/, '') // (traditional)-> space/name parameters body (lambda)-> parameters body multi-statement/single keep body content @@ -288,7 +288,7 @@ exports.clean = function(str) { var spaces = str.match(/^\n?( *)/)[1].length; var tabs = str.match(/^\n?(\t*)/)[1].length; - var re = new RegExp('^\n?' + (tabs ? '\t' : ' ') + '{' + (tabs ? tabs : spaces) + '}', 'gm'); + var re = new RegExp('^\n?' + (tabs ? 
'\t' : ' ') + '{' + (tabs || spaces) + '}', 'gm'); str = str.replace(re, ''); @@ -302,7 +302,7 @@ exports.clean = function(str) { * @param {string} str * @return {string} */ -exports.trim = function(str) { +exports.trim = function (str) { return str.replace(/^\s+|\s+$/g, ''); }; @@ -313,8 +313,8 @@ exports.trim = function(str) { * @param {string} qs * @return {Object} */ -exports.parseQuery = function(qs) { - return reduce(qs.replace('?', '').split('&'), function(obj, pair) { +exports.parseQuery = function (qs) { + return reduce(qs.replace('?', '').split('&'), function (obj, pair) { var i = pair.indexOf('='); var key = pair.slice(0, i); var val = pair.slice(++i); @@ -331,7 +331,7 @@ exports.parseQuery = function(qs) { * @param {string} js * @return {string} */ -function highlight(js) { +function highlight (js) { return js .replace(/</g, '&lt;') .replace(/>/g, '&gt;') @@ -349,7 +349,7 @@ function highlight(js) { * @api private * @param {string} name */ -exports.highlightTags = function(name) { +exports.highlightTags = function (name) { var code = document.getElementById('mocha').getElementsByTagName(name); for (var i = 0, len = code.length; i < len; ++i) { code[i].innerHTML = highlight(code[i].innerHTML); @@ -370,7 +370,7 @@ exports.highlightTags = function(name) { * @param {string} typeHint The type of the value * @returns {string} */ -function emptyRepresentation(value, typeHint) { +function emptyRepresentation (value, typeHint) { switch (typeHint) { case 'function': return '[Function]'; @@ -404,7 +404,7 @@ function emptyRepresentation(value, typeHint) { * type(global) // 'global' * type(new String('foo') // 'object' */ -var type = exports.type = function type(value) { +var type = exports.type = function type (value) { if (value === undefined) { return 'undefined'; } else if (value === null) { @@ -432,7 +432,7 @@ var type = exports.type = function type(value) { * @param {*} value * @return {string} */ -exports.stringify = function(value) { +exports.stringify = function (value) { var typeHint = type(value); if (!~indexOf(['object', 'array', 'function'], typeHint)) { @@ -446,7 +446,7 @@ exports.stringify = function(value) { // IE7/IE8 has a bizarre String constructor; needs to be coerced // into an array and back to obj. if (typeHint === 'string' && typeof value === 'object') { - value = reduce(value.split(''), function(acc, char, idx) { + value = reduce(value.split(''), function (acc, char, idx) { acc[idx] = char; return acc; }, {}); @@ -474,7 +474,7 @@ exports.stringify = function(value) { * @param {number=} depth * @returns {*} */ -function jsonStringify(object, spaces, depth) { +function jsonStringify (object, spaces, depth) { if (typeof spaces === 'undefined') { // primitive types return _stringify(object); @@ -486,11 +486,11 @@ function jsonStringify(object, spaces, depth) { var end = isArray(object) ? ']' : '}'; var length = typeof object.length === 'number' ? object.length : exports.keys(object).length; // `.repeat()` polyfill - function repeat(s, n) { + function repeat (s, n) { return new Array(n).join(s); } - function _stringify(val) { + function _stringify (val) { switch (type(val)) { case 'null': case 'undefined': @@ -536,15 +536,15 @@ function jsonStringify(object, spaces, depth) { continue; // not my business } --length; - str += '\n ' + repeat(' ', space) - + (isArray(object) ? '' : '"' + i + '": ') // key - + _stringify(object[i]) // value - + (length ? ',' : ''); // comma + str += '\n ' + repeat(' ', space) + + (isArray(object) ? 
'' : '"' + i + '": ') + // key + _stringify(object[i]) + // value + (length ? ',' : ''); // comma } - return str + return str + // [], {} - + (str.length !== 1 ? '\n' + repeat(' ', --space) + end : end); + (str.length !== 1 ? '\n' + repeat(' ', --space) + end : end); } /** @@ -554,7 +554,7 @@ function jsonStringify(object, spaces, depth) { * @param {*} value The value to test. * @return {boolean} True if `value` is a buffer, otherwise false */ -exports.isBuffer = function(value) { +exports.isBuffer = function (value) { return typeof Buffer !== 'undefined' && Buffer.isBuffer(value); }; @@ -577,13 +577,13 @@ exports.isBuffer = function(value) { * @param {string} [typeHint] Type hint * @return {(Object|Array|Function|string|undefined)} */ -exports.canonicalize = function canonicalize(value, stack, typeHint) { +exports.canonicalize = function canonicalize (value, stack, typeHint) { var canonicalizedObj; /* eslint-disable no-unused-vars */ var prop; /* eslint-enable no-unused-vars */ typeHint = typeHint || type(value); - function withStack(value, fn) { + function withStack (value, fn) { stack.push(value); fn(); stack.pop(); @@ -602,8 +602,8 @@ exports.canonicalize = function canonicalize(value, stack, typeHint) { canonicalizedObj = value; break; case 'array': - withStack(value, function() { - canonicalizedObj = exports.map(value, function(item) { + withStack(value, function () { + canonicalizedObj = exports.map(value, function (item) { return exports.canonicalize(item, stack); }); }); @@ -622,8 +622,8 @@ exports.canonicalize = function canonicalize(value, stack, typeHint) { /* falls through */ case 'object': canonicalizedObj = canonicalizedObj || {}; - withStack(value, function() { - exports.forEach(exports.keys(value).sort(), function(key) { + withStack(value, function () { + exports.forEach(exports.keys(value).sort(), function (key) { canonicalizedObj[key] = exports.canonicalize(value[key], stack); }); }); @@ -651,7 +651,7 @@ exports.canonicalize = function canonicalize(value, stack, typeHint) { * @param {boolean} recursive Whether or not to recurse into subdirectories. * @return {string[]} An array of paths. */ -exports.lookupFiles = function lookupFiles(path, extensions, recursive) { +exports.lookupFiles = function lookupFiles (path, extensions, recursive) { var files = []; var re = new RegExp('\\.(' + extensions.join('|') + ')$'); @@ -677,7 +677,7 @@ exports.lookupFiles = function lookupFiles(path, extensions, recursive) { return; } - readdirSync(path).forEach(function(file) { + readdirSync(path).forEach(function (file) { file = join(path, file); try { var stat = statSync(file); @@ -706,7 +706,7 @@ exports.lookupFiles = function lookupFiles(path, extensions, recursive) { * @return {Error} */ -exports.undefinedError = function() { +exports.undefinedError = function () { return new Error('Caught undefined error, did you throw without specifying what?'); }; @@ -717,7 +717,7 @@ exports.undefinedError = function() { * @return {Error} */ -exports.getError = function(err) { +exports.getError = function (err) { return err || exports.undefinedError(); }; @@ -730,7 +730,7 @@ exports.getError = function(err) { * (i.e: strip Mocha and internal node functions from stack trace). * @returns {Function} */ -exports.stackTraceFilter = function() { +exports.stackTraceFilter = function () { // TODO: Replace with `process.browser` var is = typeof document === 'undefined' ? 
{ node: true } : { browser: true }; var slash = path.sep; @@ -742,26 +742,26 @@ exports.stackTraceFilter = function() { slash = '/'; } - function isMochaInternal(line) { - return (~line.indexOf('node_modules' + slash + 'mocha' + slash)) - || (~line.indexOf('node_modules' + slash + 'mocha.js')) - || (~line.indexOf('bower_components' + slash + 'mocha.js')) - || (~line.indexOf(slash + 'mocha.js')); + function isMochaInternal (line) { + return (~line.indexOf('node_modules' + slash + 'mocha' + slash)) || + (~line.indexOf('node_modules' + slash + 'mocha.js')) || + (~line.indexOf('bower_components' + slash + 'mocha.js')) || + (~line.indexOf(slash + 'mocha.js')); } - function isNodeInternal(line) { - return (~line.indexOf('(timers.js:')) - || (~line.indexOf('(events.js:')) - || (~line.indexOf('(node.js:')) - || (~line.indexOf('(module.js:')) - || (~line.indexOf('GeneratorFunctionPrototype.next (native)')) - || false; + function isNodeInternal (line) { + return (~line.indexOf('(timers.js:')) || + (~line.indexOf('(events.js:')) || + (~line.indexOf('(node.js:')) || + (~line.indexOf('(module.js:')) || + (~line.indexOf('GeneratorFunctionPrototype.next (native)')) || + false; } - return function(stack) { + return function (stack) { stack = stack.split('\n'); - stack = reduce(stack, function(list, line) { + stack = reduce(stack, function (list, line) { if (isMochaInternal(line)) { return list; } @@ -789,6 +789,6 @@ exports.stackTraceFilter = function() { * @param {*} value * @returns {boolean} Whether or not `value` is a Promise */ -exports.isPromise = function isPromise(value) { +exports.isPromise = function isPromise (value) { return typeof value === 'object' && typeof value.then === 'function'; }; diff --git a/scripts/dedefine.js b/scripts/dedefine.js --- a/scripts/dedefine.js +++ b/scripts/dedefine.js @@ -8,16 +8,16 @@ var through = require('through2'); var defineRx = /typeof define === ['"]function['"] && define\.amd/g; -function createStream() { - return through.obj(function(chunk, enc, next) { +function createStream () { + return through.obj(function (chunk, enc, next) { this.push(String(chunk) .replace(defineRx, 'false')); next(); }); } -module.exports = function(b) { - function wrap() { +module.exports = function (b) { + function wrap () { b.pipeline.get('wrap').push(createStream()); }
diff --git a/lib/test.js b/lib/test.js --- a/lib/test.js +++ b/lib/test.js @@ -19,7 +19,7 @@ module.exports = Test; * @param {String} title * @param {Function} fn */ -function Test(title, fn) { +function Test (title, fn) { if (!isString(title)) { throw new Error('Test `title` should be a "string" but "' + typeof title + '" was given instead.'); } @@ -35,7 +35,7 @@ Test.prototype = create(Runnable.prototype, { constructor: Test }); -Test.prototype.clone = function() { +Test.prototype.clone = function () { var test = new Test(this.title, this.fn); test.timeout(this.timeout()); test.slow(this.slow()); diff --git a/test/.eslintrc b/test/.eslintrc deleted file mode 100644 --- a/test/.eslintrc +++ /dev/null @@ -1,5 +0,0 @@ ---- -env: - mocha: true -globals: - expect: false diff --git a/test/.eslintrc.js b/test/.eslintrc.js new file mode 100644 --- /dev/null +++ b/test/.eslintrc.js @@ -0,0 +1,8 @@ +module.exports = { + env: { + mocha: true + }, + globals: { + expect: false + } +};
use semistandard

We have some code standards in place, but not enough. We have disabled many warnings which we probably shouldn't have, to avoid changing too much code when we picked up ESLint originally. I'd also prefer to use a widely-used style instead of a custom style.

- Install eslint-semistandard-config and all necessary dependencies
- Update `.eslintrc` to be just `{"extends": "semistandard"}`
- `eslint --fix` everything
- Manually fix everything else

If anyone disagrees, please let me know. Unless that disagreement involves "semicolons"; if so, please keep it to yourself. :smile:
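For illustration only (this sketch is not part of the original proposal): the bullet list above boils down to a one-line shareable config plus an auto-fix pass. Assuming the semistandard shareable config (published on npm as `eslint-config-semistandard`) and its peer dependencies are installed, the root config could look like this:

```js
// .eslintrc.js -- minimal sketch of the proposed root config,
// equivalent to the JSON form {"extends": "semistandard"} quoted above
module.exports = {
  extends: 'semistandard'
};

// Typical workflow after switching configs (run from the project root):
//   eslint --fix .   # let ESLint rewrite whatever it can fix automatically
//   eslint .         # list the remaining violations to fix by hand
```

The test_patch above applies the same module format to the test directory's own override file (test/.eslintrc.js), which only adds the mocha environment and the `expect` global.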
Great move! 👍
2016-09-30T13:04:19Z
3.1
mochajs/mocha
2479
mochajs__mocha-2479
[ "2465" ]
539255c8a8c8bba5b432156b0f86f57eb13b0ce5
diff --git a/lib/runnable.js b/lib/runnable.js --- a/lib/runnable.js +++ b/lib/runnable.js @@ -133,7 +133,7 @@ Runnable.prototype.enableTimeouts = function(enabled) { * @api public */ Runnable.prototype.skip = function() { - throw new Pending(); + throw new Pending('sync skip'); }; /** @@ -298,14 +298,19 @@ Runnable.prototype.run = function(fn) { if (this.async) { this.resetTimeout(); + // allows skip() to be used in an explicit async context + this.skip = function asyncSkip() { + done(new Pending('async skip call')); + // halt execution. the Runnable will be marked pending + // by the previous call, and the uncaught handler will ignore + // the failure. + throw new Pending('async skip; aborting execution'); + }; + if (this.allowUncaught) { return callFnAsync(this.fn); } try { - // allows skip() to be used in an explicit async context - this.skip = function() { - done(new Pending()); - }; callFnAsync(this.fn); } catch (err) { done(utils.getError(err)); diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -220,6 +220,10 @@ Runner.prototype.checkGlobals = function(test) { * @param {Error} err */ Runner.prototype.fail = function(test, err) { + if (test.isPending()) { + return; + } + ++this.failures; test.state = 'failed'; @@ -309,6 +313,8 @@ Runner.prototype.hook = function(name, fn) { suite.tests.forEach(function(test) { test.pending = true; }); + // a pending hook won't be executed twice. + hook.pending = true; } } else { self.failHook(hook, err); @@ -695,8 +701,8 @@ Runner.prototype.uncaught = function(err) { runnable.clearTimeout(); - // Ignore errors if complete - if (runnable.state) { + // Ignore errors if complete or pending + if (runnable.state || runnable.isPending()) { return; } this.fail(runnable, err);
diff --git a/test/runner.js b/test/runner.js --- a/test/runner.js +++ b/test/runner.js @@ -1,55 +1,59 @@ -var mocha = require('../') - , Suite = mocha.Suite - , Runner = mocha.Runner - , Test = mocha.Test; +var mocha = require('../'); +var Suite = mocha.Suite; +var Runner = mocha.Runner; +var Test = mocha.Test; +var Hook = mocha.Hook; -describe('Runner', function(){ - var suite, runner; +function noop() {} - beforeEach(function(){ +describe('Runner', function() { + var suite; + var runner; + + beforeEach(function() { suite = new Suite('Suite', 'root'); runner = new Runner(suite); - }) + }); - describe('.grep()', function(){ - it('should update the runner.total with number of matched tests', function(){ - suite.addTest(new Test('im a test about lions')); - suite.addTest(new Test('im another test about lions')); - suite.addTest(new Test('im a test about bears')); + describe('.grep()', function() { + it('should update the runner.total with number of matched tests', function() { + suite.addTest(new Test('im a test about lions', noop)); + suite.addTest(new Test('im another test about lions', noop)); + suite.addTest(new Test('im a test about bears', noop)); var newRunner = new Runner(suite); newRunner.grep(/lions/); newRunner.total.should.equal(2); - }) + }); - it('should update the runner.total with number of matched tests when inverted', function(){ - suite.addTest(new Test('im a test about lions')); - suite.addTest(new Test('im another test about lions')); - suite.addTest(new Test('im a test about bears')); + it('should update the runner.total with number of matched tests when inverted', function() { + suite.addTest(new Test('im a test about lions', noop)); + suite.addTest(new Test('im another test about lions', noop)); + suite.addTest(new Test('im a test about bears', noop)); var newRunner = new Runner(suite); newRunner.grep(/lions/, true); newRunner.total.should.equal(1); - }) - }) - - describe('.grepTotal()', function(){ - it('should return the total number of matched tests', function(){ - suite.addTest(new Test('im a test about lions')); - suite.addTest(new Test('im another test about lions')); - suite.addTest(new Test('im a test about bears')); + }); + }); + + describe('.grepTotal()', function() { + it('should return the total number of matched tests', function() { + suite.addTest(new Test('im a test about lions', noop)); + suite.addTest(new Test('im another test about lions', noop)); + suite.addTest(new Test('im a test about bears', noop)); runner.grep(/lions/); runner.grepTotal(suite).should.equal(2); - }) + }); - it('should return the total number of matched tests when inverted', function(){ - suite.addTest(new Test('im a test about lions')); - suite.addTest(new Test('im another test about lions')); - suite.addTest(new Test('im a test about bears')); + it('should return the total number of matched tests when inverted', function() { + suite.addTest(new Test('im a test about lions', noop)); + suite.addTest(new Test('im another test about lions', noop)); + suite.addTest(new Test('im a test about bears', noop)); runner.grep(/lions/, true); runner.grepTotal(suite).should.equal(1); - }) - }) + }); + }); - describe('.globalProps()', function(){ + describe('.globalProps()', function() { it('should include common non enumerable globals', function() { var props = runner.globalProps(); props.should.containEql('setTimeout'); @@ -61,19 +65,19 @@ describe('Runner', function(){ }); }); - describe('.globals()', function(){ - it('should default to the known globals', function(){ + 
describe('.globals()', function() { + it('should default to the known globals', function() { runner.globals().length.should.be.above(16); - }) + }); - it('should white-list globals', function(){ + it('should white-list globals', function() { runner.globals(['foo', 'bar']); runner.globals().should.containEql('foo'); runner.globals().should.containEql('bar'); - }) - }) + }); + }); - describe('.checkGlobals(test)', function(){ + describe('.checkGlobals(test)', function() { it('should allow variables that match a wildcard', function(done) { runner.globals(['foo*', 'giz*']); global.foo = 'baz'; @@ -81,43 +85,44 @@ describe('Runner', function(){ runner.checkGlobals(); delete global.foo; delete global.gizmo; - done() - }) + done(); + }); - it('should emit "fail" when a new global is introduced', function(done){ + it('should emit "fail" when a new global is introduced', function(done) { + var test = new Test('im a test', noop); runner.checkGlobals(); global.foo = 'bar'; - runner.on('fail', function(test, err){ - test.should.equal('im a test'); + runner.on('fail', function(_test, err) { + _test.should.equal(test); err.message.should.equal('global leak detected: foo'); delete global.foo; done(); }); - runner.checkGlobals('im a test'); - }) + runner.checkGlobals(test); + }); it('should emit "fail" when a single new disallowed global is introduced after a single extra global is allowed', function(done) { var doneCalled = false; runner.globals('good'); global.bad = 1; - runner.on('fail', function(test, err) { + runner.on('fail', function() { delete global.bad; done(); doneCalled = true; }); - runner.checkGlobals('test'); + runner.checkGlobals(new Test('yet another test', noop)); if (!doneCalled) { - done(Error("Expected test failure did not occur.")); + done(Error('Expected test failure did not occur.')); } }); - it ('should not fail when a new common global is introduced', function(){ + it('should not fail when a new common global is introduced', function() { // verify that the prop isn't enumerable delete global.XMLHttpRequest; global.propertyIsEnumerable('XMLHttpRequest').should.not.be.ok(); // create a new runner and keep a reference to the test. 
- var test = new Test('im a test about bears'); + var test = new Test('im a test about bears', noop); suite.addTest(test); var newRunner = new Runner(suite); @@ -133,22 +138,23 @@ describe('Runner', function(){ delete global.XMLHttpRequest; }); - it('should pluralize the error message when several are introduced', function(done){ + it('should pluralize the error message when several are introduced', function(done) { + var test = new Test('im a test', noop); runner.checkGlobals(); global.foo = 'bar'; global.bar = 'baz'; - runner.on('fail', function(test, err){ - test.should.equal('im a test'); + runner.on('fail', function(_test, err) { + _test.should.equal(test); err.message.should.equal('global leaks detected: foo, bar'); delete global.foo; delete global.bar; done(); }); - runner.checkGlobals('im a test'); - }) + runner.checkGlobals(test); + }); it('should respect per test whitelisted globals', function() { - var test = new Test('im a test about lions'); + var test = new Test('im a test about lions', noop); test.globals(['foo']); suite.addTest(test); @@ -161,166 +167,179 @@ describe('Runner', function(){ test.should.not.have.key('state'); delete global.foo; - }) + }); it('should respect per test whitelisted globals but still detect other leaks', function(done) { - var test = new Test('im a test about lions'); + var test = new Test('im a test about lions', noop); test.globals(['foo']); suite.addTest(test); global.foo = 'bar'; global.bar = 'baz'; - runner.on('fail', function(test, err){ + runner.on('fail', function(test, err) { test.title.should.equal('im a test about lions'); err.message.should.equal('global leak detected: bar'); delete global.foo; done(); }); runner.checkGlobals(test); - }) + }); it('should emit "fail" when a global beginning with d is introduced', function(done) { global.derp = 'bar'; - runner.on('fail', function(test, err){ + runner.on('fail', function() { delete global.derp; done(); }); - runner.checkGlobals('im a test'); + runner.checkGlobals(new Test('herp', function() {})); }); - }) + }); - describe('.hook(name, fn)', function(){ - it('should execute hooks after failed test if suite bail is true', function(done){ - runner.fail({}); + describe('.hook(name, fn)', function() { + it('should execute hooks after failed test if suite bail is true', function(done) { + runner.fail(new Test('failed test', noop)); suite.bail(true); - suite.afterEach(function(){ + suite.afterEach(function() { suite.afterAll(function() { done(); - }) + }); }); - runner.hook('afterEach', function(){}); - runner.hook('afterAll', function(){}); - }) - }) + runner.hook('afterEach', function() {}); + runner.hook('afterAll', function() {}); + }); + }); - describe('.fail(test, err)', function(){ - it('should increment .failures', function(){ + describe('.fail(test, err)', function() { + it('should increment .failures', function() { runner.failures.should.equal(0); - runner.fail({}, {}); + runner.fail(new Test('one', noop), {}); runner.failures.should.equal(1); - runner.fail({}, {}); + runner.fail(new Test('two', noop), {}); runner.failures.should.equal(2); - }) + }); - it('should set test.state to "failed"', function(){ - var test = {}; + it('should set test.state to "failed"', function() { + var test = new Test('some test', noop); runner.fail(test, 'some error'); test.state.should.equal('failed'); - }) + }); - it('should emit "fail"', function(done){ - var test = {}, err = {}; - runner.on('fail', function(test, err){ + it('should emit "fail"', function(done) { + var test = new Test('some other test', 
noop); + var err = {}; + runner.on('fail', function(test, err) { test.should.equal(test); err.should.equal(err); done(); }); runner.fail(test, err); - }) + }); - it('should emit a helpful message when failed with a string', function(done){ - var test = {}, err = 'string'; - runner.on('fail', function(test, err){ + it('should emit a helpful message when failed with a string', function(done) { + var test = new Test('helpful test', noop); + var err = 'string'; + runner.on('fail', function(test, err) { err.message.should.equal('the string "string" was thrown, throw an Error :)'); done(); }); runner.fail(test, err); - }) + }); - it('should emit a the error when failed with an Error instance', function(done){ - var test = {}, err = new Error('an error message'); - runner.on('fail', function(test, err){ + it('should emit a the error when failed with an Error instance', function(done) { + var test = new Test('a test', noop); + var err = new Error('an error message'); + runner.on('fail', function(test, err) { err.message.should.equal('an error message'); done(); }); runner.fail(test, err); - }) + }); - it('should emit the error when failed with an Error-like object', function(done){ - var test = {}, err = {message: 'an error message'}; - runner.on('fail', function(test, err){ + it('should emit the error when failed with an Error-like object', function(done) { + var test = new Test('a test', noop); + var err = { message: 'an error message' }; + runner.on('fail', function(test, err) { err.message.should.equal('an error message'); done(); }); runner.fail(test, err); - }) + }); - it('should emit a helpful message when failed with an Object', function(done){ - var test = {}, err = { x: 1 }; - runner.on('fail', function(test, err){ + it('should emit a helpful message when failed with an Object', function(done) { + var test = new Test('a test', noop); + var err = { x: 1 }; + runner.on('fail', function(test, err) { err.message.should.equal('the object {\n "x": 1\n} was thrown, throw an Error :)'); done(); }); runner.fail(test, err); - }) + }); - it('should emit a helpful message when failed with an Array', function(done){ - var test = {}, err = [1,2]; - runner.on('fail', function(test, err){ + it('should emit a helpful message when failed with an Array', function(done) { + var test = new Test('a test', noop); + var err = [ + 1, + 2 + ]; + runner.on('fail', function(test, err) { err.message.should.equal('the array [\n 1\n 2\n] was thrown, throw an Error :)'); done(); }); runner.fail(test, err); - }) - }) + }); + }); - describe('.failHook(hook, err)', function(){ - it('should increment .failures', function(){ + describe('.failHook(hook, err)', function() { + it('should increment .failures', function() { runner.failures.should.equal(0); - runner.failHook({}, {}); + runner.failHook(new Test('fail hook 1', noop), {}); runner.failures.should.equal(1); - runner.failHook({}, {}); + runner.failHook(new Test('fail hook 2', noop), {}); runner.failures.should.equal(2); - }) + }); + + it('should augment hook title with current test title', function() { + var hook = new Hook('"before each" hook'); + hook.ctx = { currentTest: new Test('should behave', noop) }; - it('should augment hook title with current test title', function(){ - var hook = { - title: '"before each" hook', - ctx: { currentTest: new Test('should behave') } - }; runner.failHook(hook, {}); hook.title.should.equal('"before each" hook for "should behave"'); - hook.ctx.currentTest = new Test('should obey'); + hook.ctx.currentTest = new Test('should obey', 
noop); runner.failHook(hook, {}); hook.title.should.equal('"before each" hook for "should obey"'); - }) + }); - it('should emit "fail"', function(done){ - var hook = {}, err = {}; - runner.on('fail', function(hook, err){ + it('should emit "fail"', function(done) { + var hook = new Hook(); + var err = {}; + runner.on('fail', function(hook, err) { hook.should.equal(hook); err.should.equal(err); done(); }); runner.failHook(hook, err); - }) + }); - it('should emit "end" if suite bail is true', function(done){ - var hook = {}, err = {}; + it('should emit "end" if suite bail is true', function(done) { + var hook = new Hook(); + var err = {}; suite.bail(true); runner.on('end', done); runner.failHook(hook, err); - }) + }); - it('should not emit "end" if suite bail is not true', function(done){ - var hook = {}, err = {}; + it('should not emit "end" if suite bail is not true', function(done) { + var hook = new Hook(); + var err = {}; suite.bail(false); - runner.on('end', function() { throw new Error('"end" was emit, but the bail is false'); }); + runner.on('end', function() { + throw new Error('"end" was emit, but the bail is false'); + }); runner.failHook(hook, err); done(); - }) + }); }); describe('allowUncaught', function() { @@ -339,27 +358,29 @@ describe('Runner', function(){ }); describe('stackTrace', function() { - var stack = [ 'AssertionError: foo bar' - , 'at EventEmitter.<anonymous> (/usr/local/dev/test.js:16:12)' - , 'at Context.<anonymous> (/usr/local/dev/test.js:19:5)' - , 'Test.Runnable.run (/usr/local/lib/node_modules/mocha/lib/runnable.js:244:7)' - , 'Runner.runTest (/usr/local/lib/node_modules/mocha/lib/runner.js:374:10)' - , '/usr/local/lib/node_modules/mocha/lib/runner.js:452:12' - , 'next (/usr/local/lib/node_modules/mocha/lib/runner.js:299:14)' - , '/usr/local/lib/node_modules/mocha/lib/runner.js:309:7' - , 'next (/usr/local/lib/node_modules/mocha/lib/runner.js:248:23)' - , 'Immediate._onImmediate (/usr/local/lib/node_modules/mocha/lib/runner.js:276:5)' - , 'at processImmediate [as _immediateCallback] (timers.js:321:17)']; + var stack = [ + 'AssertionError: foo bar', + 'at EventEmitter.<anonymous> (/usr/local/dev/test.js:16:12)', + 'at Context.<anonymous> (/usr/local/dev/test.js:19:5)', + 'Test.Runnable.run (/usr/local/lib/node_modules/mocha/lib/runnable.js:244:7)', + 'Runner.runTest (/usr/local/lib/node_modules/mocha/lib/runner.js:374:10)', + '/usr/local/lib/node_modules/mocha/lib/runner.js:452:12', + 'next (/usr/local/lib/node_modules/mocha/lib/runner.js:299:14)', + '/usr/local/lib/node_modules/mocha/lib/runner.js:309:7', + 'next (/usr/local/lib/node_modules/mocha/lib/runner.js:248:23)', + 'Immediate._onImmediate (/usr/local/lib/node_modules/mocha/lib/runner.js:276:5)', + 'at processImmediate [as _immediateCallback] (timers.js:321:17)' + ]; describe('shortStackTrace', function() { it('should prettify the stack-trace', function(done) { - var hook = {}, - err = new Error(); + var hook = new Hook(); + var err = new Error(); // Fake stack-trace err.stack = stack.join('\n'); - runner.on('fail', function(hook, err){ - err.stack.should.equal(stack.slice(0,3).join('\n')); + runner.on('fail', function(hook, err) { + err.stack.should.equal(stack.slice(0, 3).join('\n')); done(); }); runner.failHook(hook, err); @@ -368,14 +389,14 @@ describe('Runner', function(){ describe('longStackTrace', function() { it('should display the full stack-trace', function(done) { - var hook = {}, - err = new Error(); + var hook = new Hook(); + var err = new Error(); // Fake stack-trace err.stack = 
stack.join('\n'); // Add --stack-trace option runner.fullStackTrace = true; - runner.on('fail', function(hook, err){ + runner.on('fail', function(hook, err) { err.stack.should.equal(stack.join('\n')); done(); });
Calling done() after this.skip() results in 'done() called multiple times'

The following code skips the test in mocha v2, but causes an error in v3.

#### Test

```
describe('broken skip behaviour', function() {
  it('should not report done() called multiple times', function(done) {
    this.skip()
    done()
  })
})
```

#### v2 Output

```
  broken skip behaviour
    - should not report done() called multiple times

  0 passing (11ms)
  1 pending
```

#### v3 Output

```
  broken skip behaviour
    - should not report done() called multiple times
    1) should not report done() called multiple times

  0 passing (15ms)
  1 pending
  1 failing

  1) broken skip behaviour should not report done() called multiple times:
     Error: done() called multiple times
      at Suite.<anonymous> (test/index.test.js:3:5)
      at Object.<anonymous> (test/index.test.js:1:63)
      at require (internal/module.js:20:19)
      at Array.forEach (native)
      at node.js:463:3
```

Unfortunately I rely on this behaviour for skipping tests in [Yadda](https://github.com/acuminous/yadda/blob/master/lib/plugins/mocha/StepLevelPlugin.js#L51-L52). I've tried with various versions of mocha from 1-3 and it looks like I can simply remove the call to done() in all versions. Can you confirm?
> I've tried with various versions of mocha from 1-3 and it looks like I can simply remove the call to done() in all versions. Can you confirm?

You don't need `done` if all your test code executes synchronously; you need it (or to return a promise encapsulating the asynchronous code) if your test executes anything asynchronous.

Either way, I'm pretty sure this is a legit bug -- once `skip` is called the test should be aborted as pending and `done` shouldn't affect anything, unless I'm missing something.

Hi @ScottFreeCode, thanks for responding.

> > I've tried with various versions of mocha from 1-3 and it looks like I can simply remove the call to done() in all versions. Can you confirm?
>
> You don't need done if all your test code executes synchronously; you need it (or to return a promise encapsulating the asynchronous code) if your test executes anything asynchronous.

I mean removing the call to done if `this.skip()` was called (Yadda calls this.skip() conditionally), e.g.

```
it('should not report done() called multiple times', function(done) {
  if (shouldAbort()) return this.skip()
  doStuff((result) => {
    assert(result)
    done()
  })
})
```

The above seems to work fine, but I'm not sure if it was an intentional change from 2 => 3 or an accidental one.

Yeah, that seems like it should work, but it also seems like it shouldn't be necessary; @boneskull, do you know if there was any intended behavior change in 3.x that might be causing this?

Yes. @cressie176 Mocha pre-v3 doesn't truly support `this.skip()` in async tests. In your example, your test is not actually running async code, so you wouldn't notice:

``` js
describe('broken skip behaviour', function() {
  it('should not report done() called multiple times', function(done) {
    this.skip()
    done() // this is never actually called.
  })
})
```

But:

``` js
describe('broken skip behaviour', function() {
  it('should not report done() called multiple times', function(done) {
    setTimeout(() => {
      this.skip() // uncaught exception
      done() // still never called
    })
  })
})
```

results in (again, Mocha 2.x here):

```
  broken skip behaviour
    1) should not report done() called multiple times

  0 passing (16ms)
  1 failing

  1) broken skip behaviour should not report done() called multiple times:
     Error: the object {
       "message": [undefined]
       "uncaught": true
     } was thrown, throw an Error :)
      at process._fatalException (bootstrap_node.js:296:26)
```

See [this comment](https://github.com/mochajs/mocha/pull/946#issuecomment-227715287) and subsequent discussion.

But, yes, there's a bug here; Mocha 3.x does this:

``` js
describe('broken skip behaviour', function() {
  it('should not report done() called multiple times', function(done) {
    this.skip()
    done() // this actually gets called, which is wrong.
  })
})
```

So that needs fixing; hopefully it won't be too much of a headache.
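For completeness, a sketch (not from the thread above) of the asynchronous variant that the patch above sets out to support: calling `this.skip()` from inside an async callback without ever reaching `done()`. The `loadFixture` helper and the length check are hypothetical stand-ins:

```js
it('skips itself when the fixture cannot be loaded', function (done) {
  var self = this; // the callback below is not bound to the test context
  loadFixture('users.json', function (err, fixture) { // hypothetical async helper
    if (err) {
      // With the patched runner, skip() marks the test pending and then aborts
      // execution, so the done() call below is never reached on the skip path.
      return self.skip();
    }
    if (!fixture.length) {
      return done(new Error('expected at least one user')); // hypothetical check
    }
    done();
  });
});
```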
2016-09-14T23:26:52Z
3
mochajs/mocha
2499
mochajs__mocha-2499
[ "2496" ]
8ccccba817143539f074362f2f95b3f731d23cef
diff --git a/lib/utils.js b/lib/utils.js --- a/lib/utils.js +++ b/lib/utils.js @@ -89,7 +89,7 @@ exports.map = function(arr, fn, scope) { * @param {number} start * @return {number} */ -exports.indexOf = function(arr, obj, start) { +var indexOf = exports.indexOf = function(arr, obj, start) { for (var i = start || 0, l = arr.length; i < l; i++) { if (arr[i] === obj) { return i; @@ -107,7 +107,7 @@ exports.indexOf = function(arr, obj, start) { * @param {Object} val Initial value. * @return {*} */ -exports.reduce = function(arr, fn, val) { +var reduce = exports.reduce = function(arr, fn, val) { var rval = val; for (var i = 0, l = arr.length; i < l; i++) { @@ -313,7 +313,7 @@ exports.trim = function(str) { * @return {Object} */ exports.parseQuery = function(qs) { - return exports.reduce(qs.replace('?', '').split('&'), function(obj, pair) { + return reduce(qs.replace('?', '').split('&'), function(obj, pair) { var i = pair.indexOf('='); var key = pair.slice(0, i); var val = pair.slice(++i); @@ -366,13 +366,11 @@ exports.highlightTags = function(name) { * * @api private * @param {*} value The value to inspect. - * @param {string} [type] The type of the value, if known. + * @param {string} typeHint The type of the value * @returns {string} */ -function emptyRepresentation(value, type) { - type = type || exports.type(value); - - switch (type) { +function emptyRepresentation(value, typeHint) { + switch (typeHint) { case 'function': return '[Function]'; case 'object': @@ -391,7 +389,7 @@ function emptyRepresentation(value, type) { * @api private * @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/toString * @param {*} value The value to test. - * @returns {string} + * @returns {string} Computed type * @example * type({}) // 'object' * type([]) // 'array' @@ -403,8 +401,9 @@ function emptyRepresentation(value, type) { * type(/foo/) // 'regexp' * type('type') // 'string' * type(global) // 'global' + * type(new String('foo') // 'object' */ -exports.type = function type(value) { +var type = exports.type = function type(value) { if (value === undefined) { return 'undefined'; } else if (value === null) { @@ -433,25 +432,36 @@ exports.type = function type(value) { * @return {string} */ exports.stringify = function(value) { - var type = exports.type(value); + var typeHint = type(value); + + if (!~indexOf(['object', 'array', 'function'], typeHint)) { + if (typeHint === 'buffer') { + var json = value.toJSON(); + // Based on the toJSON result + return jsonStringify(json.data && json.type ? json.data : json, 2) + .replace(/,(\n|$)/g, '$1'); + } - if (!~exports.indexOf(['object', 'array', 'function'], type)) { - if (type !== 'buffer') { + // IE7/IE8 has a bizarre String constructor; needs to be coerced + // into an array and back to obj. + if (typeHint === 'string' && typeof value === 'object') { + value = reduce(value.split(''), function(acc, char, idx) { + acc[idx] = char; + return acc; + }, {}); + typeHint = 'object'; + } else { return jsonStringify(value); } - var json = value.toJSON(); - // Based on the toJSON result - return jsonStringify(json.data && json.type ? 
json.data : json, 2) - .replace(/,(\n|$)/g, '$1'); } for (var prop in value) { if (Object.prototype.hasOwnProperty.call(value, prop)) { - return jsonStringify(exports.canonicalize(value), 2).replace(/,(\n|$)/g, '$1'); + return jsonStringify(exports.canonicalize(value, null, typeHint), 2).replace(/,(\n|$)/g, '$1'); } } - return emptyRepresentation(value, type); + return emptyRepresentation(value, typeHint); }; /** @@ -480,7 +490,7 @@ function jsonStringify(object, spaces, depth) { } function _stringify(val) { - switch (exports.type(val)) { + switch (type(val)) { case 'null': case 'undefined': val = '[' + val + ']'; @@ -563,14 +573,15 @@ exports.isBuffer = function(value) { * @see {@link exports.stringify} * @param {*} value Thing to inspect. May or may not have properties. * @param {Array} [stack=[]] Stack of seen values + * @param {string} [typeHint] Type hint * @return {(Object|Array|Function|string|undefined)} */ -exports.canonicalize = function(value, stack) { +exports.canonicalize = function canonicalize(value, stack, typeHint) { var canonicalizedObj; /* eslint-disable no-unused-vars */ var prop; /* eslint-enable no-unused-vars */ - var type = exports.type(value); + typeHint = typeHint || type(value); function withStack(value, fn) { stack.push(value); fn(); @@ -579,11 +590,11 @@ exports.canonicalize = function(value, stack) { stack = stack || []; - if (exports.indexOf(stack, value) !== -1) { + if (indexOf(stack, value) !== -1) { return '[Circular]'; } - switch (type) { + switch (typeHint) { case 'undefined': case 'buffer': case 'null': @@ -604,7 +615,7 @@ exports.canonicalize = function(value, stack) { } /* eslint-enable guard-for-in */ if (!canonicalizedObj) { - canonicalizedObj = emptyRepresentation(value, type); + canonicalizedObj = emptyRepresentation(value, typeHint); break; } /* falls through */ @@ -745,7 +756,7 @@ exports.stackTraceFilter = function() { return function(stack) { stack = stack.split('\n'); - stack = exports.reduce(stack, function(list, line) { + stack = reduce(stack, function(list, line) { if (isMochaInternal(line)) { return list; }
diff --git a/test/acceptance/utils.spec.js b/test/acceptance/utils.spec.js --- a/test/acceptance/utils.spec.js +++ b/test/acceptance/utils.spec.js @@ -77,6 +77,10 @@ describe('lib/utils', function () { var stringify = utils.stringify; + it('should return an object representation of a string created with a String constructor', function() { + expect(stringify(new String('foo'))).to.equal('{\n "0": "f"\n "1": "o"\n "2": "o"\n}'); + }); + it('should return Buffer with .toJSON representation', function() { expect(stringify(new Buffer([0x01]))).to.equal('[\n 1\n]'); expect(stringify(new Buffer([0x01, 0x02]))).to.equal('[\n 1\n 2\n]');
Objects prematurely coerced into string primitives

This is regarding an issue I reported on the WebStorm tracker: https://youtrack.jetbrains.com/issue/WEB-23383

Differences between values that are not strictly equal appear as being equal and are thus hard to read and confusing. Here is a simplified version of a test where I noticed this behavior:

``` js
"use strict";

const chai = require( "chai" );
const expect = chai.expect;
chai.config.truncateThreshold = 0;

describe( "Strings", () => {
  it( "should correctly display differences", () => {
    expect( new String( "foo" ) ).to.equal( "foo" );
  } );
} );
```

Because `new String( "foo" )` is not equal to the primitive `"foo"`, the test will correctly fail. However, in the resulting output, the difference is not immediately apparent.

In this simple test, the `AssertionError` _will_ pretty clearly identify the issue. However, in larger objects, this can be much harder to identify, _especially_ if the truncation threshold is not raised.
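A small standalone illustration (not part of the original report) of why the two values print identically yet fail a strict-equality assertion: `new String(...)` produces a boxed String object, while a literal produces a string primitive.

```js
var boxed = new String('foo'); // String *object* (boxed)
var primitive = 'foo';         // string *primitive*

console.log(typeof boxed);                // 'object'
console.log(typeof primitive);            // 'string'
console.log(boxed === primitive);         // false -- strict equality never coerces
console.log(boxed == primitive);          // true  -- loose equality unboxes the object
console.log(String(boxed) === primitive); // true  -- explicit coercion, which is why
                                          // a naive string diff shows no difference
```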
How assertion libraries generate error messages is outside of the scope of Mocha. I suggest taking this up with chai or the relevant chai plugin you might be using

@Munter Apparently the behavior is related to https://github.com/mochajs/mocha/blob/master/lib/utils.js#L435

Also, @boneskull suggested to post this here

Quite reproducible:

``` js
it('should foo', function () {
  var err = new Error('oh no');
  err.actual = new String('foo');
  err.expected = 'foo';
  throw err;
});
```

Output:

```
$ mocha foo.js

  1) should foo

  0 passing (8ms)
  1 failing

  1) should foo:
     Error: oh no
     + expected - actual

      at Context.<anonymous> (foo.js:2:15)
```

@Munter Yeah, the problem seems to be in our diffing code; Chai actually outputs the difference correctly. See [original issue](https://youtrack.jetbrains.com/issue/WEB-23383).
2016-09-22T21:22:04Z
3
mochajs/mocha
2345
mochajs__mocha-2345
[ "1813", "1813" ]
96d9e06669262c395d74a12c2f15490535829507
diff --git a/karma.conf.js b/karma.conf.js --- a/karma.conf.js +++ b/karma.conf.js @@ -41,7 +41,7 @@ module.exports = function(config) { // TO RUN LOCALLY: // Execute `CI=1 make test-browser`, once you've set the SAUCE_USERNAME and // SAUCE_ACCESS_KEY env vars. - if (process.env.CI) { + if (process.env.CI && !process.env.APPVEYOR) { // we can't run SauceLabs tests on PRs from forks on Travis cuz security. if (process.env.TRAVIS) { if (process.env.TRAVIS_REPO_SLUG === 'mochajs/mocha' diff --git a/mocha.js b/mocha.js --- a/mocha.js +++ b/mocha.js @@ -2474,7 +2474,7 @@ function coverageClass(coveragePctg) { return 'terrible'; } -}).call(this,require('_process'),"/lib/reporters") +}).call(this,require('_process'),"/lib\\reporters") },{"./json-cov":23,"_process":58,"fs":43,"jade":43,"path":43}],21:[function(require,module,exports){ (function (global){ /* eslint-env browser */ @@ -4814,7 +4814,13 @@ Runner.prototype.hook = function(name, fn) { } if (err) { if (err instanceof Pending) { - suite.pending = true; + if (name === 'beforeEach' || name === 'afterEach') { + self.test.pending = true; + } else { + suite.tests.forEach(function(test) { + test.pending = true; + }); + } } else { self.failHook(hook, err); @@ -5028,7 +5034,7 @@ Runner.prototype.runTests = function(suite, fn) { // execute test and hook(s) self.emit('test', self.test = test); self.hookDown('beforeEach', function(err, errSuite) { - if (suite.isPending()) { + if (test.isPending()) { self.emit('pending', test); self.emit('test end', test); return next(); @@ -5355,8 +5361,8 @@ function filterLeaks(ok, globals) { } // in firefox - // if runner runs in an iframe, this iframe's window.getInterface method not init at first - // it is assigned in some seconds + // if runner runs in an iframe, this iframe's window.getInterface method + // not init at first it is assigned in some seconds if (global.navigator && (/^getInterface/).test(key)) { return false; } @@ -6117,8 +6123,8 @@ exports.slug = function(str) { exports.clean = function(str) { str = str .replace(/\r\n?|[\n\u2028\u2029]/g, '\n').replace(/^\uFEFF/, '') - .replace(/^function *\(.*\)\s*\{|\(.*\) *=> *\{?/, '') - .replace(/\s+\}$/, ''); + // (traditional)-> space/name parameters body (lambda)-> parameters body multi-statement/single keep body content + .replace(/^function(?:\s*|\s+[^(]*)\([^)]*\)\s*\{((?:.|\n)*?)\s*\}$|^\([^)]*\)\s*=>\s*(?:\{((?:.|\n)*?)\s*\}|((?:.|\n)*))$/, '$1$2$3'); var spaces = str.match(/^\n?( *)/)[1].length; var tabs = str.match(/^\n?(\t*)/)[1].length;
diff --git a/test/acceptance/fs.js b/test/acceptance/fs.js --- a/test/acceptance/fs.js +++ b/test/acceptance/fs.js @@ -1,16 +1,18 @@ var fs = require('fs'); +var path = require('path'); +var tmpFile = path.join.bind(path, require('os-tmpdir')()); describe('fs.readFile()', function(){ describe('when the file exists', function(){ it('should succeed', function(done){ - fs.writeFile('/tmp/mocha', 'wahoo', done) + fs.writeFile(tmpFile('mocha'), 'wahoo', done) }) }) describe('when the file does not exist', function(){ it('should fail', function(done){ // uncomment - // fs.readFile('/tmp/does-not-exist', done); + // fs.readFile(tmpFile('does-not-exist'), done); done(); }) }) diff --git a/test/acceptance/lookup-files.js b/test/acceptance/lookup-files.js --- a/test/acceptance/lookup-files.js +++ b/test/acceptance/lookup-files.js @@ -1,56 +1,83 @@ var utils = require('../../lib/utils'); describe('lookupFiles', function() { - var fs = require('fs'), path = require('path'), existsSync = fs.existsSync || - path.existsSync; + var fs = require('fs'), + path = require('path'), + existsSync = fs.existsSync || path.existsSync, + tmpDir = require('os-tmpdir')(), + tmpFile = path.join.bind(path, tmpDir), + symlinkSupported = false; + + fs.writeFileSync(tmpFile('mocha-utils.js'), 'yippy skippy ying yang yow'); + try { + fs.symlinkSync(tmpFile('mocha-utils.js'), tmpFile('mocha-utils-link.js')); + symlinkSupported = true; + } catch (ignored) { + } + + cleanup(); beforeEach(function() { - fs.writeFileSync('/tmp/mocha-utils.js', 'yippy skippy ying yang yow'); - fs.symlinkSync('/tmp/mocha-utils.js', '/tmp/mocha-utils-link.js'); + fs.writeFileSync(tmpFile('mocha-utils.js'), 'yippy skippy ying yang yow'); + if (symlinkSupported) { + fs.symlinkSync(tmpFile('mocha-utils.js'), tmpFile('mocha-utils-link.js')); + } }); - it('should not choke on symlinks', function() { - expect(utils.lookupFiles('/tmp', ['js'], false)) + (symlinkSupported ? 
it : it.skip)('should not choke on symlinks', function() { + expect(utils.lookupFiles(tmpDir, ['js'], false)) .to - .contain('/tmp/mocha-utils-link.js') + .contain(tmpFile('mocha-utils-link.js')) .and - .contain('/tmp/mocha-utils.js') + .contain(tmpFile('mocha-utils.js')) .and .have .length(2); - expect(existsSync('/tmp/mocha-utils-link.js')) + expect(existsSync(tmpFile('mocha-utils-link.js'))) .to .be(true); - fs.renameSync('/tmp/mocha-utils.js', '/tmp/bob'); - expect(existsSync('/tmp/mocha-utils-link.js')) + fs.renameSync(tmpFile('mocha-utils.js'), tmpFile('bob')); + expect(existsSync(tmpFile('mocha-utils-link.js'))) .to .be(false); - expect(utils.lookupFiles('/tmp', ['js'], false)) + expect(utils.lookupFiles(tmpDir, ['js'], false)) .to .eql([]); }); it('should accept a glob "path" value', function() { - expect(utils.lookupFiles('/tmp/mocha-utils*', ['js'], false)) + var res = utils.lookupFiles(tmpFile('mocha-utils*'), ['js'], false) + .map(path.normalize.bind(path)); + + var expectedLength = 0; + var ex = expect(res) .to - .contain('/tmp/mocha-utils-link.js') - .and - .contain('/tmp/mocha-utils.js') - .and + .contain(tmpFile('mocha-utils.js')); + expectedLength++; + + if (symlinkSupported) { + ex = ex.and + .contain(tmpFile('mocha-utils-link.js')); + expectedLength++; + } + + ex.and .have - .length(2); + .length(expectedLength); }); - afterEach(function() { + afterEach(cleanup); + + function cleanup() { [ - '/tmp/mocha-utils.js', - '/tmp/mocha-utils-link.js', - '/tmp/bob' + 'mocha-utils.js', + 'mocha-utils-link.js', + 'bob' ].forEach(function(path) { try { - fs.unlinkSync(path); + fs.unlinkSync(tmpFile(path)); } catch (ignored) { } }); - }); + } }); diff --git a/test/color.js b/test/color.js --- a/test/color.js +++ b/test/color.js @@ -1,12 +1,13 @@ var assert = require('assert'); var child_process = require('child_process'); +var path = require('path'); describe('Mocha', function() { this.timeout(1000); it('should not output colors to pipe', function(cb) { - var command = 'bin/mocha --grep missing-test'; - child_process.exec(command, function(err, stdout, stderr) { + var command = [path.join('bin', 'mocha'), '--grep', 'missing-test']; + child_process.execFile(process.execPath, command, function(err, stdout, stderr) { if (err) return cb(err); assert(stdout.indexOf('[90m') === -1); diff --git a/test/integration/diffs.js b/test/integration/diffs.js --- a/test/integration/diffs.js +++ b/test/integration/diffs.js @@ -15,7 +15,7 @@ function getExpectedOutput() { describe('diffs', function() { var diffs, expected; - this.timeout(1000); + this.timeout(2000); before(function(done) { run('diffs/diffs.js', ['-C'], function(err, res) { diff --git a/test/integration/fixtures/hooks/multiple.hook.async.error.js b/test/integration/fixtures/hooks/multiple.hook.async.error.js --- a/test/integration/fixtures/hooks/multiple.hook.async.error.js +++ b/test/integration/fixtures/hooks/multiple.hook.async.error.js @@ -9,54 +9,54 @@ describe('1', function () { console.log('1 before each'); }); - describe('1.1', function () { + describe('1-1', function () { before(function () { - console.log('1.1 before'); + console.log('1-1 before'); }); beforeEach(function (done) { - console.log('1.1 before each'); + console.log('1-1 before each'); process.nextTick(function () { - throw new Error('1.1 before each hook failed'); + throw new Error('1-1 before each hook failed'); }); }); - it('1.1 test 1', function () { - console.log('1.1 test 1'); + it('1-1 test 1', function () { + console.log('1-1 test 1'); }); - it('1.1 test 
2', function () { - console.log('1.1 test 2'); + it('1-1 test 2', function () { + console.log('1-1 test 2'); }); afterEach(function () { - console.log('1.1 after each'); + console.log('1-1 after each'); }); after(function (done) { - console.log('1.1 after'); + console.log('1-1 after'); process.nextTick(function () { - throw new Error('1.1 after hook failed'); + throw new Error('1-1 after hook failed'); }); }); }); - describe('1.2', function () { + describe('1-2', function () { before(function () { - console.log('1.2 before'); + console.log('1-2 before'); }); beforeEach(function () { - console.log('1.2 before each'); + console.log('1-2 before each'); }); - it('1.2 test 1', function () { - console.log('1.2 test 1'); + it('1-2 test 1', function () { + console.log('1-2 test 1'); }); - it('1.2 test 2', function () { - console.log('1.2 test 2'); + it('1-2 test 2', function () { + console.log('1-2 test 2'); }); afterEach(function (done) { - console.log('1.2 after each'); + console.log('1-2 after each'); process.nextTick(function () { - throw new Error('1.2 after each hook failed'); + throw new Error('1-2 after each hook failed'); }); }); after(function () { - console.log('1.2 after'); + console.log('1-2 after'); }); }); @@ -77,45 +77,45 @@ describe('2', function () { }); }); - describe('2.1', function () { + describe('2-1', function () { before(function () { - console.log('2.1 before'); + console.log('2-1 before'); }); beforeEach(function () { - console.log('2.1 before each'); + console.log('2-1 before each'); }); - it('2.1 test 1', function () { - console.log('2.1 test 1'); + it('2-1 test 1', function () { + console.log('2-1 test 1'); }); - it('2.1 test 2', function () { - console.log('2.1 test 2'); + it('2-1 test 2', function () { + console.log('2-1 test 2'); }); afterEach(function () { - console.log('2.1 after each'); + console.log('2-1 after each'); }); after(function () { - console.log('2.1 after'); + console.log('2-1 after'); }); }); - describe('2.2', function () { + describe('2-2', function () { before(function () { - console.log('2.2 before'); + console.log('2-2 before'); }); beforeEach(function () { - console.log('2.2 before each'); + console.log('2-2 before each'); }); - it('2.2 test 1', function () { - console.log('2.2 test 1'); + it('2-2 test 1', function () { + console.log('2-2 test 1'); }); - it('2.2 test 2', function () { - console.log('2.2 test 2'); + it('2-2 test 2', function () { + console.log('2-2 test 2'); }); afterEach(function () { - console.log('2.2 after each'); + console.log('2-2 after each'); }); after(function () { - console.log('2.2 after'); + console.log('2-2 after'); }); }); diff --git a/test/integration/fixtures/hooks/multiple.hook.error.js b/test/integration/fixtures/hooks/multiple.hook.error.js --- a/test/integration/fixtures/hooks/multiple.hook.error.js +++ b/test/integration/fixtures/hooks/multiple.hook.error.js @@ -9,48 +9,48 @@ describe('1', function () { console.log('1 before each'); }); - describe('1.1', function () { + describe('1-1', function () { before(function () { - console.log('1.1 before'); + console.log('1-1 before'); }); beforeEach(function () { - console.log('1.1 before each'); - throw new Error('1.1 before each hook failed'); + console.log('1-1 before each'); + throw new Error('1-1 before each hook failed'); }); - it('1.1 test 1', function () { - console.log('1.1 test 1'); + it('1-1 test 1', function () { + console.log('1-1 test 1'); }); - it('1.1 test 2', function () { - console.log('1.1 test 2'); + it('1-1 test 2', function () { + 
console.log('1-1 test 2'); }); afterEach(function () { - console.log('1.1 after each'); + console.log('1-1 after each'); }); after(function () { - console.log('1.1 after'); - throw new Error('1.1 after hook failed'); + console.log('1-1 after'); + throw new Error('1-1 after hook failed'); }); }); - describe('1.2', function () { + describe('1-2', function () { before(function () { - console.log('1.2 before'); + console.log('1-2 before'); }); beforeEach(function () { - console.log('1.2 before each'); + console.log('1-2 before each'); }); - it('1.2 test 1', function () { - console.log('1.2 test 1'); + it('1-2 test 1', function () { + console.log('1-2 test 1'); }); - it('1.2 test 2', function () { - console.log('1.2 test 2'); + it('1-2 test 2', function () { + console.log('1-2 test 2'); }); afterEach(function () { - console.log('1.2 after each'); - throw new Error('1.2 after each hook failed'); + console.log('1-2 after each'); + throw new Error('1-2 after each hook failed'); }); after(function () { - console.log('1.2 after'); + console.log('1-2 after'); }); }); @@ -69,45 +69,45 @@ describe('2', function () { throw new Error('2 before each hook failed'); }); - describe('2.1', function () { + describe('2-1', function () { before(function () { - console.log('2.1 before'); + console.log('2-1 before'); }); beforeEach(function () { - console.log('2.1 before each'); + console.log('2-1 before each'); }); - it('2.1 test 1', function () { - console.log('2.1 test 1'); + it('2-1 test 1', function () { + console.log('2-1 test 1'); }); - it('2.1 test 2', function () { - console.log('2.1 test 2'); + it('2-1 test 2', function () { + console.log('2-1 test 2'); }); afterEach(function () { - console.log('2.1 after each'); + console.log('2-1 after each'); }); after(function () { - console.log('2.1 after'); + console.log('2-1 after'); }); }); - describe('2.2', function () { + describe('2-2', function () { before(function () { - console.log('2.2 before'); + console.log('2-2 before'); }); beforeEach(function () { - console.log('2.2 before each'); + console.log('2-2 before each'); }); - it('2.2 test 1', function () { - console.log('2.2 test 1'); + it('2-2 test 1', function () { + console.log('2-2 test 1'); }); - it('2.2 test 2', function () { - console.log('2.2 test 2'); + it('2-2 test 2', function () { + console.log('2-2 test 2'); }); afterEach(function () { - console.log('2.2 after each'); + console.log('2-2 after each'); }); after(function () { - console.log('2.2 after'); + console.log('2-2 after'); }); }); diff --git a/test/integration/fixtures/regression/issue-1991.js b/test/integration/fixtures/regression/issue-1991.js --- a/test/integration/fixtures/regression/issue-1991.js +++ b/test/integration/fixtures/regression/issue-1991.js @@ -39,7 +39,7 @@ for (var i = 0; i < numOfTests; i += 1) { it('access a variable via a closure', function () { // slow performance on older node.js versions - this.timeout(1000); + this.timeout(2000); closureVar = new MemoryLeak(); }); diff --git a/test/integration/helpers.js b/test/integration/helpers.js --- a/test/integration/helpers.js +++ b/test/integration/helpers.js @@ -1,6 +1,7 @@ var spawn = require('child_process').spawn; var path = require('path'); var fs = require('fs'); +var baseReporter = require('../../lib/reporters/base'); module.exports = { /** @@ -142,14 +143,20 @@ module.exports = { return diffs.map(function(diff) { return diff.slice(1, -3).join('\n'); }); - } + }, + + /** + * regular expression used for splitting lines based on new line / dot symbol. 
+ */ + splitRegExp: new RegExp('[\\n' + baseReporter.symbols.dot + ']+') }; function invokeMocha(args, fn) { var output, mocha, listener; output = ''; - mocha = spawn('./bin/mocha', args); + args = [path.join('bin', 'mocha')].concat(args); + mocha = spawn(process.execPath, args); listener = function(data) { output += data; diff --git a/test/integration/hook.err.js b/test/integration/hook.err.js --- a/test/integration/hook.err.js +++ b/test/integration/hook.err.js @@ -1,8 +1,9 @@ var assert = require('assert'); var runMocha = require('./helpers').runMocha; +var splitRegExp = require('./helpers').splitRegExp; describe('hook error handling', function() { - this.timeout(1000); + this.timeout(2000); var lines; @@ -63,30 +64,30 @@ describe('hook error handling', function() { lines, [ 'root before', - '1.1 before', + '1-1 before', 'root before each', '1 before each', - '1.1 before each', - '1.1 after each', + '1-1 before each', + '1-1 after each', '1 after each', 'root after each', - '1.1 after', - '1.2 before', + '1-1 after', + '1-2 before', 'root before each', '1 before each', - '1.2 before each', - '1.2 test 1', - '1.2 after each', + '1-2 before each', + '1-2 test 1', + '1-2 after each', '1 after each', 'root after each', - '1.2 after', + '1-2 after', '1 after', - '2.1 before', + '2-1 before', 'root before each', '2 before each', '2 after each', 'root after each', - '2.1 after', + '2-1 after', '2 after', 'root after' ] @@ -151,30 +152,30 @@ describe('hook error handling', function() { lines, [ 'root before', - '1.1 before', + '1-1 before', 'root before each', '1 before each', - '1.1 before each', - '1.1 after each', + '1-1 before each', + '1-1 after each', '1 after each', 'root after each', - '1.1 after', - '1.2 before', + '1-1 after', + '1-2 before', 'root before each', '1 before each', - '1.2 before each', - '1.2 test 1', - '1.2 after each', + '1-2 before each', + '1-2 test 1', + '1-2 after each', '1 after each', 'root after each', - '1.2 after', + '1-2 after', '1 after', - '2.1 before', + '2-1 before', 'root before each', '2 before each', '2 after each', 'root after each', - '2.1 after', + '2-1 after', '2 after', 'root after' ] @@ -188,7 +189,7 @@ describe('hook error handling', function() { assert.ifError(err); lines = res.output - .split(/[\n․]+/) + .split(splitRegExp) .map(function(line) { return line.trim(); }) diff --git a/test/integration/hooks.js b/test/integration/hooks.js --- a/test/integration/hooks.js +++ b/test/integration/hooks.js @@ -1,9 +1,10 @@ var assert = require('assert'); var run = require('./helpers').runMocha; +var splitRegExp = require('./helpers').splitRegExp; var args = []; describe('hooks', function() { - this.timeout(1000); + this.timeout(2000); it('are ran in correct order', function(done) { run('cascade.js', args, function(err, res) { @@ -11,7 +12,7 @@ describe('hooks', function() { assert(!err); - lines = res.output.split(/[\n․]+/).map(function(line) { + lines = res.output.split(splitRegExp).map(function(line) { return line.trim(); }).filter(function(line) { return line.length; diff --git a/test/integration/multiple.done.js b/test/integration/multiple.done.js --- a/test/integration/multiple.done.js +++ b/test/integration/multiple.done.js @@ -4,7 +4,7 @@ var args = []; describe('multiple calls to done()', function() { var res; - this.timeout(1000); + this.timeout(2000); describe('from a spec', function() { before(function(done) { diff --git a/test/integration/pending.js b/test/integration/pending.js --- a/test/integration/pending.js +++ 
b/test/integration/pending.js @@ -4,7 +4,7 @@ var args = []; describe('pending', function() { describe('pending specs', function() { - this.timeout(1000); + this.timeout(2000); it('should be created by omitting a function', function(done) { run('pending/spec.js', args, function(err, res) { @@ -19,7 +19,7 @@ describe('pending', function() { }); describe('synchronous skip()', function() { - this.timeout(1000); + this.timeout(2000); describe('in spec', function() { it('should immediately skip the spec and run all others', function(done) { @@ -62,7 +62,7 @@ describe('pending', function() { }); describe('asynchronous skip()', function() { - this.timeout(1000); + this.timeout(2000); describe('in spec', function() { it('should immediately skip the spec and run all others', function(done) { diff --git a/test/integration/regression.js b/test/integration/regression.js --- a/test/integration/regression.js +++ b/test/integration/regression.js @@ -4,7 +4,7 @@ var path = require('path'); var run = require('./helpers').runMocha; describe('regressions', function() { - this.timeout(1000); + this.timeout(2000); it('issue-1327: should run all 3 specs exactly once', function(done) { var args = []; diff --git a/test/integration/retries.js b/test/integration/retries.js --- a/test/integration/retries.js +++ b/test/integration/retries.js @@ -11,7 +11,7 @@ describe('retries', function() { assert(!err); - lines = res.output.split(/[\n․]+/).map(function(line) { + lines = res.output.split(helpers.splitRegExp).map(function(line) { return line.trim(); }).filter(function(line) { return line.length; @@ -76,7 +76,7 @@ describe('retries', function() { assert(!err); - lines = res.output.split(/[\n․]+/).map(function(line) { + lines = res.output.split(helpers.splitRegExp).map(function(line) { return line.trim(); }).filter(function(line) { return line.length; diff --git a/test/integration/timeout.js b/test/integration/timeout.js --- a/test/integration/timeout.js +++ b/test/integration/timeout.js @@ -3,7 +3,7 @@ var run = require('./helpers').runMochaJSON; var args = []; describe('this.timeout()', function() { - this.timeout(1000); + this.timeout(2000); it('is respected by sync and async suites', function(done) { run('timeout.js', args, function(err, res) { diff --git a/test/integration/uncaught.js b/test/integration/uncaught.js --- a/test/integration/uncaught.js +++ b/test/integration/uncaught.js @@ -3,7 +3,7 @@ var run = require('./helpers').runMochaJSON; var args = []; describe('uncaught exceptions', function() { - this.timeout(1000); + this.timeout(2000); it('handles uncaught exceptions from hooks', function(done) { run('uncaught.hook.js', args, function(err, res) {
Tests are broken in Windows

Having read the contributing guidelines, I realized that on a windows machine I could not confirm whether any changes I committed failed tests or not, because many of the tests are failing in windows. Summary of issues and potential fixes:

- I wasn't able to get the windows make to work properly at all. Using cygwin seems to work, though.
- many references to '/tmp' inside acceptance fs/utils - these can be updated to use os.tmpdir()
- passing `bin/mocha` to exec fails, "bin is not recognized as an internal or external command" -- updating it to `node bin/mocha` makes it work.
- using `spawn` in integration/helpers doesn't work at all. Updating to use `exec`, passing args delimited by a space, works. (also need to preface with `node`, similar to above)

The trickiest one relates to the dot symbol used by windows -- the integration/hook tests take the output of test results and split by new lines and the dot symbol. Thing is, windows doesn't use the dot symbol (\u2024) - [it uses a full stop](https://github.com/mochajs/mocha/blob/master/lib/reporters/base.js#L88), so the tests fail because the split doesn't work, and a full stop winds up in some of the titles. Updating the [regexp](https://github.com/mochajs/mocha/blob/master/test/integration/hook.err.js#L340) to use a dot in addition to \u2024 makes it fail in a different way (similar to how the tests would fail if the test titles included \u2024).

I think the best way to fix this is to first, change the test to use whatever the base reporter exports for the dot symbol, and second, change the windows dot to use \u00B7. \u2024 would work, but using the default cmd font causes a beep every time it is used (which gets exceedingly annoying) - \u00B7 looks pretty close to the dot character, is supported by cmd and likely wouldn't be used in test titles.

I have fixes ready for these and will send a PR... I'm looking for confirmation that these changes (a) are welcome, (b) wouldn't break the tests when run in a unix environment (unsure if spawn vs. exec produces different results and whether prefacing the command with `node` causes issues)
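Editor's note: the test patch above switches these tests from hard-coded `/tmp` paths to paths built from the OS temp directory (it uses the `os-tmpdir` package). A minimal standalone sketch of the same pattern, using plain `os.tmpdir()` instead of that package; the file name is only illustrative:

```js
// Sketch: build platform-safe temp paths instead of hard-coding '/tmp'.
var fs = require('fs');
var os = require('os');
var path = require('path');

// Bind path.join to the temp directory so tmpFile('x') === path.join(tmpdir, 'x'),
// mirroring the helper introduced in the test patch above.
var tmpFile = path.join.bind(path, os.tmpdir());

fs.writeFileSync(tmpFile('mocha-utils.js'), 'yippy skippy ying yang yow');
console.log(tmpFile('mocha-utils.js')); // works on Windows and Unix alike
```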
Hi @tswaters,

> I'm looking for confirmation that these changes (a) are welcome

Fantastic initiative! I must admit I wasn't even aware of the failing tests on windows, but fixing these is more than welcome!

> (b) wouldn't break the tests when run in a unix environment (unsure if spawn vs. exec produces different results and whether prefacing the command with node causes issues)

The easiest way for you to automatically test in a unix environment would be via Travis-CI. If you create a PR for these fixes it will run automatically, but you can also enable it for your own fork `tswaters/mocha` so you can experiment before creating a PR. Currently at least one test fails when running `npm test`:

```bash
34 passing (8s)
1 failing

1) options --grep runs specs matching a RegExp:
   Uncaught AssertionError: false == true
   + expected - actual
   -false
   +true
   at test/integration/options.js:117:9
   at test/integration/helpers.js:102:16
   at ChildProcess.<anonymous> (test/integration/helpers.js:169:5)
   at maybeClose (child_process.js:1015:16)
   at Socket.<anonymous> (child_process.js:1183:11)
   at Pipe.close (net.js:485:12)
make: *** [test-integration] Error 1
```

Going over your individual points:

> I wasn't able to get the windows make to work properly at all. using cygwin seems to work though.

I'm not familiar with Windows `make` at all. Is there a compatible syntax that both versions of `make` can use? Or would this entail a Windows-specific Makefile? I would like to avoid the latter, if at all possible.

> many references to '/tmp' inside acceptance fs/utils - these can be updated to use os.tmpdir()

Good catch.

> passing bin/mocha to exec fails, "bin is not recognized as an internal or external command" -- updating it to node bin/mocha makes it work.

That should be the same. As you can see, `bin/mocha` starts with a [shebang](https://en.wikipedia.org/wiki/Shebang_%28Unix%29) `#!/usr/bin/env node`, which indeed means as much as 'run this file with the `node` version on the user's path'.

> using spawn in integration/helpers doesn't work at all. Updating to use exec, passing args delimited by a space works. (also need to preface with node, similar to above)

What's the issue with `spawn`? As far as I know this should be supported on Windows. The risk with using `exec` is that the max buffer size could potentially be exceeded, as we actually parse the `stdout` of the `child_process`. On the other hand, this probably won't actually be an issue as the output is relatively small.

> The trickiest one relates to the dot symbol used by windows --
> I think the best way to fix this is to first, change the test to use whatever base reporter exports for the dot symbol,

:+1:

> and second, change the windows dot to use \u00B7.

This sounds like it's a backwards incompatible change. Doesn't mean we can't do it, but it would require a release note and a bump in major versions.

I'm not entirely sure about the problems with make -- the windows make is basically this: http://gnuwin32.sourceforge.net/packages/make.htm -- it returns some nonsensical errors and ends up dying -- here's the output I see:

```
C:\Users\Tyler\github\mocha>npm test

> mocha@2.2.5 test C:\Users\Tyler\github\mocha
> make test-all

Access denied - LIB
File not found - -NAME
File not found - -TYPE
File not found - F
Access denied - LIB
File not found - -NAME
File not found - -TYPE
File not found - F
'node_modules' is not recognized as an internal or external command, operable program or batch file.
make: *** [lint] Error 1
npm ERR! Test failed.  See above for more details.
```

So, the problem with Windows and spawn is that it doesn't read shebangs -- like, at all -- and has a tendency to fail if you pass spawn a file that isn't a bat, cmd or exe. Windows doesn't read the shebang, doesn't recognize the file as an executable and flops. exec works a little bit differently than spawn in that you can at least pass it node and a script and it works. There's a lot of stuff I'm probably glossing over here, but if you're interested here's a node core ticket that's been open for 4.5 years that describes a lot of what is going on: https://github.com/joyent/node/issues/2318

I have a feeling the failing test is a symptom of switching to use `exec`, which is what I was afraid of. My guess would be that this one is failing because of the regexp string passed directly into `exec` ... spawn must work better with the arguments. There is another module that can be used as a drop-in replacement for spawn called https://github.com/IndigoUnited/node-cross-spawn-async -- this will work some magic on the commands before passing it to spawn to make it work properly in a windows environment. Anyway, I'll give the travisCI a go in my repo and see if I can get a clean patch that has everything working.

Ok, I got it working but I borked the squash... starting over. So I blew away my repo and reapplied the one commit -- travis ci was buggered, still pointing at the deleted repo -- got that setup again, but I need to push something to get it to go. I'm going to go ahead and create the PR - last I checked, I had everything working.

Is having the build/tests working on windows desirable then? I did some work on it too and can submit a pull request. Most changes require changing some paths to use `path.join`, using `os.tmpDir` instead of /tmp, and running scripts through `child_process.exec(node <script>,..)` instead of assuming a working shebang behavior. I still have some issues on the integration tests on master (I think mostly caused by EOL differences), but I'll keep working on it if it's in scope. I suppose it's not an option, but moving from the Makefile to use npm scripts might be a good idea, getting make to run on windows does take some time :).

Oh hey, I forgot about this.... I've re-based this against master (5b325c8) and verified everything is still working (it appears to be, my travis ci passed anyways). After running tests again after rebase under cygwin make, I noticed two that were failing -- each was doing a split based on new line and bullet. I had added a test helper export that has a working regex so I just updated them to use that instead (1cf2f60). If I recall, the only crunchy bit on this was updating the dot to not use full stop under windows and make it \u00B7, which is a breaking change. Now that I've had 7 months to think about it, it may be easier to instead update the titles of the tests to not include periods so the split works as expected and the tests pass. I've done that. (06911ab)
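Editor's note: the helper change in the test patch above avoids the shebang problem discussed here by spawning the current Node binary (`process.execPath`) and passing `bin/mocha` as a script argument, instead of asking Windows to execute `bin/mocha` directly. A standalone sketch of that pattern; the reporter flag and cwd assumption (repo root) are only illustrative:

```js
// Sketch: spawn mocha through the current Node executable so Windows never
// has to interpret the '#!/usr/bin/env node' shebang in bin/mocha.
var path = require('path');
var spawn = require('child_process').spawn;

var args = [path.join('bin', 'mocha'), '--reporter', 'dot'];
var child = spawn(process.execPath, args);

child.stdout.on('data', function(data) {
  process.stdout.write(data); // forward mocha's output
});
child.on('close', function(code) {
  console.log('mocha exited with code', code);
});
```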
2016-07-01T06:19:32Z
2.5
mochajs/mocha
2094
mochajs__mocha-2094
[ "2089" ]
2a8594424c73ffeca41ef1668446372160528b4a
diff --git a/lib/utils.js b/lib/utils.js --- a/lib/utils.js +++ b/lib/utils.js @@ -472,6 +472,7 @@ function jsonStringify(object, spaces, depth) { break; case 'boolean': case 'regexp': + case 'symbol': case 'number': val = val === 0 && (1 / val) === -Infinity // `-0` ? '-0' @@ -597,6 +598,7 @@ exports.canonicalize = function(value, stack) { case 'number': case 'regexp': case 'boolean': + case 'symbol': canonicalizedObj = value; break; default:
diff --git a/test/acceptance/utils.js b/test/acceptance/utils.js --- a/test/acceptance/utils.js +++ b/test/acceptance/utils.js @@ -331,6 +331,15 @@ describe('lib/utils', function () { stringify(a).should.equal('{\n "foo": 1\n}'); }); + + // In old version node.js, Symbol is not available by default. + if (typeof global.Symbol === 'function') { + it('should handle Symbol', function () { + var symbol = Symbol('value'); + stringify(symbol).should.equal('Symbol(value)'); + stringify({symbol: symbol}).should.equal('{\n "symbol": Symbol(value)\n}') + }); + } }); describe('type', function () {
Diff isn't displayed when an object has a Symbol value

When I assert an object that has a `Symbol` value, the test doesn't display a diff even if it fails. For example, the following test fails without generating a diff.

```javascript
// test.js
const assert = require('assert');

it('should generate diff', () => {
  const obj = { type: Symbol('foo') };
  assert.deepEqual(obj, { type: null });
});
```

result:

![mocha-symbol-before](https://cloud.githubusercontent.com/assets/12684251/12785091/01ee67a0-cace-11e5-83f9-34577f1872cb.png)

Then I found that the `stringify` function in [lib/utils](https://github.com/mochajs/mocha/blob/master/lib/utils.js) throws the following error when the argument object has a `Symbol` value.

```
TypeError: Cannot convert a Symbol value to a string
```

environment:

- mocha: v2.4.5
- node: v4.2.2
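Editor's note: the patch above adds a `'symbol'` case to `jsonStringify` and `canonicalize`. The language rule it works around is that implicit string coercion of a Symbol throws, while explicit conversion does not. A quick illustration:

```js
// Why stringify blew up: implicit coercion of a Symbol throws,
// explicit conversion does not.
var sym = Symbol('value');

console.log(String(sym));    // 'Symbol(value)' -- explicit conversion is allowed
console.log(sym.toString()); // 'Symbol(value)'

try {
  var s = '' + sym;          // implicit coercion inside string concatenation
} catch (e) {
  console.log(e.message);    // 'Cannot convert a Symbol value to a string'
}
```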
2016-02-06T04:09:58Z
2.4
mochajs/mocha
2081
mochajs__mocha-2081
[ "1760", "1936" ]
cf513cbff35141dc7275325bef9fe6d6e13ce979
diff --git a/lib/interfaces/bdd.js b/lib/interfaces/bdd.js --- a/lib/interfaces/bdd.js +++ b/lib/interfaces/bdd.js @@ -79,7 +79,7 @@ module.exports = function(suite) { var it = context.it = context.specify = function(title, fn) { var suite = suites[0]; - if (suite.pending) { + if (suite.isPending()) { fn = null; } var test = new Test(title, fn); diff --git a/lib/interfaces/tdd.js b/lib/interfaces/tdd.js --- a/lib/interfaces/tdd.js +++ b/lib/interfaces/tdd.js @@ -81,7 +81,7 @@ module.exports = function(suite) { */ context.test = function(title, fn) { var suite = suites[0]; - if (suite.pending) { + if (suite.isPending()) { fn = null; } var test = new Test(title, fn); diff --git a/lib/reporters/html.js b/lib/reporters/html.js --- a/lib/reporters/html.js +++ b/lib/reporters/html.js @@ -155,7 +155,7 @@ function HTML(runner) { if (test.state === 'passed') { var url = self.testURL(test); el = fragment('<li class="test pass %e"><h2>%e<span class="duration">%ems</span> <a href="%s" class="replay">‣</a></h2></li>', test.speed, test.title, test.duration, url); - } else if (test.pending) { + } else if (test.isPending()) { el = fragment('<li class="test pass pending"><h2>%e</h2></li>', test.title); } else { el = fragment('<li class="test fail"><h2>%e <a href="%e" class="replay">‣</a></h2></li>', test.title, self.testURL(test)); @@ -193,7 +193,7 @@ function HTML(runner) { // toggle code // TODO: defer - if (!test.pending) { + if (!test.isPending()) { var h2 = el.getElementsByTagName('h2')[0]; on(h2, 'click', function() { diff --git a/lib/reporters/xunit.js b/lib/reporters/xunit.js --- a/lib/reporters/xunit.js +++ b/lib/reporters/xunit.js @@ -131,7 +131,7 @@ XUnit.prototype.test = function(test) { if (test.state === 'failed') { var err = test.err; this.write(tag('testcase', attrs, false, tag('failure', {}, false, cdata(escape(err.message) + '\n' + err.stack)))); - } else if (test.pending) { + } else if (test.isPending()) { this.write(tag('testcase', attrs, false, tag('skipped', {}, true))); } else { this.write(tag('testcase', attrs, true)); diff --git a/lib/runnable.js b/lib/runnable.js --- a/lib/runnable.js +++ b/lib/runnable.js @@ -54,6 +54,7 @@ function Runnable(title, fn) { this._trace = new Error('done() called multiple times'); this._retries = -1; this._currentRetry = 0; + this.pending = false; } /** @@ -124,12 +125,21 @@ Runnable.prototype.enableTimeouts = function(enabled) { /** * Halt and mark as pending. * - * @api private + * @api public */ Runnable.prototype.skip = function() { throw new Pending(); }; +/** + * Check if this runnable or its parent suite is marked as pending. + * + * @api private + */ +Runnable.prototype.isPending = function() { + return this.pending || (this.parent && this.parent.isPending()); +}; + /** * Set number of retries. 
* @@ -302,7 +312,7 @@ Runnable.prototype.run = function(fn) { // sync or promise-returning try { - if (this.pending) { + if (this.isPending()) { done(); } else { callFn(this.fn); diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -507,12 +507,7 @@ Runner.prototype.runTests = function(suite, fn) { return; } - function parentPending(suite) { - return suite.pending || (suite.parent && parentPending(suite.parent)); - } - - // pending - if (test.pending || parentPending(test.parent)) { + if (test.isPending()) { self.emit('pending', test); self.emit('test end', test); return next(); @@ -521,7 +516,7 @@ Runner.prototype.runTests = function(suite, fn) { // execute test and hook(s) self.emit('test', self.test = test); self.hookDown('beforeEach', function(err, errSuite) { - if (suite.pending) { + if (suite.isPending()) { self.emit('pending', test); self.emit('test end', test); return next(); diff --git a/lib/suite.js b/lib/suite.js --- a/lib/suite.js +++ b/lib/suite.js @@ -28,9 +28,6 @@ exports = module.exports = Suite; exports.create = function(parent, title) { var suite = new Suite(title, parent.ctx); suite.parent = parent; - if (parent.pending) { - suite.pending = true; - } title = suite.fullTitle(); parent.addSuite(suite); return suite; @@ -176,6 +173,15 @@ Suite.prototype.bail = function(bail) { return this; }; +/** + * Check if this suite or its parent suite is marked as pending. + * + * @api private + */ +Suite.prototype.isPending = function() { + return this.pending || (this.parent && this.parent.isPending()); +}; + /** * Run `fn(test[, done])` before running tests. * @@ -185,7 +191,7 @@ Suite.prototype.bail = function(bail) { * @return {Suite} for chaining */ Suite.prototype.beforeAll = function(title, fn) { - if (this.pending) { + if (this.isPending()) { return this; } if (typeof title === 'function') { @@ -215,7 +221,7 @@ Suite.prototype.beforeAll = function(title, fn) { * @return {Suite} for chaining */ Suite.prototype.afterAll = function(title, fn) { - if (this.pending) { + if (this.isPending()) { return this; } if (typeof title === 'function') { @@ -245,7 +251,7 @@ Suite.prototype.afterAll = function(title, fn) { * @return {Suite} for chaining */ Suite.prototype.beforeEach = function(title, fn) { - if (this.pending) { + if (this.isPending()) { return this; } if (typeof title === 'function') { @@ -275,7 +281,7 @@ Suite.prototype.beforeEach = function(title, fn) { * @return {Suite} for chaining */ Suite.prototype.afterEach = function(title, fn) { - if (this.pending) { + if (this.isPending()) { return this; } if (typeof title === 'function') {
diff --git a/test/suite.js b/test/suite.js --- a/test/suite.js +++ b/test/suite.js @@ -288,6 +288,11 @@ describe('Suite', function(){ this.first.suites.should.have.length(1); this.first.suites[0].should.equal(this.second); }); + + it('treats suite as pending if its parent is pending', function(){ + this.first.pending = true + this.second.isPending.should.be.true + }); }); // describe('.addTest()', function(){ diff --git a/test/test.js b/test/test.js --- a/test/test.js +++ b/test/test.js @@ -1,4 +1,5 @@ var mocha = require('../') + , should = require('should') , Context = mocha.Context , Test = mocha.Test; @@ -52,4 +53,24 @@ describe('Test', function(){ this._test.clone().file.should.equal('bar'); }); }); + + describe('.isPending()', function(){ + beforeEach(function(){ + this._test = new Test('Is it skipped', function () {}); + }); + + it('should not be pending by default', function(){ + should(this._test.isPending()).not.be.ok(); + }); + + it('should be pending when marked as such', function(){ + this._test.pending = true; + should(this._test.isPending()).be.ok(); + }); + + it('should be pending when its parent is pending', function(){ + this._test.parent = { isPending: function(){ return true } }; + should(this._test.isPending()).be.ok(); + }); + }); });
`this.skip()` completely broken if html reporter used.

Demo: https://github.com/Kirill89/mocha-test

`Error: TypeError: test.err is undefined (http://localhost:1111/node_modules/mocha/mocha.js:2786)`

- Firefox 38.0.5
- Chrome 43.0.2357.124 (64-bit)

this.skip causes: Cannot read property 'toString' of undefined

Error line in mocha.js:

```js
var el;
if (test.state === 'passed') {
  var url = self.testURL(test);
  el = fragment('<li class="test pass %e"><h2>%e<span class="duration">%ems</span> <a href="%s" class="replay">‣</a></h2></li>', test.speed, test.title, test.duration, url);
} else if (test.pending) {
  el = fragment('<li class="test pass pending"><h2>%e</h2></li>', test.title);
} else {
  el = fragment('<li class="test fail"><h2>%e <a href="%e" class="replay">‣</a></h2></li>', test.title, self.testURL(test));
  var stackString; // Note: Includes leading newline
  var message = test.err.toString(); // mocha.js:2427
```

My test cases are

```js
describe('mod', function(){
  describe('mod1', function(){
    before(function(){
      if(!modLoaded) this.skip(); // causes the error
    });

    it('should do stuff', function(){
      // stuff
    });
  });
});
```

Not sure why `this.skip()` is causing an error...
what are you trying to accomplish by skipping the hook?

@boneskull I'm trying to skip the whole "describe" section.

@boneskull mocha already has the same [test](https://github.com/mochajs/mocha/blob/master/test/integration/fixtures/pending/skip.sync.before.js), and it works for the "spec" reporter. Can you explain why it doesn't work for the "html" reporter?

Looks like you're right. Avoiding async hooks/specs, I was able to replicate in the browser:

```javascript
describe('foo', function () {
  before(function() {
    this.skip();
  });

  it('bar', function() {
    // test
  });
});
```

From the console:

```
foo
  - bar

0 passing (9ms)
1 pending
```

But in the browser:

<img width="318" alt="screen shot 2015-07-15 at 2 39 57 am" src="https://cloud.githubusercontent.com/assets/817212/8695598/d4897ed6-2a9a-11e5-9c48-02eecff70d1c.png">

So it seems like it's not being correctly labeled as pending. @Kirill89 Are you able to get `this.skip()` to work from a spec, in the browser?

```javascript
describe('foo', function () {
  it('bar', function() {
    this.skip();
  });
});
```

@danielstjules I am surprised, but that also doesn't work, though with another error.

![](https://www.dropbox.com/s/1jfrf9dp13bvi4m/Screenshot%202015-07-15%2018.46.53.png?dl=1)

Thanks for confirming! Both errors are being propagated by the HTML reporter due to it trying to act on a nonexistent err property for the test, since it wasn't correctly marked as pending. Will look into where that regression may have been introduced. Appreciate your help!

Really nice feature, `this.skip()` (what I was looking for) in hooks (before, after, etc). My question: why is that not documented? Are there more nice magic features that are not documented? :)

@danielstjules any chance to get it fixed soon? Could you change the label? Needs-Feedback -> Bug?

Problem is with this check: https://github.com/mochajs/mocha/blob/master/lib/reporters/html.js#L158 If I change this line to

```js
} else if (test.pending || !test.err) {
```

...then the code seems to work. But I'm not sure such a hack is correct. Could anyone check why the `.pending` flag is lost in the `test end` event after `this.skip()`?

This will be fixed by https://github.com/mochajs/mocha/pull/1945#issuecomment-170399078

Thanks for the info! Still not fixed with 2.4.3. The test from the first message does not show an error on screen, but throws errors to the console and marks the test as failed in the summary. Something has become better, but not all.

You're right - `this.skip()` is mostly broken in the browser. I haven't gotten around to it though. It seems to be fixed for `it`, but not for `before` (most demanded)
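Editor's note: the patch above resolves this by replacing direct reads of `test.pending` with an `isPending()` check that also consults the parent chain. Paraphrasing the patch:

```js
// Paraphrase of the patch: a runnable counts as pending if it is marked
// pending itself or any enclosing suite is pending.
Runnable.prototype.isPending = function() {
  return this.pending || (this.parent && this.parent.isPending());
};

// Suite gets the same recursive check, and reporters (html, xunit) ask
// test.isPending() instead of reading test.pending directly, so a test
// skipped via this.skip() in a before() hook is rendered as pending rather
// than hitting the failure branch with no test.err attached.
```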
2016-01-28T11:39:17Z
2.4
mochajs/mocha
1965
mochajs__mocha-1965
[ "1964" ]
3af1b8a54d7c17fceccf10f4fe02fd03000841c5
diff --git a/lib/interfaces/bdd.js b/lib/interfaces/bdd.js --- a/lib/interfaces/bdd.js +++ b/lib/interfaces/bdd.js @@ -77,7 +77,7 @@ module.exports = function(suite) { * acting as a thunk. */ - context.it = context.specify = function(title, fn) { + var it = context.it = context.specify = function(title, fn) { var suite = suites[0]; if (suite.pending) { fn = null; @@ -93,7 +93,7 @@ module.exports = function(suite) { */ context.it.only = function(title, fn) { - var test = context.it(title, fn); + var test = it(title, fn); var reString = '^' + escapeRe(test.fullTitle()) + '$'; mocha.grep(new RegExp(reString)); return test;
diff --git a/test/acceptance/misc/only/bdd-require.js b/test/acceptance/misc/only/bdd-require.js new file mode 100644 --- /dev/null +++ b/test/acceptance/misc/only/bdd-require.js @@ -0,0 +1,18 @@ +/*jshint node: true */ + +var mocha = require('../../../../lib/mocha'); + +var beforeEach = mocha.beforeEach; +var it = mocha.it; +var describe = mocha.describe; + +describe('it.only via require("mocha")', function() { + beforeEach(function() { + this.didRunBeforeEach = true; + }); + describe("nested within a describe/context", function() { + it.only('should run all enclosing beforeEach hooks', function() { + require('assert').equal(this.didRunBeforeEach, true); + }); + }); +});
require('mocha').beforeEach does not run outer lifecycle hooks when running inner testcases marked by `it.only`

The code associated with this writeup is available here: https://gist.github.com/cowboyd/f5611829c7a3ad084642

Given the following mocha code:

```javascript
var beforeEach = require('mocha').beforeEach;

describe("the outer context", function () {
  beforeEach(function() {
    this.didRunBeforeEach = true;
  });

  describe("the inner context", function() {
    it("runs its parent beforeEach", function() {
      expect(this.didRunBeforeEach).to.equal(true);
    });
  });
});
```

because `"the inner context"` is nested inside `"the outer context"`, it should run the outer context's `beforeEach` hook. It does do this under normal circumstances:

```
~/C/E/mocha-demo ❯❯❯ npm t

> mocha-demo@1.0.0 test /Users/cowboyd/Code/Ember/mocha-demo
> mocha index.js

  the outer context
    the inner context
      ✓ runs its parent beforeEach

  1 passing (7ms)
```

But if we configure the inner context to use `it.only` to select only the single test-case:

```javascript
var beforeEach = require('mocha').beforeEach;

describe("the outer context", function () {
  beforeEach(function() {
    this.didRunBeforeEach = true;
  });

  describe("the inner context", function() {
    it.only("runs its parent beforeEach", function() {
      expect(this.didRunBeforeEach).to.equal(true);
    });
  });
});
```

Then it fails with the following error:

```
~/C/E/mocha-demo ❯❯❯ npm t

> mocha-demo@1.0.0 test /Users/cowboyd/Code/Ember/mocha-demo
> mocha index.js

  1) runs its parent beforeEach

  0 passing (9ms)
  1 failing

  1) runs its parent beforeEach:
     AssertionError: expected undefined to equal true
      at Context.<anonymous> (index.js:15:40)

npm ERR! Test failed.  See above for more details.
```

which indicates that the hook is not being run.
After further investigation, this only happens when using the [Require](http://mochajs.org/#require) interface. If using the global BDD interface, this is not an issue. Also, it appears to be a problem with the `require`d `beforeEach`. When using the `require`'d `it` and `describe`, but the global `beforeEach`, everything works as expected.
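Editor's note: the patch above captures a local reference (`var it = context.it = ...`) so that `it.only` delegates to the same suite-bound `it` rather than whatever `context.it` points to later. The regression test it adds exercises exactly the require-interface usage from this report; a condensed sketch of that fixture:

```js
// Usage pattern from the added regression test: bind the interface
// functions from require('mocha') instead of relying on globals.
var mocha = require('mocha');
var describe = mocha.describe;
var beforeEach = mocha.beforeEach;
var it = mocha.it;

describe('outer', function() {
  beforeEach(function() { this.ready = true; });

  describe('inner', function() {
    it.only('still runs the outer beforeEach', function() {
      require('assert').equal(this.ready, true);
    });
  });
});
```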
2015-11-12T21:06:34Z
2.3
mochajs/mocha
1410
mochajs__mocha-1410
[ "1395", "1395" ]
9cb8a91996d9ecee4e0a954aca7daaa9bc057a46
diff --git a/lib/runnable.js b/lib/runnable.js --- a/lib/runnable.js +++ b/lib/runnable.js @@ -4,7 +4,8 @@ var EventEmitter = require('events').EventEmitter , debug = require('debug')('mocha:runnable') - , milliseconds = require('./ms'); + , milliseconds = require('./ms') + , utils = require('./utils'); /** * Save timer references to avoid Sinon interfering (see GH-237). @@ -226,7 +227,7 @@ Runnable.prototype.run = function(fn){ done(); }); } catch (err) { - done(err); + done(utils.getError(err)); } return; } @@ -243,7 +244,7 @@ Runnable.prototype.run = function(fn){ callFn(this.fn); } } catch (err) { - done(err); + done(utils.getError(err)); } function callFn(fn) { diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -540,7 +540,7 @@ Runner.prototype.uncaught = function(err){ }.call(err) ? err : ( err.message || err )); } else { debug('uncaught undefined exception'); - err = new Error('Caught undefined error, did you throw without specifying what?'); + err = utils.undefinedError(); } err.uncaught = true; diff --git a/lib/utils.js b/lib/utils.js --- a/lib/utils.js +++ b/lib/utils.js @@ -398,3 +398,25 @@ exports.lookupFiles = function lookupFiles(path, extensions, recursive) { return files; }; + +/** + * Generate an undefined error with a message warning the user. + * + * @return {Error} + */ + +exports.undefinedError = function(){ + return new Error('Caught undefined error, did you throw without specifying what?'); +}; + +/** + * Generate an undefined error if `err` is not defined. + * + * @param {Error} err + * @return {Error} + */ + +exports.getError = function(err){ + return err || exports.undefinedError(); +}; +
diff --git a/test/acceptance/throw.js b/test/acceptance/throw.js new file mode 100644 --- /dev/null +++ b/test/acceptance/throw.js @@ -0,0 +1,111 @@ +var mocha = require('../../') + , Suite = mocha.Suite + , Runner = mocha.Runner + , Test = mocha.Test; + +describe('a test that throws', function () { + var suite, runner; + + beforeEach(function(){ + suite = new Suite(null, 'root'); + runner = new Runner(suite); + }) + + this.timeout(50); + + describe('undefined', function (){ + it('should not pass if throwing sync and test is sync', function(done) { + var test = new Test('im sync and throw undefined sync', function(){ + throw undefined; + }); + suite.addTest(test); + runner = new Runner(suite); + runner.on('end', function(){ + runner.failures.should.equal(1); + test.state.should.equal('failed'); + done(); + }); + runner.run(); + }) + + it('should not pass if throwing sync and test is async', function(done){ + var test = new Test('im async and throw undefined sync', function(done2){ + throw undefined; + process.nexTick(done2); + }); + suite.addTest(test); + runner = new Runner(suite); + runner.on('end', function(){ + runner.failures.should.equal(1); + test.state.should.equal('failed'); + done(); + }); + runner.run(); + }); + + it('should not pass if throwing async and test is async', function(done){ + var test = new Test('im async and throw undefined async', function(done2){ + process.nexTick(function(){ + throw undefined; + done2(); + }); + }); + suite.addTest(test); + runner = new Runner(suite); + runner.on('end', function(){ + runner.failures.should.equal(1); + test.state.should.equal('failed'); + done(); + }); + runner.run(); + }) + }) + + describe('null', function (){ + it('should not pass if throwing sync and test is sync', function(done) { + var test = new Test('im sync and throw null sync', function(){ + throw null; + }); + suite.addTest(test); + runner = new Runner(suite); + runner.on('end', function(){ + runner.failures.should.equal(1); + test.state.should.equal('failed'); + done(); + }); + runner.run(); + }) + + it('should not pass if throwing sync and test is async', function(done){ + var test = new Test('im async and throw null sync', function(done2){ + throw null; + process.nexTick(done2); + }); + suite.addTest(test); + runner = new Runner(suite); + runner.on('end', function(){ + runner.failures.should.equal(1); + test.state.should.equal('failed'); + done(); + }); + runner.run(); + }); + + it('should not pass if throwing async and test is async', function(done){ + var test = new Test('im async and throw null async', function(done2){ + process.nexTick(function(){ + throw null; + done2(); + }); + }); + suite.addTest(test); + runner = new Runner(suite); + runner.on('end', function(){ + runner.failures.should.equal(1); + test.state.should.equal('failed'); + done(); + }); + runner.run(); + }) + }) +}) \ No newline at end of file diff --git a/test/runnable.js b/test/runnable.js --- a/test/runnable.js +++ b/test/runnable.js @@ -225,7 +225,7 @@ describe('Runnable(title, fn)', function(){ }); }) - it('should not throw its own exception if passed a non-object', function (done) { + it.skip('should not throw its own exception if passed a non-object', function (done) { var test = new Runnable('foo', function(done) { throw null; process.nextTick(done);
throw undefined

```js
it('expected', function () {
  throw 'foo';
});

it('unexpected', function () {
  throw undefined;
});
```

then

```sh
  1) expected
  ✓ unexpected
```

in 1.21.5
I'm working on this issue right now. I agree that throwing `undefined` should make the test fail. What about throwing `null`?

Current behaviour is: a test passes if thrown either `undefined` or `null`. UNLESS you are throwing async on an async test, due to `Runner.prototype.uncaught`. It even generates an error with the message `'Caught undefined error, did you throw without specifying what?'`. Only failing in that case must not be an intended behaviour.

Checking if the thrown exception is `undefined` can only be done in the `catch` block, since passing `undefined` to the callback means 'no error here'. (Well, you could pass to the callback a reference to a global object that means `undefined`, and then check the reference later, but that's quite ugly)

I made changes so that throwing `undefined` or `null` makes the test fail. However, it breaks the following project test case (line 228 at test/runnable.js): `Runnable(title, fn), .run(fn), when async, when an exception is thrown, should not throw its own exception if passed a non-object`.

If I make it so that throwing `null` doesn't make a test fail whereas throwing `undefined` does make a test fail, the Runnable project test case does not break. But that's because it only checks throwing `null`, and not throwing `undefined`, which I believe is also a non-object.

I will send a PR with some changes and new tests. I'm marking as pending the Runnable test that breaks until we decide something. IMO, if you are throwing, it should make the test fail.
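Editor's note: the patch above normalises falsy throws into a real `Error` via two small helpers in `lib/utils.js`; paraphrasing them:

```js
// Paraphrase of the patch: turn `throw undefined` / `throw null` into a
// descriptive Error so the runner records a failure instead of a pass.
function undefinedError() {
  return new Error('Caught undefined error, did you throw without specifying what?');
}

function getError(err) {
  return err || undefinedError();
}

// Runnable#run's catch blocks then call done(getError(err)), so the
// 'unexpected' test from the report above now fails as it should.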
2014-11-01T13:51:53Z
2
mochajs/mocha
1520
mochajs__mocha-1520
[ "1496" ]
b9256736095e616d510edb4dfb8b50f96528492f
diff --git a/lib/mocha.js b/lib/mocha.js --- a/lib/mocha.js +++ b/lib/mocha.js @@ -410,9 +410,7 @@ Mocha.prototype.run = function(fn){ function done(failures) { if (reporter.done) { reporter.done(failures, fn); - } else { - fn(failures); - } + } else fn && fn(failures); } return runner.run(done);
diff --git a/test/mocha.js b/test/mocha.js new file mode 100644 --- /dev/null +++ b/test/mocha.js @@ -0,0 +1,33 @@ +var Mocha = require('../'); +var Test = Mocha.Test; + +describe('Mocha', function(){ + var blankOpts = { reporter: function(){} }; // no output + + describe('.run(fn)', function(){ + it('should not raise errors if callback was not provided', function(){ + var mocha = new Mocha(blankOpts); + mocha.run(); + }) + + it('should execute the callback when complete', function(done) { + var mocha = new Mocha(blankOpts); + mocha.run(function(){ + done(); + }) + }) + + it('should execute the callback with the number of failures '+ + 'as parameter', function(done) { + var mocha = new Mocha(blankOpts); + var failingTest = new Test('failing test', function(){ + throw new Error('such fail'); + }); + mocha.suite.addTest(failingTest); + mocha.run(function(failures) { + failures.should.equal(1); + done(); + }); + }) + }) +})
Mocha.run throws a TypeError if callback is not provided

To reproduce the issue, install Mocha 2.1.0 and run this script on the Node console.

```javascript
var Mocha = require("mocha");
var mocha = new Mocha();
mocha.addFile("./test.js");
mocha.run();
```

Here is `test.js`:

```javascript
it("nothing", function() {});
```

Then, I get:

```
Donghwan@DONGHWAN ~/Documents/GitHub/vibe-javascript-client (master)
$ node test-runner
  √ nothing

  1 passing (7ms)

c:\Users\Donghwan\Documents\GitHub\vibe-javascript-client\node_modules\mocha\lib\mocha.js:414
      fn(failures);
      ^
TypeError: undefined is not a function
    at done (c:\Users\Donghwan\Documents\GitHub\vibe-javascript-client\node_modules\mocha\lib\mocha.js:414:11)
    at Runner.<anonymous> (c:\Users\Donghwan\Documents\GitHub\vibe-javascript-client\node_modules\mocha\lib\runner.js:590:5)
    at Runner.emit (events.js:117:20)
    at c:\Users\Donghwan\Documents\GitHub\vibe-javascript-client\node_modules\mocha\lib\runner.js:597:10
    at c:\Users\Donghwan\Documents\GitHub\vibe-javascript-client\node_modules\mocha\lib\runner.js:518:7
    at next (c:\Users\Donghwan\Documents\GitHub\vibe-javascript-client\node_modules\mocha\lib\runner.js:248:23)
    at Object._onImmediate (c:\Users\Donghwan\Documents\GitHub\vibe-javascript-client\node_modules\mocha\lib\runner.js:276:5)
    at processImmediate [as _immediateCallback] (timers.js:345:15)
```

2.0.1 works fine. As the [wiki page](https://github.com/mochajs/mocha/wiki/Using-mocha-programmatically) shows use of `mocha.run()`, I think it's a regression in 2.1.0. A workaround is [to pass an empty callback](https://github.com/vibe-project/vibe-javascript-client/commit/058f9d5020306cf2800e9f2862b4a773637483c5).

It looks like https://github.com/mochajs/mocha/commit/30582e646d579ce67ef4229922651da498e79a80 is the cause. Before executing `fn(failures);`, it should check if `fn` is available.
Same issue here, the workaround solved it for now. @dasilvacontin As @julien-f experienced the same issue, you may want to change the label, unconfirmed, to something like invalid or bug? Sure, thanks for the heads up, @flowersinthesand!
2015-01-31T23:35:21Z
2.1
mochajs/mocha
1,878
mochajs__mocha-1878
[ "1864" ]
75a7f71ceeabf8ff1dc775fc08ea30aa20f0928b
diff --git a/lib/reporters/xunit.js b/lib/reporters/xunit.js --- a/lib/reporters/xunit.js +++ b/lib/reporters/xunit.js @@ -77,6 +77,11 @@ function XUnit(runner, options) { }); } +/** + * Inherit from `Base.prototype`. + */ +inherits(XUnit, Base); + /** * Override done to close the stream (if it's a file). * @@ -93,11 +98,6 @@ XUnit.prototype.done = function(failures, fn) { } }; -/** - * Inherit from `Base.prototype`. - */ -inherits(XUnit, Base); - /** * Write out the given line. *
diff --git a/test/integration/reporters.js b/test/integration/reporters.js --- a/test/integration/reporters.js +++ b/test/integration/reporters.js @@ -1,8 +1,12 @@ var assert = require('assert'); +var os = require('os'); +var fs = require('fs'); +var crypto = require('crypto'); +var path = require('path'); var run = require('./helpers').runMocha; describe('reporters', function() { - this.timeout(1000); + this.timeout(3000); describe('markdown', function() { var res; @@ -28,4 +32,29 @@ describe('reporters', function() { assert(res.output.indexOf(src) !== -1, 'No assert found'); }); }); + + describe('xunit', function() { + it('prints test cases with --reporter-options output (issue: 1864)', function(done) { + var randomStr = crypto.randomBytes(8).toString('hex'); + var tmpDir = os.tmpDir().replace(new RegExp(path.sep + '$'), ''); + var tmpFile = tmpDir + path.sep + 'test-issue-1864-' + randomStr + '.xml'; + + var args = ['--reporter=xunit', '--reporter-options', 'output=' + tmpFile]; + var expectedOutput = [ + '<testcase classname="suite" name="test1" time="0"/>', + '<testcase classname="suite" name="test2" time="0"/>', + '</testsuite>' + ].join('\n'); + + run('passing.js', args, function(err, result) { + if (err) return done(err); + + var xml = fs.readFileSync(tmpFile, 'utf8'); + fs.unlinkSync(tmpFile); + + assert(xml.indexOf(expectedOutput) !== -1, 'Did not output all xml'); + done(err); + }); + }); + }); });
xunit reporter does not report <testcase /> Currently the xunit reporter only reports the top-level ``` <testsuite name="Mocha Tests" tests="16" failures="0" errors="0" skipped="0" timestamp="Fri, 04 Sep 2015 23:05:49 GMT" time="0.043"> ``` but not the child test cases. Something that I would expect is: ``` <testsuite name="Mocha Tests" tests="16" failures="0" errors="0" skipped="0" timestamp="Fri, 04 Sep 2015 23:35:14 GMT" time="0.043"> <testcase classname="case1" name="abc" time="0.2"/> <testcase classname="case2" name="cde" time="0.1"/> </testsuite> ``` Is there a reason for it?
quite possible it's broken since we don't have good tests for the reporters yep. since i upgraded mocha from 2.2.4 to 2.3.0. using: ``` --reporter xunit --reporter-options output=build/testreport-integration.xml ``` only writes the first line: ``` <testsuite name="Mocha Tests" tests="242" failures="0" errors="0" skipped="21" timestamp="Fri, 04 Sep 2015 18:55:09 GMT" time="25.353"> ``` Just to help confirm this. Stupid simple test file ``` > cat test.js describe('my test', function() { it('should pass', function(done) { done(); }); it('should fail', function(done) { done(new Error('fail')); }); }); ``` Works as expected in `2.2.5` ``` > mocha --version 2.2.5 > mocha --reporter=xunit test.js <testsuite name="Mocha Tests" tests="2" failures="1" errors="1" skipped="0" timestamp="Wed, 09 Sep 2015 18:29:50 GMT" time="0.006"> <testcase classname="my test" name="should pass" time="0.001"/> <testcase classname="my test" name="should fail" time="0"><failure><![CDATA[fail Error: fail at Context.&lt;anonymous&gt; (test.js:7:8)]]></failure></testcase> </testsuite> > mocha --reporter=xunit --reporter-options output=results.xml test.js; cat results.xml <testsuite name="Mocha Tests" tests="2" failures="1" errors="1" skipped="0" timestamp="Wed, 09 Sep 2015 18:29:56 GMT" time="0.007"> <testcase classname="my test" name="should pass" time="0.001"/> <testcase classname="my test" name="should fail" time="0"><failure><![CDATA[fail Error: fail at Context.&lt;anonymous&gt; (test.js:7:8)]]></failure></testcase> </testsuite> ``` But not `2.3.0` ``` > mocha --version 2.3.0 > mocha --reporter=xunit test.js <testsuite name="Mocha Tests" tests="2" failures="1" errors="1" skipped="0" timestamp="Wed, 09 Sep 2015 18:31:10 GMT" time="0.007"> <testcase classname="my test" name="should pass" time="0.001"/> <testcase classname="my test" name="should fail" time="0"><failure><![CDATA[fail Error: fail at Context.&lt;anonymous&gt; (test.js:7:8)]]></failure></testcase> </testsuite> > mocha --reporter=xunit --reporter-options output=results.xml test.js; cat results.xml <testsuite name="Mocha Tests" tests="2" failures="1" errors="1" skipped="0" timestamp="Wed, 09 Sep 2015 18:31:12 GMT" time="0.007"> ```
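A hedged guess at the mechanism, inferred from the fix rather than stated in the thread: at the time, Node's `util.inherits()` replaced the constructor's prototype object, so a `done` override attached before the `inherits(XUnit, Base)` call was silently discarded and the output stream was never closed, leaving most of the XML unflushed. The sketch below only illustrates the safe ordering; `Base` and `XUnit` here are stand-ins, not the real reporter classes.

```js
var inherits = require('util').inherits;

function Base() {}      // stand-in for the base reporter
function XUnit() {}     // stand-in for the xunit reporter

// Set up inheritance first: util.inherits (as implemented back then) swaps in
// a brand new XUnit.prototype, so anything attached earlier would be lost.
inherits(XUnit, Base);

// Now the override sticks; ending the stream is what flushes the buffered
// <testcase> lines into the --reporter-options output file before exit.
XUnit.prototype.done = function (failures, fn) {
  if (this.fileStream) {
    this.fileStream.end(function () {
      fn(failures);
    });
  } else {
    fn(failures);
  }
};
```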
2015-09-09T19:33:21Z
2.3
mochajs/mocha
1,698
mochajs__mocha-1698
[ "1687" ]
28e0a4f9abfdae15841ce01c89e5c59224f6fc2d
diff --git a/lib/reporters/html.js b/lib/reporters/html.js --- a/lib/reporters/html.js +++ b/lib/reporters/html.js @@ -5,6 +5,7 @@ var Base = require('./base') , utils = require('../utils') , Progress = require('../browser/progress') + , escapeRe = require('../browser/escape-string-regexp') , escape = utils.escape; /** @@ -200,7 +201,7 @@ var makeUrl = function makeUrl(s) { search = search.replace(/[?&]grep=[^&\s]*/g, '').replace(/^&/, '?'); } - return window.location.pathname + (search ? search + '&' : '?' ) + 'grep=' + encodeURIComponent(s); + return window.location.pathname + (search ? search + '&' : '?' ) + 'grep=' + encodeURIComponent(escapeRe(s)); }; /**
diff --git a/test/browser/ui.html b/test/browser/ui.html new file mode 100644 --- /dev/null +++ b/test/browser/ui.html @@ -0,0 +1,46 @@ +<html> + <head> + <title>Mocha</title> + <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> + <meta name="viewport" content="width=device-width, initial-scale=1.0"> + <link rel="stylesheet" href="../../mocha.css" /> + <script src="../../mocha.js"></script> + <script>mocha.setup('bdd')</script> + <script> + function assert(expr, msg) { + if (!expr) throw new Error(msg || 'failed'); + } + </script> + <script src="ui.js"></script> + </head> + <body> + <div id="mocha"></div> + <script> + (function(window) { + var location = window.location; + mocha.checkLeaks(); + var runner = mocha.run(); + var count = 0; + setTimeout(run, 1000); + + function run() { + var regex = [ + '', // All + '%5C%24%5C.jQuery', // $.jQuery + '%5C%24%5C.jQuery%20%5C.on%5C(%5C)', // $.jQuery .on() + ] + , qs = location.search.replace('?grep=', '') + , re = ~qs.indexOf('%') ? qs : decodeURIComponent(qs) + , grep = regex[regex.indexOf(re) + 1] + , anchors = document.getElementsByTagName('a'); + + // Locate first 'a' element w/ matching grep param; click it + for (var i = 0; i < anchors.length; i++) { + if (anchors[i].href && anchors[i].href.indexOf(grep) > -1) + return void anchors[i].click(); + } + } + })(window); + </script> + </body> +</html> diff --git a/test/browser/ui.js b/test/browser/ui.js new file mode 100644 --- /dev/null +++ b/test/browser/ui.js @@ -0,0 +1,31 @@ +// test titles containing regex-conflicting characters + +// leading $ +describe('$.jQuery', function() { + // parens + describe('.on()', function () { + it('should set an event', function() { + assert(true); + }); + }); + + describe('.off()', function () { + it('should remove an event', function () { + + }); + }); +}); + +// another generic describe block to verify it is absent +// when greeping on $.jQuery +describe('@Array', function() { + it('.pop()', function() { + assert(true); + }); + it('.push()', function() { + assert(true); + }); + it('.length', function() { + assert(true); + }); +});
html reporter link to specific test is broken on some special characters If the test title contains some special characters (for instance $ or () ) then its link in the HTML report does not work. IMO the fix should be escaping regex [here](https://github.com/mochajs/mocha/blob/908d8dbe15b5ef5f4a5658f6cc870bf563e80717/lib/reporters/html.js#L192) before calling encodeURIComponent(s)
I can reproduce the bug. Digging deeper, the issue is caused because the `grep` query string argument is passed verbatim into the `RegExp` constructor, [here](https://github.com/mochajs/mocha/blob/master/support/tail.js): ``` javascript if (query.grep) mocha.grep(new RegExp(query.grep)); ``` Because the grep argument isn't escaped, it produces regex objects like: ``` javascript /test that $.jQuery works/ ``` Which of course don't work. Patch incoming.
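A rough sketch of that escaping step; the helper name `escapeRe` and the exact place it is applied are assumptions here, mirroring what the incoming patch does for the per-test grep links.

```js
// Escape regex metacharacters so a title like "$.jQuery .on()" survives the
// round trip: per-test link -> ?grep=... -> new RegExp(query.grep).
function escapeRe(str) {
  return str.replace(/[|\\{}()[\]^$+*?.]/g, '\\$&');
}

function makeUrl(title) {
  return window.location.pathname + '?grep=' + encodeURIComponent(escapeRe(title));
}
```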
2015-05-13T06:45:46Z
2.2
mochajs/mocha
1,337
mochajs__mocha-1337
[ "1066" ]
9b747dba4217baac45eb47aa7303a3dba55c2a9f
diff --git a/lib/runnable.js b/lib/runnable.js --- a/lib/runnable.js +++ b/lib/runnable.js @@ -45,6 +45,7 @@ function Runnable(title, fn) { this._slow = 75; this._enableTimeouts = true; this.timedOut = false; + this._trace = new Error('done() called multiple times') } /** @@ -190,14 +191,14 @@ Runnable.prototype.run = function(fn){ function multiple(err) { if (emitted) return; emitted = true; - self.emit('error', err || new Error('done() called multiple times')); + self.emit('error', err || new Error('done() called multiple times; stacktrace may be inaccurate')); } // finished function done(err) { var ms = self.timeout(); if (self.timedOut) return; - if (finished) return multiple(err); + if (finished) return multiple(err || self._trace); self.clearTimeout(); self.duration = new Date - start; finished = true;
diff --git a/test/acceptance/multiple.done.js b/test/acceptance/multiple.done.js --- a/test/acceptance/multiple.done.js +++ b/test/acceptance/multiple.done.js @@ -12,4 +12,12 @@ describe('multiple calls to done()', function(){ // done(); }); }) + + it('should produce a reasonable trace', function (done) { + process.nextTick(function() { + done(); + // uncomment + // done() + }) + }); })
Misleading source info in "done() called multiple times" message The error message "done() called multiple times" will be associated with whatever part of the test suite that's currently running. This can lead to a lot of confusion for the uninitiated developer. For example, consider the following suite: ``` javascript describe("a simple test suite", function () { beforeEach(function (done) { setTimeout(done, 150); }); afterEach(function (done) { setTimeout(done, 50); setTimeout(done, 100); }); it("has a simple test", function (done) { setTimeout(done, 100); }); it("has another simple test", function (done) { setTimeout(done, 100); }); }); ``` Running `mocha done-message.js` will result in: ``` ․․ 1 passing (356ms) 1 failing 1) a simple test suite "before each" hook: Uncaught Error: done() called multiple times at multiple (/usr/local/lib/node_modules/mocha/lib/runnable.js:175:31) at done (/usr/local/lib/node_modules/mocha/lib/runnable.js:181:26) at null._onTimeout (/usr/local/lib/node_modules/mocha/lib/runnable.js:197:9) at Timer.listOnTimeout [as ontimeout] (timers.js:110:15) ``` Note how the "done() called multiple times" will be listed having the `beforeEach` hook as the point where it came from. This is of course wrong, the `beforeEach` simply happens to be the active code part when the second `done()` call is being run. Additionally, the second test gets marked as a failure, even though it did nothing wrong! I don't know Mocha's internals, so I don't know whether this can be fixed easily at all. Some ideas: - Pass a new `done` callback to the tests each time, that has the source information as a closure variable? - Change the error reporting so that the `done` error message is not associated to any code part at all? - Make it clear in the error message that this message can literally come from everywhere? - Make double-done-calling a fatal error that causes the entire suite to stop and tell the user that the second call to `done` can come from anywhere? Tested with Mocha 1.15.1. This issue may or may not be related to #990. --- More examples: ``` javascript describe("a simple test suite", function () { beforeEach(function (done) { setTimeout(done, 150); }); afterEach(function (done) { setTimeout(done, 200); done(); }); it("has a simple test", function (done) { setTimeout(done, 100); }); it("has another simple test", function (done) { setTimeout(done, 100); }); }); ``` This will cause the error to be associated to the second test. --- ``` javascript describe("a simple test suite", function () { beforeEach(function (done) { setTimeout(done, 150); }); afterEach(function (done) { done(); done(); }); it("has a simple test", function (done) { setTimeout(done, 100); }); it("has another simple test", function (done) { setTimeout(done, 100); }); }); ``` This will result in no error at all, even though `done` gets called multiple times!
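A sketch of the first idea above (capture source information up front so the later error can point at the owning test); the shape of `Runnable` here is simplified and only illustrative.

```js
function Runnable(title, fn) {
  this.title = title;
  this.fn = fn;
  // Capture a stack trace now, while we still know which test or hook owns
  // this callback; if done() is later called twice, this trace points at the
  // right place instead of whatever code happens to be running at that moment.
  this._trace = new Error('done() called multiple times');
}

Runnable.prototype.run = function (callback) {
  var self = this;
  var finished = false;

  function done(err) {
    if (finished) {
      // second call: surface the pre-captured trace
      return callback(err || self._trace);
    }
    finished = true;
    callback(err);
  }

  this.fn(done);
};
```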
I ran into the same issue. Will be nice mocha can fix this. +1 Seeing this in version 1.19.0 as well. This does look to be the same issue as #990 but this report has simple code examples that demonstrate the problem. Perhaps @hallas won't dismiss this issue with obstinacy. For me this turned out to be a library, sailsjs (based on express), that I was using calling its done callback multiple times. Originally, my `after()` block looked like: ``` javascript after(function(done) { app.lower(done); }); ``` I added in a check and arbitrary wait time: ``` javascript after(function(done) { var done_called = false; app.lower(function() { if (!done_called) { done_called = true; setTimeout(function() { sails.log.debug("inside app.lower, callback not called yet. calling."); done(); }, 1000); } else { sails.log.debug("inside app.lower, callback already called."); } }); }); ``` This is not perfect but it allows me to call done once and use the `watch` feature of mocha.
2014-09-05T06:37:26Z
1.21
mochajs/mocha
1,243
mochajs__mocha-1243
[ "1242" ]
fb0447ed60a7b74493746217de0b06d30dafd5bd
diff --git a/lib/runnable.js b/lib/runnable.js --- a/lib/runnable.js +++ b/lib/runnable.js @@ -160,7 +160,6 @@ Runnable.prototype.globals = function(arr){ Runnable.prototype.run = function(fn){ var self = this - , ms = this.timeout() , start = new Date , ctx = this.ctx , finished @@ -177,11 +176,13 @@ Runnable.prototype.run = function(fn){ // finished function done(err) { + var ms = self.timeout(); if (self.timedOut) return; if (finished) return multiple(err); self.clearTimeout(); self.duration = new Date - start; finished = true; + if (!err && self.duration > ms) err = new Error('timeout of ' + ms + 'ms exceeded'); fn(err); }
diff --git a/test/acceptance/failing/timeout.js b/test/acceptance/failing/timeout.js new file mode 100644 --- /dev/null +++ b/test/acceptance/failing/timeout.js @@ -0,0 +1,18 @@ + +describe('timeout', function(){ + this.timeout(20); + + it('should be honored with sync suites', function(){ + sleep(30); + }); + + it('should be honored with async suites', function(done){ + sleep(30); + done(); + }); + + function sleep(ms){ + var start = Date.now(); + while(start + ms > Date.now())continue; + } +});
-t isn't honored for long running sync operations. I believe the duration should be checked against `_timeout` [here](https://github.com/visionmedia/mocha/blob/master/lib/runnable.js#L183). Thoughts? Something like: ``` javascript if(that._timeout < duration)err = new Error('took too long!'); ```
2014-06-19T06:40:22Z
1.2
mochajs/mocha
1,224
mochajs__mocha-1224
[ "1191" ]
bc708c17225f60b1c8ceec5c2f77298273bdf297
diff --git a/lib/runnable.js b/lib/runnable.js --- a/lib/runnable.js +++ b/lib/runnable.js @@ -223,7 +223,13 @@ Runnable.prototype.run = function(fn){ var result = fn.call(ctx); if (result && typeof result.then === 'function') { self.resetTimeout(); - result.then(function(){ done() }, done); + result + .then(function() { + done() + }, + function(reason) { + done(reason || new Error('Promise rejected with no or falsy reason')) + }); } else { done(); }
diff --git a/test/runnable.js b/test/runnable.js --- a/test/runnable.js +++ b/test/runnable.js @@ -299,6 +299,28 @@ describe('Runnable(title, fn)', function(){ }) }) + describe('when the promise is rejected without a reason', function(){ + var expectedErr = new Error('Promise rejected with no or falsy reason'); + var rejectedPromise = { + then: function (fulfilled, rejected) { + process.nextTick(function () { + rejected(); + }); + } + }; + + it('should invoke the callback', function(done){ + var test = new Runnable('foo', function(){ + return rejectedPromise; + }); + + test.run(function(err){ + err.should.eql(expectedErr); + done(); + }); + }) + }) + describe('when the promise takes too long to settle', function(){ var foreverPendingPromise = { then: function () { }
Tests succeed on rejected falsey promises Using Q, one of these two tests fails and the other one succeeds: ``` describe("mocha with promises", function() { it("should fail on falsey rejected promises", function() { return q.reject(false); }); it("should fail on truthy rejected promises", function() { return q.reject(true); }); }); ``` It appears that `q.reject(x)` where `x` is one of the six falsey values does not fail the test, whereas truthy values do.
2014-05-22T21:04:36Z
1.19
mochajs/mocha
1,110
mochajs__mocha-1110
[ "1015" ]
ffaa38d49d10d4a5efd8e8b67db2960c4731cdc3
diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -168,10 +168,6 @@ Runner.prototype.checkGlobals = function(test){ ok = ok.concat(test._allowedGlobals || []); } - // check length - 2 ('errno' and 'location' globals) - if (isNode && 1 == ok.length - globals.length) return; - else if (2 == ok.length - globals.length) return; - if(this.prevGlobalsLength == globals.length) return; this.prevGlobalsLength = globals.length;
diff --git a/test/runner.js b/test/runner.js --- a/test/runner.js +++ b/test/runner.js @@ -97,6 +97,21 @@ describe('Runner', function(){ runner.checkGlobals('im a test'); }) + it('should emit "fail" when a single new disallowed global is introduced after a single extra global is allowed', function(done) { + var doneCalled = false; + runner.globals('good'); + global.bad = 1; + runner.on('fail', function(test, err) { + delete global.bad; + done(); + doneCalled = true; + }); + runner.checkGlobals('test'); + if (!doneCalled) { + done(Error("Expected test failure did not occur.")); + } + }); + it ('should not fail when a new common global is introduced', function(){ // verify that the prop isn't enumerable delete global.XMLHttpRequest;
How is mocha.globals supposed to work? Sorry for the terrible issue title :grin:. When testing a jQuery plugin with Mocha, I was warned about a global variable leak which [seems to be normal](http://bugs.jquery.com/ticket/14156). So, I decided to get rid of it using `mocha.globals(["jQuery*"])` and it worked as expected... but I also tried without the wildcard, like this: `mocha.globals(["jQuery"])`, and this worked too... and even `mocha.globals(["wth"])` got the job done. So, maybe there is something a little buggy here, what do you think?
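For what it's worth, the whitelist is matched by name, with a trailing `*` treated as a prefix match, and at the time the leak check also skipped itself whenever the allowed list was one or two entries longer than the current set of globals, which is likely why even `mocha.globals(["wth"])` appeared to work. A usage sketch, assuming a browser setup:

```js
// Browser setup: whitelist globals that the code under test legitimately
// creates. A trailing "*" matches by prefix, so "jQuery*" also covers the
// temporary "jQuery<timestamp>" globals jQuery defines internally.
mocha.setup('bdd');
mocha.globals(['jQuery*']);

// load spec files here ...

mocha.run();
```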
2014-01-13T03:33:27Z
1.17
mochajs/mocha
795
mochajs__mocha-795
[ "797" ]
4b05a59ed249095496a29930c975dfccd056e64e
diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -234,6 +234,8 @@ Runner.prototype.hook = function(name, fn){ if (!hook) return fn(); self.currentRunnable = hook; + hook.ctx.currentTest = self.test; + self.emit('hook', hook); hook.on('error', function(err){ @@ -246,6 +248,7 @@ Runner.prototype.hook = function(name, fn){ if (testError) self.fail(self.test, testError); if (err) return self.failHook(hook, err); self.emit('hook end', hook); + delete hook.ctx.currentTest; next(++i); }); }
diff --git a/test/hook.async.js b/test/hook.async.js --- a/test/hook.async.js +++ b/test/hook.async.js @@ -13,18 +13,24 @@ describe('async', function(){ , 'before all' , 'parent before' , 'before' + , 'before test one' , 'one' , 'after' + , 'after test one passed' , 'parent after' , 'parent before' , 'before' + , 'before test two' , 'two' , 'after' + , 'after test two passed' , 'parent after' , 'parent before' , 'before' + , 'before test three' , 'three' , 'after' + , 'after test three passed' , 'parent after' , 'after all' , 'root after all']); @@ -48,8 +54,12 @@ describe('async', function(){ }); beforeEach(function(done){ + var ctx = this; process.nextTick(function(){ calls.push('before'); + if (ctx.currentTest) { + calls.push('before test ' + ctx.currentTest.title); + } done(); }) }) @@ -59,7 +69,8 @@ describe('async', function(){ 'root before all' , 'before all' , 'parent before' - , 'before']); + , 'before' + , 'before test one']); calls.push('one'); process.nextTick(done); }) @@ -70,11 +81,14 @@ describe('async', function(){ , 'before all' , 'parent before' , 'before' + , 'before test one' , 'one' , 'after' + , 'after test one passed' , 'parent after' , 'parent before' - , 'before']); + , 'before' + , 'before test two']); calls.push('two'); }) @@ -84,22 +98,31 @@ describe('async', function(){ , 'before all' , 'parent before' , 'before' + , 'before test one' , 'one' , 'after' + , 'after test one passed' , 'parent after' , 'parent before' , 'before' + , 'before test two' , 'two' , 'after' + , 'after test two passed' , 'parent after' , 'parent before' - , 'before']); + , 'before' + , 'before test three']); calls.push('three'); }) afterEach(function(done){ + var ctx = this; process.nextTick(function(){ calls.push('after'); + if (ctx.currentTest) { + calls.push('after test ' + ctx.currentTest.title + ' ' + ctx.currentTest.state); + } done(); }) }) diff --git a/test/hook.sync.js b/test/hook.sync.js --- a/test/hook.sync.js +++ b/test/hook.sync.js @@ -13,10 +13,16 @@ describe('serial', function(){ describe('hooks', function(){ beforeEach(function(){ calls.push('before'); + if (this.currentTest) { + calls.push('before test ' + this.currentTest.title); + } }) it('one', function(){ - calls.should.eql(['parent before', 'before']); + calls.should.eql([ + 'parent before' + , 'before' + , 'before test one']); calls.push('one'); }) @@ -24,11 +30,14 @@ describe('serial', function(){ calls.should.eql([ 'parent before' , 'before' + , 'before test one' , 'one' , 'after' + , 'after test one passed' , 'parent after' , 'parent before' - , 'before']); + , 'before' + , 'before test two']); calls.push('two'); }) @@ -36,39 +45,53 @@ describe('serial', function(){ calls.should.eql([ 'parent before' , 'before' + , 'before test one' , 'one' , 'after' + , 'after test one passed' , 'parent after' , 'parent before' , 'before' + , 'before test two' , 'two' , 'after' + , 'after test two passed' , 'parent after' , 'parent before' - , 'before']); + , 'before' + , 'before test three']); calls.push('three'); }) afterEach(function(){ calls.push('after'); + if (this.currentTest) { + calls.push('after test ' + this.currentTest.title + ' ' + this.currentTest.state); + } }) after(function(){ calls.should.eql([ 'parent before' , 'before' + , 'before test one' , 'one' , 'after' + , 'after test one passed' , 'parent after' , 'parent before' , 'before' + , 'before test two' , 'two' , 'after' + , 'after test two passed' , 'parent after' , 'parent before' , 'before' + , 'before test three' , 'three' , 'after' + , 'after test three 
passed' , 'parent after']); }) }) diff --git a/test/hook.sync.nested.js b/test/hook.sync.nested.js --- a/test/hook.sync.nested.js +++ b/test/hook.sync.nested.js @@ -5,61 +5,94 @@ describe('serial', function(){ beforeEach(function(){ calls.push('parent before'); + if (this.currentTest) { + calls.push('parent before test ' + this.currentTest.title); + } }) afterEach(function(){ calls.push('parent after'); + if (this.currentTest) { + calls.push('parent after test ' + this.currentTest.title + ' ' + this.currentTest.state); + } }); it('foo', function(){ - calls.should.eql(['parent before']); + calls.should.eql([ + 'parent before' + , 'parent before test foo']); calls.push('foo'); }) it('bar', function(){ calls.should.eql([ 'parent before' + , 'parent before test foo' , 'foo' , 'parent after' - , 'parent before']); + , 'parent after test foo passed' + , 'parent before' + , 'parent before test bar']); }) describe('hooks', function(){ beforeEach(function(){ calls.push('before'); + if (this.currentTest) { + calls.push('before test ' + this.currentTest.title); + } }) it('one', function(){ calls.should.eql([ 'parent before' + , 'parent before test foo' , 'foo' , 'parent after' + , 'parent after test foo passed' , 'parent before' + , 'parent before test bar' , 'parent after' + , 'parent after test bar passed' , 'parent before' - , 'before']); + , 'parent before test one' + , 'before' + , 'before test one']); calls.push('one'); }) it('two', function(){ calls.should.eql([ 'parent before' + , 'parent before test foo' , 'foo' , 'parent after' + , 'parent after test foo passed' , 'parent before' + , 'parent before test bar' , 'parent after' + , 'parent after test bar passed' , 'parent before' + , 'parent before test one' , 'before' + , 'before test one' , 'one' , 'after' + , 'after test one passed' , 'parent after' + , 'parent after test one passed' , 'parent before' - , 'before']); + , 'parent before test two' + , 'before' + , 'before test two']); calls.push('two'); }); afterEach(function(){ calls.push('after'); + if (this.currentTest) { + calls.push('after test ' + this.currentTest.title + ' ' + this.currentTest.state); + } }) }) })
how to get test data in beforeEach() and afterEach() I need to get the name of each test in `beforeEach` and the pass/fail status of each finished test in `afterEach`. I looked at using `this.test`, but from there the best thing I found was `this.test.parent`, which is the suite of tests. That's all well and good, but how do I know which of those tests is the one whose before/after block I'm in?
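Assuming a mocha version where hooks expose the current test on their context, a usage sketch:

```js
describe('a plugin', function () {
  beforeEach(function () {
    // the test that is about to run
    console.log('starting:', this.currentTest.title);
  });

  afterEach(function () {
    // after the test has run, state is 'passed' or 'failed'
    console.log('finished:', this.currentTest.title, this.currentTest.state);
  });

  it('does something', function () {});
});
```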
2013-04-05T12:07:00Z
1.9
mochajs/mocha
577
mochajs__mocha-577
[ "402" ]
d162c324ec1f077785d6ec282d4abfaff8476335
diff --git a/lib/context.js b/lib/context.js --- a/lib/context.js +++ b/lib/context.js @@ -40,6 +40,19 @@ Context.prototype.timeout = function(ms){ return this; }; +/** + * Set test slowness threshold `ms`. + * + * @param {Number} ms + * @return {Context} self + * @api private + */ + +Context.prototype.slow = function(ms){ + this.runnable().slow(ms); + return this; +}; + /** * Inspect the context void of `._runnable`. * diff --git a/lib/mocha.js b/lib/mocha.js --- a/lib/mocha.js +++ b/lib/mocha.js @@ -52,6 +52,7 @@ function image(name) { * - `reporter` reporter instance, defaults to `mocha.reporters.Dot` * - `globals` array of accepted globals * - `timeout` timeout in milliseconds + * - `slow` milliseconds to wait before considering a test slow * - `ignoreLeaks` ignore global leaks * - `grep` string or regexp to filter tests with * @@ -68,6 +69,7 @@ function Mocha(options) { this.ui(options.ui); this.reporter(options.reporter); if (options.timeout) this.timeout(options.timeout); + if (options.slow) this.slow(options.slow); } /** @@ -239,6 +241,19 @@ Mocha.prototype.timeout = function(timeout){ return this; }; +/** + * Set slowness threshold in milliseconds. + * + * @param {Number} slow + * @return {Mocha} + * @api public + */ + +Mocha.prototype.slow = function(slow){ + this.suite.slow(slow); + return this; +}; + /** * Run tests and invoke `fn()` when complete. * diff --git a/lib/reporters/base.js b/lib/reporters/base.js --- a/lib/reporters/base.js +++ b/lib/reporters/base.js @@ -119,13 +119,6 @@ exports.cursor = { } }; -/** - * A test is considered slow if it - * exceeds the following value in milliseconds. - */ - -exports.slow = 75; - /** * Outut the given `failures` as a list. * @@ -227,8 +220,8 @@ function Base(runner) { runner.on('pass', function(test){ stats.passes = stats.passes || 0; - var medium = exports.slow / 2; - test.speed = test.duration > exports.slow + var medium = test.slow() / 2; + test.speed = test.duration > test.slow() ? 'slow' : test.duration > medium ? 'medium' diff --git a/lib/runnable.js b/lib/runnable.js --- a/lib/runnable.js +++ b/lib/runnable.js @@ -36,6 +36,7 @@ function Runnable(title, fn) { this.async = fn && fn.length; this.sync = ! this.async; this._timeout = 2000; + this._slow = 75; this.timedOut = false; } @@ -61,6 +62,21 @@ Runnable.prototype.timeout = function(ms){ return this; }; +/** + * Set & get slow `ms`. + * + * @param {Number} ms + * @return {Runnable|Number} ms or self + * @api private + */ + +Runnable.prototype.slow = function(ms){ + if (0 === arguments.length) return this._slow; + debug('timeout %d', ms); + this._slow = ms; + return this; +}; + /** * Return the full title generated by recursively * concatenating the parent's full title. 
diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -176,7 +176,6 @@ Runner.prototype.failHook = function(hook, err){ Runner.prototype.hook = function(name, fn){ var suite = this.suite , hooks = suite['_' + name] - , ms = suite._timeout , self = this , timer; @@ -485,4 +484,4 @@ function filterLeaks(ok) { }); return matched.length == 0 && (!global.navigator || 'onerror' !== key); }); -} \ No newline at end of file +} diff --git a/lib/suite.js b/lib/suite.js --- a/lib/suite.js +++ b/lib/suite.js @@ -57,6 +57,7 @@ function Suite(title, ctx) { this._afterAll = []; this.root = !title; this._timeout = 2000; + this._slow = 75; this._bail = false; } @@ -78,6 +79,7 @@ Suite.prototype.clone = function(){ debug('clone'); suite.ctx = this.ctx; suite.timeout(this.timeout()); + suite.slow(this.slow()); suite.bail(this.bail()); return suite; }; @@ -98,6 +100,22 @@ Suite.prototype.timeout = function(ms){ return this; }; +/** + * Set slow `ms` or short-hand such as "2s". + * + * @param {Number|String} ms + * @return {Suite|Number} for chaining + * @api private + */ + +Suite.prototype.slow = function(ms){ + if (0 === arguments.length) return this._slow; + if (String(ms).match(/s$/)) ms = parseFloat(ms) * 1000; + debug('slow %d', ms); + this._slow = parseInt(ms, 10); + return this; +}; + /** * Sets whether to bail after first error. * @@ -126,6 +144,7 @@ Suite.prototype.beforeAll = function(fn){ var hook = new Hook('"before all" hook', fn); hook.parent = this; hook.timeout(this.timeout()); + hook.slow(this.slow()); hook.ctx = this.ctx; this._beforeAll.push(hook); this.emit('beforeAll', hook); @@ -145,6 +164,7 @@ Suite.prototype.afterAll = function(fn){ var hook = new Hook('"after all" hook', fn); hook.parent = this; hook.timeout(this.timeout()); + hook.slow(this.slow()); hook.ctx = this.ctx; this._afterAll.push(hook); this.emit('afterAll', hook); @@ -164,6 +184,7 @@ Suite.prototype.beforeEach = function(fn){ var hook = new Hook('"before each" hook', fn); hook.parent = this; hook.timeout(this.timeout()); + hook.slow(this.slow()); hook.ctx = this.ctx; this._beforeEach.push(hook); this.emit('beforeEach', hook); @@ -183,6 +204,7 @@ Suite.prototype.afterEach = function(fn){ var hook = new Hook('"after each" hook', fn); hook.parent = this; hook.timeout(this.timeout()); + hook.slow(this.slow()); hook.ctx = this.ctx; this._afterEach.push(hook); this.emit('afterEach', hook); @@ -200,6 +222,7 @@ Suite.prototype.afterEach = function(fn){ Suite.prototype.addSuite = function(suite){ suite.parent = this; suite.timeout(this.timeout()); + suite.slow(this.slow()); suite.bail(this.bail()); this.suites.push(suite); this.emit('suite', suite); @@ -217,6 +240,7 @@ Suite.prototype.addSuite = function(suite){ Suite.prototype.addTest = function(test){ test.parent = this; test.timeout(this.timeout()); + test.slow(this.slow()); test.ctx = this.ctx; this.tests.push(test); this.emit('test', test);
diff --git a/test/runnable.js b/test/runnable.js --- a/test/runnable.js +++ b/test/runnable.js @@ -41,6 +41,14 @@ describe('Runnable(title, fn)', function(){ }) }) + describe('#slow(ms)', function(){ + it('should set the slow threshold', function(){ + var run = new Runnable; + run.slow(100) + run.slow().should.equal(100); + }) + }) + describe('.title', function(){ it('should be present', function(){ new Runnable('foo').title.should.equal('foo'); @@ -206,4 +214,4 @@ describe('Runnable(title, fn)', function(){ }) }) -}) \ No newline at end of file +}) diff --git a/test/suite.js b/test/suite.js --- a/test/suite.js +++ b/test/suite.js @@ -9,6 +9,7 @@ describe('Suite', function(){ beforeEach(function(){ this.suite = new Suite('To be cloned'); this.suite._timeout = 3043; + this.suite._slow = 101; this.suite._bail = true; this.suite.suites.push(1); this.suite.tests.push('hello'); @@ -26,6 +27,10 @@ describe('Suite', function(){ this.suite.clone().timeout().should.equal(3043); }); + it('should copy the slow value', function(){ + this.suite.clone().slow().should.equal(101); + }); + it('should copy the bail value', function(){ this.suite.clone().bail().should.be.true; }); @@ -74,6 +79,25 @@ describe('Suite', function(){ }); }); + describe('.slow()', function(){ + beforeEach(function(){ + this.suite = new Suite('A Suite'); + }); + + describe('when no argument is passed', function(){ + it('should return the slow value', function(){ + this.suite.slow().should.equal(75); + }); + }); + + describe('when argument is passed', function(){ + it('should return the Suite object', function(){ + var newSuite = this.suite.slow(5000); + newSuite.slow().should.equal(5000); + }); + }); + }); + describe('.bail()', function(){ beforeEach(function(){ this.suite = new Suite('A Suite'); @@ -170,6 +194,7 @@ describe('Suite', function(){ beforeEach(function(){ this.first = new Suite('First suite'); this.first.timeout(4002); + this.first.slow(200); this.second = new Suite('Second suite'); this.first.addSuite(this.second); }); @@ -182,6 +207,10 @@ describe('Suite', function(){ this.second.timeout().should.equal(4002); }); + it('copies the slow value', function(){ + this.second.slow().should.equal(200); + }); + it('adds the suite to the suites collection', function(){ this.first.suites.should.have.length(1); this.first.suites[0].should.equal(this.second);
Configure 'slow' property per test Hi! I'm using mocha in the browser. Just the same way I can configure a single test 'timeout' threshold: ``` javascript it('should take less than 500ms', function(done){ this.timeout(500); setTimeout(done, 300); }); ``` it would be great to configure 'slow' threshold as there may be some functions heavier than others. Maybe: ``` javascript it('would take less than 500ms', function(done){ this.slow(500); setTimeout(done, 800); }) ``` Cheers.
+1 from me Would it make sense to segregate fast and slow tests into separate files/directories and run them separately? @nanodeath if you wanted to yeah sure, no reason you couldn't. Or you could even keep them in context and "tag" them, something like `it('should do something @slow', callback)` and `--grep @slow` etc This issue has been inactive for over 1 month so I'm closing it. If you think it's still an issue re-open. - tjbot I think this new feature is not yet implemented, is it? +1, maybe I'll give this a try? +1 We often have to mock libraries that are required by requirejs and it adds a considerable amount of overhead to the tests. While we'll ideally find a way to optimize this, it's not a high priority and we honestly don't care that much if the tests run for 50-100 ms each. Being able to suppress the warnings for these specific tests would be nice.
2012-09-07T18:01:59Z
1.4
mochajs/mocha
635
mochajs__mocha-635
[ "243" ]
7d488041b618cba0b2c5afcb9d53db09d2f979b3
diff --git a/lib/reporters/html.js b/lib/reporters/html.js --- a/lib/reporters/html.js +++ b/lib/reporters/html.js @@ -28,7 +28,7 @@ exports = module.exports = HTML; * Stats template. */ -var statsTemplate = '<ul id="stats">' +var statsTemplate = '<ul id="mocha-stats">' + '<li class="progress"><canvas width="40" height="40"></canvas></li>' + '<li class="passes"><a href="#">passes:</a> <em>0</em></li>' + '<li class="failures"><a href="#">failures:</a> <em>0</em></li>' @@ -56,7 +56,7 @@ function HTML(runner, root) { , failuresLink = items[2].getElementsByTagName('a')[0] , duration = items[3].getElementsByTagName('em')[0] , canvas = stat.getElementsByTagName('canvas')[0] - , report = fragment('<ul id="report"></ul>') + , report = fragment('<ul id="mocha-report"></ul>') , stack = [report] , progress , ctx @@ -183,7 +183,7 @@ function HTML(runner, root) { */ function error(msg) { - document.body.appendChild(fragment('<div id="error">%s</div>', msg)); + document.body.appendChild(fragment('<div id="mocha-error">%s</div>', msg)); } /** diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -523,6 +523,8 @@ function filterLeaks(ok, globals) { return filter(globals, function(key){ var matched = filter(ok, function(ok){ if (~ok.indexOf('*')) return 0 == key.indexOf(ok.split('*')[0]); + // Opera and IE expose global variables for HTML element IDs (issue #243) + if (/^mocha-/.test(key)) return true; return key == ok; }); return matched.length == 0 && (!global.navigator || 'onerror' !== key); diff --git a/mocha.js b/mocha.js --- a/mocha.js +++ b/mocha.js @@ -937,6 +937,14 @@ module.exports = function(suite){ var test = context.test(title, fn); mocha.grep(test.fullTitle()); }; + + /** + * Pending test case. + */ + + context.test.skip = function(title){ + context.test(title); + }; }); }; @@ -1211,6 +1219,18 @@ Mocha.prototype.slow = function(slow){ return this; }; +/** + * Makes all tests async (accepting a callback) + * + * @return {Mocha} + * @api public + */ + +Mocha.prototype.asyncOnly = function(){ + this.options.asyncOnly = true; + return this; +}; + /** * Run tests and invoke `fn()` when complete. * @@ -1226,6 +1246,7 @@ Mocha.prototype.run = function(fn){ var runner = new exports.Runner(suite); var reporter = new this._reporter(runner); runner.ignoreLeaks = options.ignoreLeaks; + runner.asyncOnly = options.asyncOnly; if (options.grep) runner.grep(options.grep, options.invert); if (options.globals) runner.globals(options.globals); if (options.growl) this._growl(runner, reporter); @@ -1306,14 +1327,14 @@ function parse(str) { */ function format(ms) { - if (ms == d) return (ms / d) + ' day'; - if (ms > d) return (ms / d) + ' days'; - if (ms == h) return (ms / h) + ' hour'; - if (ms > h) return (ms / h) + ' hours'; - if (ms == m) return (ms / m) + ' minute'; - if (ms > m) return (ms / m) + ' minutes'; - if (ms == s) return (ms / s) + ' second'; - if (ms > s) return (ms / s) + ' seconds'; + if (ms == d) return Math.round(ms / d) + ' day'; + if (ms > d) return Math.round(ms / d) + ' days'; + if (ms == h) return Math.round(ms / h) + ' hour'; + if (ms > h) return Math.round(ms / h) + ' hours'; + if (ms == m) return Math.round(ms / m) + ' minute'; + if (ms > m) return Math.round(ms / m) + ' minutes'; + if (ms == s) return Math.round(ms / s) + ' second'; + if (ms > s) return Math.round(ms / s) + ' seconds'; return ms + ' ms'; } }); // module: ms.js @@ -1899,7 +1920,7 @@ exports = module.exports = HTML; * Stats template. 
*/ -var statsTemplate = '<ul id="stats">' +var statsTemplate = '<ul id="mocha-stats">' + '<li class="progress"><canvas width="40" height="40"></canvas></li>' + '<li class="passes"><a href="#">passes:</a> <em>0</em></li>' + '<li class="failures"><a href="#">failures:</a> <em>0</em></li>' @@ -1927,7 +1948,7 @@ function HTML(runner, root) { , failuresLink = items[2].getElementsByTagName('a')[0] , duration = items[3].getElementsByTagName('em')[0] , canvas = stat.getElementsByTagName('canvas')[0] - , report = fragment('<ul id="report"></ul>') + , report = fragment('<ul id="mocha-report"></ul>') , stack = [report] , progress , ctx @@ -1994,7 +2015,7 @@ function HTML(runner, root) { window.scrollTo(0, document.body.scrollHeight); // TODO: add to stats - var percent = stats.tests / total * 100 | 0; + var percent = stats.tests / this.total * 100 | 0; if (progress) progress.update(percent).draw(ctx); // update stats @@ -2054,7 +2075,7 @@ function HTML(runner, root) { */ function error(msg) { - document.body.appendChild(fragment('<div id="error">%s</div>', msg)); + document.body.appendChild(fragment('<div id="mocha-error">%s</div>', msg)); } /** @@ -2682,7 +2703,7 @@ function Markdown(runner) { runner.on('suite', function(suite){ ++level; var slug = utils.slug(suite.fullTitle()); - buf += '<a name="' + slug + '" />' + '\n'; + buf += '<a name="' + slug + '"></a>' + '\n'; buf += title(suite.title) + '\n'; }); @@ -3658,7 +3679,11 @@ Runnable.prototype.run = function(fn){ } return; } - + + if (this.asyncOnly) { + return done(new Error('--async-only option in use without declaring `done()`')); + } + // sync try { if (!this.pending) this.fn.call(ctx); @@ -3685,6 +3710,19 @@ var EventEmitter = require('browser/events').EventEmitter , keys = utils.keys , noop = function(){}; +/** + * Non-enumerable globals. + */ + +var globals = [ + 'setTimeout', + 'clearTimeout', + 'setInterval', + 'clearInterval', + 'XMLHttpRequest', + 'Date' +]; + /** * Expose `Runner`. */ @@ -3719,7 +3757,7 @@ function Runner(suite) { this.on('test end', function(test){ self.checkGlobals(test); }); this.on('hook end', function(hook){ self.checkGlobals(hook); }); this.grep(/.*/); - this.globals(utils.keys(global).concat(['errno'])); + this.globals(this.globalProps().concat(['errno'])); } /** @@ -3770,6 +3808,25 @@ Runner.prototype.grepTotal = function(suite) { return total; }; +/** + * Return a list of global properties. + * + * @return {Array} + * @api private + */ + +Runner.prototype.globalProps = function() { + var props = utils.keys(global); + + // non-enumerables + for (var i = 0; i < globals.length; ++i) { + if (~props.indexOf(globals[i])) continue; + props.push(globals[i]); + } + + return props; +}; + /** * Allow the given `arr` of globals. 
* @@ -3796,7 +3853,7 @@ Runner.prototype.globals = function(arr){ Runner.prototype.checkGlobals = function(test){ if (this.ignoreLeaks) return; var ok = this._globals; - var globals = keys(global); + var globals = this.globalProps(); var isNode = process.kill; var leaks; @@ -3976,6 +4033,8 @@ Runner.prototype.runTest = function(fn){ var test = this.test , self = this; + if (this.asyncOnly) test.asyncOnly = true; + try { test.on('error', function(err){ self.fail(test, err); @@ -4165,6 +4224,8 @@ function filterLeaks(ok, globals) { return filter(globals, function(key){ var matched = filter(ok, function(ok){ if (~ok.indexOf('*')) return 0 == key.indexOf(ok.split('*')[0]); + // Opera and IE expose global variables for HTML element IDs (issue #243) + if (/^mocha-/.test(key)) return true; return key == ok; }); return matched.length == 0 && (!global.navigator || 'onerror' !== key); @@ -4901,6 +4962,7 @@ process.on = function(e, fn){ var query = Mocha.utils.parseQuery(window.location.search || ''); if (query.grep) mocha.grep(query.grep); + if (query.invert) mocha.invert(); return Mocha.prototype.run.call(mocha, function(){ Mocha.utils.highlightTags('code');
diff --git a/test/acceptance/globals.js b/test/acceptance/globals.js --- a/test/acceptance/globals.js +++ b/test/acceptance/globals.js @@ -27,7 +27,13 @@ describe('global leaks', function(){ it('should pass with wildcard', function(){ global.callback123 = 'foo'; global.callback345 = 'bar'; - }) + }); + + it('should pass when prefixed "mocha-"', function(){ + // Opera and IE do this for HTML element IDs anyway + // but to sure we can assert this in any browser, simulate it. + global['mocha-example'] = { nodeType: 1 }; + }); afterEach(function(){ // uncomment to test
Opera reports 1 failure when actually everything passed I have a test case that has exactly 41 tests and while testing in node or Chrome/Safari/Firefox everything works great except in Opera. It says: ``` passes: 41 failures: 1 duration: 0.21s ``` In chrome (for example) it says: ``` passes: 41 failures: 0 duration: 0.31s ``` This can be reproduced by checking out https://github.com/TrinityJS/Classify and opening test/Classify.all.html in Opera.
+1, same issue in paulmillr/es6-shim +1, same issue in Chrome on one of my projects. I find it occurs after the very first test operation, incrementing both the passes and failures by one, no matter what the first test operation is. hmm interesting I'll take a look k yeah i get the same, nice that opera finally has a decent console apparently we have a "stats" global that's not being reported, it's getting the "test end" event but `.passed` is true even though it failed, some state issue +1 Now in opera an error is thrown: global leak detected: stats Thats exactly what @visionmedia said, mocha has a global leak :p This issue has been inactive for over 1 month so I'm closing it. If you think it's still an issue re-open. - tjbot @visionmedia the problem still persists.. but I don't have permission to reopen.. I have the same issue and I temporarily resolved using this code: ``` js var globals = []; if (navigator.userAgent.indexOf('Opera') !== -1) { globals.push('stats', 'report'); } mocha .run() .globals(globals); ``` Still this should be fixed in mocha itself. Bump! From chaplinjs/chaplin#147: <blockquote>I get strange <code>global leaks detected: stats, report</code> errors in Opera. We’re definitely not creating these vars, this must be an error in Mocha.</blockquote> I just got the same. Working fine in Firefox 15 and Chrome 22, but causing these global detected failures in **the default Mocha test suite, both in Opera 12 and IE9**. So I booted up BrowserStack and looked up in the console what these variables where actually set to: ![Mocha Standard tests fail in Opera and IE](https://s3.amazonaws.com/f.cl.ly/items/1a2s18290O3K2o3J3U2S/Mocha%20Standard%20failure.png) Turns out these are the DOM elements Mocha creates during the test. And guess what, both Opera and IE are famous for exposing global variables for every element ID. The way we fixed this in QUnit (jquery/qunit#212) is by prefixing our IDs with "qunit-" (never a bad habit, but with "mocha-", of course). And filtering these out in the global check ([jquery/qunit@`1e21c01e`](https://github.com/jquery/qunit/commit/1e21c01eed6ace2188d1a57a741a76a36f4233ce)). hahaha yikes, that's messed, thanks for looking into it I'm preparing a pull request now.
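A minimal sketch of that QUnit-style filter as it could look on mocha's side; the helper name and the place it is called from are assumptions.

```js
// Opera and IE expose a global for every element ID. With mocha's own UI ids
// prefixed (mocha-stats, mocha-report, mocha-error), the auto-created globals
// are easy to recognise and skip in the leak check.
function isMochaUiGlobal(key) {
  return /^mocha-/.test(key);
}

// e.g. inside the leak check, before comparing against the whitelist:
//   if (isMochaUiGlobal(key)) return; // not a real leak
```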
2012-11-02T01:08:56Z
1.6
mochajs/mocha
368
mochajs__mocha-368
[ "124", "166" ]
10471eb988f66da2f4eb9ef516d2ecdafc9c98ab
diff --git a/lib/runner.js b/lib/runner.js --- a/lib/runner.js +++ b/lib/runner.js @@ -53,7 +53,8 @@ function Runner(suite) { Runner.prototype.__proto__ = EventEmitter.prototype; /** - * Run tests with full titles matching `re`. + * Run tests with full titles matching `re`. Updates runner.total + * with number of tests matched. * * @param {RegExp} re * @return {Runner} for chaining @@ -63,9 +64,33 @@ Runner.prototype.__proto__ = EventEmitter.prototype; Runner.prototype.grep = function(re){ debug('grep %s', re); this._grep = re; + this.total = this.grepTotal(this.suite); + return this; }; +/** + * Returns the number of tests matching the grep search for the + * given suite. + * + * @param {Suite} suite + * @return {Number} + * @api public + */ + +Runner.prototype.grepTotal = function(suite) { + var self = this; + var total = 0; + + suite.eachTest(function(test){ + if (self._grep.test(test.fullTitle())){ + total++; + }; + }); + + return total; +}; + /** * Allow the given `arr` of globals. * @@ -346,7 +371,10 @@ Runner.prototype.runSuite = function(suite, fn){ , i = 0; debug('run suite %s', suite.fullTitle()); - this.emit('suite', this.suite = suite); + + if(self.grepTotal(suite)) { + this.emit('suite', this.suite = suite); + } function next() { var curr = suite.suites[i++]; diff --git a/lib/suite.js b/lib/suite.js --- a/lib/suite.js +++ b/lib/suite.js @@ -245,3 +245,25 @@ Suite.prototype.total = function(){ return sum + suite.total(); }, 0) + this.tests.length; }; + +/** + * Iterates through each suite recursively to find + * all tests. Applies a function in the format + * `fn(test)`. + * + * @param {Function} fn + * @return {Suite} + * @api public + */ + +Suite.prototype.eachTest = function(fn){ + var self = this; + utils.forEach(self.tests, function(test){ + fn(test); + }); + utils.forEach(self.suites, function(suite){ + suite.eachTest(fn); + }); + return this; +}; +
diff --git a/test/runner.js b/test/runner.js --- a/test/runner.js +++ b/test/runner.js @@ -1,7 +1,8 @@ var mocha = require('../') , Suite = mocha.Suite - , Runner = mocha.Runner; + , Runner = mocha.Runner + , Test = mocha.Test; describe('Runner', function(){ var suite, runner; @@ -11,6 +12,27 @@ describe('Runner', function(){ runner = new Runner(suite); }) + describe('.grep()', function(){ + it('should update the runner.total with number of matched tests', function(){ + suite.addTest(new Test('im a test about lions')); + suite.addTest(new Test('im another test about lions')); + suite.addTest(new Test('im a test about bears')); + var newRunner = new Runner(suite); + newRunner.grep(/lions/); + newRunner.total.should.equal(2); + }) + }) + + describe('.grepTotal()', function(){ + it('should return the total number of matched tests', function(){ + suite.addTest(new Test('im a test about lions')); + suite.addTest(new Test('im another test about lions')); + suite.addTest(new Test('im a test about bears')); + runner.grep(/lions/); + runner.grepTotal(suite).should.equal(2); + }) + }) + describe('.globals()', function(){ it('should default to the known globals', function(){ runner.globals().length.should.be.above(10); @@ -102,4 +124,4 @@ describe('Runner', function(){ runner.failHook(hook, err); }) }) -}) \ No newline at end of file +}) diff --git a/test/suite.js b/test/suite.js --- a/test/suite.js +++ b/test/suite.js @@ -249,4 +249,52 @@ describe('Suite', function(){ }); }); }); + + describe('.eachTest(fn)', function(){ + beforeEach(function(){ + this.suite = new Suite('A Suite'); + }); + + describe('when there are no nested suites or tests', function(){ + it('should return 0', function(){ + var counter = 0; + function fn(){ + counter++; + } + this.suite.eachTest(fn); + counter.should.equal(0); + }); + }); + + describe('when there are several tests in the suite', function(){ + it('should return the number', function(){ + this.suite.addTest(new Test('a child test')); + this.suite.addTest(new Test('another child test')); + + var counter = 0; + function fn(){ + counter++; + } + this.suite.eachTest(fn); + counter.should.equal(2); + }); + }); + + describe('when there are several levels of nested suites', function(){ + it('should return the number', function(){ + this.suite.addTest(new Test('a child test')); + var suite = (new Suite('a child suite')); + suite.addTest(new Test('a test in a child suite')); + this.suite.addSuite(suite); + + var counter = 0; + function fn(){ + counter++; + } + this.suite.eachTest(fn); + counter.should.equal(2); + }); + }); + + }); });
total should adjust for --grep. When grepping, don't display the empty suites.
It looks like this might already be fixed in 1.0.1. I tested on websocket.io's tests and the total (3 instead of 17) was displayed correctly. ``` $ mocha --grep fire -R spec websocket server connections ✓ must fire a connection event ✓ must fire a close event when client closes ✓ must fire a close event when server closes messages request path client tracking websocket.io ✔ 3 tests complete (41ms) ``` Just discovered --grep and glad that you already know about this issue. I think it may be worth showing an example for exact matching using ^. I was having an unintended test fire because its description included part of a word that I was only trying to match exactly.
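On the exact-matching point, a small sketch of anchoring the pattern (the title is just read off the transcript above):

```js
// Anchoring the pattern means only the test whose full title matches exactly
// will run, instead of every title that merely contains "fire".
mocha.grep(new RegExp('^websocket server connections must fire a connection event$'));
mocha.run();
```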
2012-04-09T21:47:25Z
1
mochajs/mocha
462
mochajs__mocha-462
[ "396" ]
5ef889459242d00bddcfe33cb746c2491c91ebd2
diff --git a/lib/interfaces/bdd.js b/lib/interfaces/bdd.js --- a/lib/interfaces/bdd.js +++ b/lib/interfaces/bdd.js @@ -84,7 +84,7 @@ module.exports = function(suite){ * acting as a thunk. */ - context.it = function(title, fn){ + context.it = context.specify = function(title, fn){ suites[0].addTest(new Test(title, fn)); }; });
diff --git a/test/acceptance/interfaces/bdd.js b/test/acceptance/interfaces/bdd.js --- a/test/acceptance/interfaces/bdd.js +++ b/test/acceptance/interfaces/bdd.js @@ -22,4 +22,14 @@ describe('Array', function(){ arr.should.eql([1,2]); }) }) -}) \ No newline at end of file +}) + +context('Array', function(){ + beforeEach(function(){ + this.arr = [1,2,3]; + }) + + specify('has a length property', function(){ + this.arr.length.should.equal(3); + }) +})
Add `specify` as a synonym for `it` in BDD interface Apparently this is in RSpec, as per <a href="http://codebetter.com/jameskovacs/2012/04/19/ruby-and-rspec-powerful-languages-allow-simpler-frameworks/">a blog post I read</a>. It does seem nice in some cases. Yes/no?
doesn't matter much to me, i dont dislike it but having two functions for the same thing could get kinda lame if your team starts mixing them. anyone else? +1, -1? Yeah I'm happy to let this simmer until someone who really wants it comes along; no strong feelings. I've run into a few cases where it might make sense but do see the potential for lameness.
2012-06-14T16:07:29Z
1.1