On Wed, Nov 13, 2013 at 08:51:49AM -0800, Dylan Baker wrote:
> This patch causes tests with subtests to be treated as a group rather
> than as a single test. This means that the status stored by the test
> itself will be overwritten by the statuses of its subtests.
> 
> There is one oddity about this to be aware of: a test with subtests that
> crashes or fails before any of the subtests run will report a fraction
> of 0/1 with the appropriate color, even though all of the subtests will
> report Not Run.
> 
> v2: - Add subtests to the results file as full tests (the internal view
>       of the json), without this they will not appear in changes, fixes,
>       etc
>     - Render the background color of Not Run tests correctly in HTML
>     - Apply subtest fractions down the stack
> 
> Signed-off-by: Dylan Baker <baker.dyla...@gmail.com>

Both patches are:

Tested-by: Tom Stellard <thomas.stell...@amd.com>

This is a nice improvement, thanks.

-Tom
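
For readers skimming the patch, here is a minimal, self-contained sketch of
the roll-up idea described in the commit message. It is not piglit's actual
code: the status names, the (passed, total) fraction tuples, and the sample
results dictionary are assumptions made purely to illustrate how a subtest's
status could be recorded as a full test and accumulated at every level of its
test-name path, plus the synthetic 'all' group.

    import posixpath as path

    # (passed, total) contributed by each status in this sketch;
    # 'skip' and 'notrun' add nothing to either count
    FRACTION = {'pass': (1, 1), 'fail': (0, 1), 'crash': (0, 1),
                'skip': (0, 0), 'notrun': (0, 0)}

    # hypothetical results: one test with two subtests
    results = {'spec/ext/foo': {'subtest': {'a': 'pass', 'b': 'fail'}}}

    fractions = {}  # group or test name -> [passed, total]

    def bump(name, status):
        passed, total = FRACTION[status]
        entry = fractions.setdefault(name, [0, 0])
        entry[0] += passed
        entry[1] += total

    for test, value in results.items():
        for sub, status in value.get('subtest', {}).items():
            name = path.join(test, sub)    # e.g. 'spec/ext/foo/a'
            while name:                    # walk the name up like a path
                bump(name, status)
                name = path.dirname(name)
            bump('all', status)            # finally update the root group

    # fractions now has entries for each subtest, for 'spec/ext/foo'
    # (which is now effectively a group), for 'spec/ext', 'spec', and 'all'
    print(fractions)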

> ---
>  framework/summary.py | 73 ++++++++++++++++++++++++++++++++++++++++++----------
>  1 file changed, 59 insertions(+), 14 deletions(-)
> 
> diff --git a/framework/summary.py b/framework/summary.py
> index a587712..d31be19 100644
> --- a/framework/summary.py
> +++ b/framework/summary.py
> @@ -161,8 +161,17 @@ class HTMLIndex(list):
>              # is a KeyError (a result doesn't contain a particular test),
>              # return Not Run, with clas skip for highlighting
>              for each in summary.results:
> +                # If the "group" at the top of the key hierarchy contains
> +                # 'subtest' then it is really not a group; link to that page
>                  try:
> -                    self._testResult(each.name, key, each.tests[key]['result'])
> +                    if each.tests[path.dirname(key)]['subtest']:
> +                        href = path.dirname(key)
> +                except KeyError:
> +                    href = key
> +
> +                try:
> +                    self._testResult(each.name, href,
> +                                     summary.status[each.name][key])
>                  except KeyError:
>                      self.append({'type': 'other',
>                                   'text': '<td class="skip">Not Run</td>'})
> @@ -221,8 +230,14 @@ class HTMLIndex(list):
>          displaying pass/fail/crash/etc and formatting the cell to the
>          correct color.
>          """
> +        # "Not Run" is not a valid class; if it appears, set the class to skip
> +        if isinstance(text, so.NotRun):
> +            css = 'skip'
> +        else:
> +            css = text
> +
>          self.append({'type': 'testResult',
> -                     'class': text,
> +                     'class': css,
>                       'href': path.join(group, href + ".html"),
>                       'text': text})
>  
> @@ -277,19 +292,49 @@ class Summary:
>              fraction = self.fractions[results.name]
>              status = self.status[results.name]
>  
> +            # store the results to be appended to results. Adding them in the
> +            # loop will cause a RuntimeError
> +            temp_results = {}
> +
>              for key, value in results.tests.iteritems():
> -                #FIXME: Add subtest support
> -
> -                # Walk the test name as if it was a path, at each level update
> -                # the tests passed over the total number of tests (fractions),
> -                # and update the status of the current level if the status of
> -                # the previous level was worse, but is not skip
> -                while key != '':
> -                    fgh(key, value['result'])
> -                    key = path.dirname(key)
> -
> -                # when we hit the root update the 'all' group and stop
> -                fgh('all', value['result'])
> +                # Treat a test with subtests as if it were a group: assign the
> +                # subtests' statuses and fractions down to the test, and then
> +                # proceed as normal.
> +                try:
> +                    for (subt, subv) in value['subtest'].iteritems():
> +                        subt = path.join(key, subt)
> +                        subv = so.status_lookup(subv)
> +
> +                        # Add the subtest to the fractions and status lists
> +                        fraction[subt] = subv.fraction
> +                        status[subt] = subv
> +                        temp_results.update({subt: {'result': subv}})
> +
> +                        self.tests['all'].add(subt)
> +                        while subt != '':
> +                            fgh(subt, subv)
> +                            subt = path.dirname(subt)
> +                        fgh('all', subv)
> +
> +                    # remove the test from the 'all' list; this will cause it
> +                    # to be treated as a group
> +                    self.tests['all'].discard(key)
> +                except KeyError:
> +                    # Walk the test name as if it was a path, at each level update
> +                    # the tests passed over the total number of tests (fractions),
> +                    # and update the status of the current level if the status of
> +                    # the previous level was worse, but is not skip
> +                    while key != '':
> +                        fgh(key, value['result'])
> +                        key = path.dirname(key)
> +
> +                    # when we hit the root update the 'all' group and stop
> +                    fgh('all', value['result'])
> +
> +            # Update the results.tests dictionary with the subtests so that
> +            # they are entered into the appropriate pages other than all.
> +            # Updating it in the loop will raise a RuntimeError
> +            results.tests.update({k:v for k,v in temp_results.iteritems()})
>  
>          # Create the lists of statuses like problems, regressions, fixes,
>          # changes and skips
> -- 
> 1.8.1.5
> 
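
As a footnote on the "Not Run" rendering change in the HTMLIndex hunk above,
the essence is that a status without a matching stylesheet class falls back to
the 'skip' class while the cell still displays the original status text. The
snippet below is only a stand-alone illustration of that fallback; the
VALID_CLASSES set and the cell_class helper are assumptions, not piglit's API.

    # hypothetical set of statuses that have their own CSS class
    VALID_CLASSES = {'pass', 'fail', 'crash', 'skip', 'warn'}

    def cell_class(status_text):
        # fall back to 'skip' for anything without its own class,
        # such as "Not Run", while keeping the original text
        css = status_text if status_text in VALID_CLASSES else 'skip'
        return '<td class="{0}">{1}</td>'.format(css, status_text)

    print(cell_class('pass'))     # <td class="pass">pass</td>
    print(cell_class('Not Run'))  # <td class="skip">Not Run</td>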