Generalize unexpected passes expectations module

Generalizes the expectations module of the unexpected pass finder code
by pulling out all the GPU-specific code into a GPU-specific
implementation that extends the base class.

Also does some general refactoring on the expectations module, as a
sizable portion of the code belonged in different modules.

Bug: 1222826
Change-Id: Id02668856253528c2806ef4c3f715023a14e57ff
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3011896
Reviewed-by: Dirk Pranke <[email protected]>
Commit-Queue: Brian Sheedy <[email protected]>
Cr-Commit-Position: refs/heads/master@{#901718}
diff --git a/testing/PRESUBMIT.py b/testing/PRESUBMIT.py
index d04a511..65f2d62 100644
--- a/testing/PRESUBMIT.py
+++ b/testing/PRESUBMIT.py
@@ -13,13 +13,8 @@
 
 def CommonChecks(input_api, output_api):
   testing_env = dict(input_api.environ)
-  # TODO(crbug.com/1222826): Remove this path addition once all GPU-specific
-  # code is pulled out of the common code.
-  gpu_path = input_api.os_path.join(
-      input_api.PresubmitLocalPath(), '..', 'content', 'test', 'gpu')
   testing_env.update({
-      'PYTHONPATH': input_api.os_path.pathsep.join(
-          [input_api.PresubmitLocalPath(), gpu_path]),
+      'PYTHONPATH': input_api.PresubmitLocalPath(),
       'PYTHONDONTWRITEBYTECODE': '1',
   })
 
diff --git a/testing/unexpected_passes_common/data_types.py b/testing/unexpected_passes_common/data_types.py
index 2a7f7f74..1bd77b8 100644
--- a/testing/unexpected_passes_common/data_types.py
+++ b/testing/unexpected_passes_common/data_types.py
@@ -5,10 +5,16 @@
 
 from __future__ import print_function
 
+import copy
 import fnmatch
+import logging
 
 import six
 
+FULL_PASS = 1
+NEVER_PASS = 2
+PARTIAL_PASS = 3
+
 
 class Expectation(object):
   """Container for a test expectation.
@@ -205,6 +211,39 @@
         for nested_value in v.IterToValueType(value_type):
           yield (k, ) + nested_value
 
+  def Merge(self, other_map, reference_map=None):
+    """Merges |other_map| into self.
+
+    Args:
+      other_map: A BaseTypedMap whose contents will be merged into self.
+      reference_map: A dict containing the information that was originally in
+          self. Used for ensuring that a single expectation/builder/step
+          combination is only ever updated once. If None, a copy of self will be
+          used.
+    """
+    assert isinstance(other_map, self.__class__)
+    # We should only ever encounter a single updated BuildStats for an
+    # expectation/builder/step combination. Use the reference map to determine
+    # if a particular BuildStats has already been updated or not.
+    reference_map = reference_map or copy.deepcopy(self)
+    for key, value in other_map.items():
+      if key not in self:
+        self[key] = value
+      else:
+        if isinstance(value, dict):
+          self[key].Merge(value, reference_map.get(key, {}))
+        else:
+          assert isinstance(value, BuildStats)
+          # Ensure we haven't updated this BuildStats already. If the reference
+          # map doesn't have a corresponding BuildStats, then base_map shouldn't
+          # have had one initially either, and thus it would have been added
+          # before reaching this point. Otherwise, the two values must match,
+          # meaning that base_map's BuildStats hasn't been updated yet.
+          reference_stats = reference_map.get(key, None)
+          assert reference_stats is not None
+          assert reference_stats == self[key]
+          self[key] = value
+
 
 class TestExpectationMap(BaseTypedMap):
   """Typed map for string types -> ExpectationBuilderMap.
@@ -245,6 +284,171 @@
     """
     return self.IterToValueType(BuilderStepMap)
 
+  def AddResultList(self, builder, results):
+    """Adds |results| to |self|.
+
+    Args:
+      builder: A string containing the builder |results| came from. Should be
+          prefixed with something to distinguish between identically named CI
+          and try builders.
+      results: A list of data_types.Result objects corresponding to the ResultDB
+          data queried for |builder|.
+
+    Returns:
+      A list of data_types.Result objects that did not have a matching
+      expectation in |self|.
+    """
+    failure_results = set()
+    pass_results = set()
+    unmatched_results = []
+    for r in results:
+      if r.actual_result == 'Pass':
+        pass_results.add(r)
+      else:
+        failure_results.add(r)
+
+    # Remove any cases of failure -> pass from the passing set. If a test is
+    # flaky, we get both pass and failure results for it, so we need to remove
+    # any cases of a pass result having a corresponding, earlier failure
+    # result.
+    modified_failing_retry_results = set()
+    for r in failure_results:
+      modified_failing_retry_results.add(
+          Result(r.test, r.tags, 'Pass', r.step, r.build_id))
+    pass_results -= modified_failing_retry_results
+
+    for r in pass_results | failure_results:
+      found_matching = self._AddResult(r, builder)
+      if not found_matching:
+        unmatched_results.append(r)
+
+    return unmatched_results
+
+  def _AddResult(self, result, builder):
+    """Adds a single |result| to |self|.
+
+    Args:
+      result: A data_types.Result object to add.
+      builder: A string containing the name of the builder |result| came from.
+
+    Returns:
+      True if an expectation in |self| applied to |result|, otherwise False.
+    """
+    found_matching_expectation = False
+    # We need to use fnmatch since wildcards are supported, so there's no point
+    # in checking the test name key right now. The AppliesToResult check already
+    # does an fnmatch check.
+    for _, expectation, builder_map in self.IterBuilderStepMaps():
+      if expectation.AppliesToResult(result):
+        found_matching_expectation = True
+        step_map = builder_map.setdefault(builder, StepBuildStatsMap())
+        stats = step_map.setdefault(result.step, BuildStats())
+        if result.actual_result == 'Pass':
+          stats.AddPassedBuild()
+        else:
+          stats.AddFailedBuild(result.build_id)
+    return found_matching_expectation
+
+  def SplitByStaleness(self):
+    """Separates stored data based on expectation staleness.
+
+    Returns:
+      Three TestExpectationMaps (stale_dict, semi_stale_dict, active_dict). All
+      three combined contain the information of |self|. |stale_dict| contains
+      entries for expectations that are no longer helpful,
+      |semi_stale_dict| contains entries for expectations that might be
+      removable or modifiable, but have at least one failed test run.
+      |active_dict| contains entries for expectations that are preventing
+      failures on all builders they're active on, and thus shouldn't be removed.
+    """
+    stale_dict = TestExpectationMap()
+    semi_stale_dict = TestExpectationMap()
+    active_dict = TestExpectationMap()
+
+    # This initially looks like a good target for using
+    # TestExpectationMap's iterators since there are many nested loops.
+    # However, we need to reset state in different loops, and the alternative of
+    # keeping all the state outside the loop and resetting under certain
+    # conditions ends up being less readable than just using nested loops.
+    for test_name, expectation_map in self.items():
+      for expectation, builder_map in expectation_map.items():
+        # A temporary map to hold data so we can later determine whether an
+        # expectation is stale, semi-stale, or active.
+        tmp_map = {
+            FULL_PASS: BuilderStepMap(),
+            NEVER_PASS: BuilderStepMap(),
+            PARTIAL_PASS: BuilderStepMap(),
+        }
+
+        split_stats_map = builder_map.SplitBuildStatsByPass()
+        for builder_name, (fully_passed, never_passed,
+                           partially_passed) in split_stats_map.items():
+          if fully_passed:
+            tmp_map[FULL_PASS][builder_name] = fully_passed
+          if never_passed:
+            tmp_map[NEVER_PASS][builder_name] = never_passed
+          if partially_passed:
+            tmp_map[PARTIAL_PASS][builder_name] = partially_passed
+
+        def _CopyPassesIntoBuilderMap(builder_map, pass_types):
+          for pt in pass_types:
+            for builder, steps in tmp_map[pt].items():
+              builder_map.setdefault(builder, StepBuildStatsMap()).update(steps)
+
+        # Handle the case of a stale expectation.
+        if not (tmp_map[NEVER_PASS] or tmp_map[PARTIAL_PASS]):
+          builder_map = stale_dict.setdefault(
+              test_name,
+              ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
+          _CopyPassesIntoBuilderMap(builder_map, [FULL_PASS])
+        # Handle the case of an active expectation.
+        elif not tmp_map[FULL_PASS]:
+          builder_map = active_dict.setdefault(
+              test_name,
+              ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
+          _CopyPassesIntoBuilderMap(builder_map, [NEVER_PASS, PARTIAL_PASS])
+        # Handle the case of a semi-stale expectation.
+        else:
+          # TODO(crbug.com/998329): Sort by pass percentage so it's easier to
+          # find problematic builders without highlighting.
+          builder_map = semi_stale_dict.setdefault(
+              test_name,
+              ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
+          _CopyPassesIntoBuilderMap(builder_map,
+                                    [FULL_PASS, PARTIAL_PASS, NEVER_PASS])
+    return stale_dict, semi_stale_dict, active_dict
+
+  def FilterOutUnusedExpectations(self):
+    """Filters out any unused Expectations from stored data.
+
+    An Expectation is considered unused if its corresponding dictionary is
+    empty. If removing Expectations results in a top-level test key having an
+    empty dictionary, that test entry will also be removed.
+
+    Returns:
+      A list containing any Expectations that were removed.
+    """
+    logging.info('Filtering out unused expectations')
+    unused_expectations = []
+    for _, expectation, builder_map in self.IterBuilderStepMaps():
+      if not builder_map:
+        unused_expectations.append(expectation)
+    for unused in unused_expectations:
+      for _, expectation_map in self.items():
+        if unused in expectation_map:
+          del expectation_map[unused]
+    logging.debug('Found %d unused expectations', len(unused_expectations))
+
+    empty_tests = []
+    for test_name, expectation_map in self.items():
+      if not expectation_map:
+        empty_tests.append(test_name)
+    for empty in empty_tests:
+      del self[empty]
+    logging.debug('Found %d empty tests: %s', len(empty_tests), empty_tests)
+
+    return unused_expectations
+
 
 class ExpectationBuilderMap(BaseTypedMap):
   """Typed map for Expectation -> BuilderStepMap."""
diff --git a/testing/unexpected_passes_common/data_types_unittest.py b/testing/unexpected_passes_common/data_types_unittest.py
index 71b691a..b221d008 100755
--- a/testing/unexpected_passes_common/data_types_unittest.py
+++ b/testing/unexpected_passes_common/data_types_unittest.py
@@ -5,6 +5,7 @@
 
 from __future__ import print_function
 
+import copy
 import sys
 import unittest
 
@@ -14,6 +15,7 @@
   import unittest.mock as mock
 
 from unexpected_passes_common import data_types
+from unexpected_passes_common import unittest_utils as uu
 
 GENERIC_EXPECTATION = data_types.Expectation('test', ['tag1', 'tag2'], ['Pass'])
 GENERIC_RESULT = data_types.Result('test', ['tag1', 'tag2'], 'Pass',
@@ -286,5 +288,849 @@
       test_expectation_map.IterToValueType(int)
 
 
+class TypedMapMergeUnittest(unittest.TestCase):
+  def testEmptyBaseMap(self):
+    """Tests that a merge with an empty base map copies the merge map."""
+    base_map = data_types.TestExpectationMap()
+    merge_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], 'Failure'):
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'step': data_types.BuildStats(),
+                }),
+            }),
+        }),
+    })
+    original_merge_map = copy.deepcopy(merge_map)
+    base_map.Merge(merge_map)
+    self.assertEqual(base_map, merge_map)
+    self.assertEqual(merge_map, original_merge_map)
+
+  def testEmptyMergeMap(self):
+    """Tests that a merge with an empty merge map is a no-op."""
+    base_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], 'Failure'):
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'step': data_types.BuildStats(),
+                }),
+            }),
+        }),
+    })
+    merge_map = data_types.TestExpectationMap()
+    original_base_map = copy.deepcopy(base_map)
+    base_map.Merge(merge_map)
+    self.assertEqual(base_map, original_base_map)
+    self.assertEqual(merge_map, {})
+
+  def testMissingKeys(self):
+    """Tests that missing keys are properly copied to the base map."""
+    base_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], 'Failure'):
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'step': data_types.BuildStats(),
+                }),
+            }),
+        }),
+    })
+    merge_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], 'Failure'):
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'step2': data_types.BuildStats(),
+                }),
+                'builder2':
+                data_types.StepBuildStatsMap({
+                    'step': data_types.BuildStats(),
+                }),
+            }),
+            data_types.Expectation('foo', ['mac'], 'Failure'):
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'step': data_types.BuildStats(),
+                })
+            })
+        }),
+        'bar':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('bar', ['win'], 'Failure'):
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'step': data_types.BuildStats(),
+                }),
+            }),
+        }),
+    })
+    expected_base_map = {
+        'foo': {
+            data_types.Expectation('foo', ['win'], 'Failure'): {
+                'builder': {
+                    'step': data_types.BuildStats(),
+                    'step2': data_types.BuildStats(),
+                },
+                'builder2': {
+                    'step': data_types.BuildStats(),
+                },
+            },
+            data_types.Expectation('foo', ['mac'], 'Failure'): {
+                'builder': {
+                    'step': data_types.BuildStats(),
+                }
+            }
+        },
+        'bar': {
+            data_types.Expectation('bar', ['win'], 'Failure'): {
+                'builder': {
+                    'step': data_types.BuildStats(),
+                },
+            },
+        },
+    }
+    base_map.Merge(merge_map)
+    self.assertEqual(base_map, expected_base_map)
+
+  def testMergeBuildStats(self):
+    """Tests that BuildStats for the same step are merged properly."""
+    base_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], 'Failure'):
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'step': data_types.BuildStats(),
+                }),
+            }),
+        }),
+    })
+    merge_stats = data_types.BuildStats()
+    merge_stats.AddFailedBuild('1')
+    merge_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], 'Failure'):
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'step': merge_stats,
+                }),
+            }),
+        }),
+    })
+    expected_stats = data_types.BuildStats()
+    expected_stats.AddFailedBuild('1')
+    expected_base_map = {
+        'foo': {
+            data_types.Expectation('foo', ['win'], 'Failure'): {
+                'builder': {
+                    'step': expected_stats,
+                },
+            },
+        },
+    }
+    base_map.Merge(merge_map)
+    self.assertEqual(base_map, expected_base_map)
+
+  def testInvalidMerge(self):
+    """Tests that updating a BuildStats instance twice is an error."""
+    base_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], 'Failure'):
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'step': data_types.BuildStats(),
+                }),
+            }),
+        }),
+    })
+    merge_stats = data_types.BuildStats()
+    merge_stats.AddFailedBuild('1')
+    merge_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], 'Failure'):
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'step': merge_stats,
+                }),
+            }),
+        }),
+    })
+    original_base_map = copy.deepcopy(base_map)
+    base_map.Merge(merge_map, original_base_map)
+    with self.assertRaises(AssertionError):
+      base_map.Merge(merge_map, original_base_map)
+
+
+class TestExpectationMapAddResultListUnittest(unittest.TestCase):
+  def GetGenericRetryExpectation(self):
+    return data_types.Expectation('foo/test', ['win10'], 'RetryOnFailure')
+
+  def GetGenericFailureExpectation(self):
+    return data_types.Expectation('foo/test', ['win10'], 'Failure')
+
+  def GetEmptyMapForGenericRetryExpectation(self):
+    foo_expectation = self.GetGenericRetryExpectation()
+    return data_types.TestExpectationMap({
+        'foo/test':
+        data_types.ExpectationBuilderMap({
+            foo_expectation:
+            data_types.BuilderStepMap(),
+        }),
+    })
+
+  def GetEmptyMapForGenericFailureExpectation(self):
+    foo_expectation = self.GetGenericFailureExpectation()
+    return data_types.TestExpectationMap({
+        'foo/test':
+        data_types.ExpectationBuilderMap({
+            foo_expectation:
+            data_types.BuilderStepMap(),
+        }),
+    })
+
+  def GetPassedMapForExpectation(self, expectation):
+    stats = data_types.BuildStats()
+    stats.AddPassedBuild()
+    return self.GetMapForExpectationAndStats(expectation, stats)
+
+  def GetFailedMapForExpectation(self, expectation):
+    stats = data_types.BuildStats()
+    stats.AddFailedBuild('build_id')
+    return self.GetMapForExpectationAndStats(expectation, stats)
+
+  def GetMapForExpectationAndStats(self, expectation, stats):
+    return data_types.TestExpectationMap({
+        expectation.test:
+        data_types.ExpectationBuilderMap({
+            expectation:
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'pixel_tests': stats,
+                }),
+            }),
+        }),
+    })
+
+  def testRetryOnlyPassMatching(self):
+    """Tests when the only tests are retry expectations that pass and match."""
+    foo_result = data_types.Result('foo/test', ['win10'], 'Pass', 'pixel_tests',
+                                   'build_id')
+    expectation_map = self.GetEmptyMapForGenericRetryExpectation()
+    unmatched_results = expectation_map.AddResultList('builder', [foo_result])
+    self.assertEqual(unmatched_results, [])
+
+    expected_expectation_map = self.GetPassedMapForExpectation(
+        self.GetGenericRetryExpectation())
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testRetryOnlyFailMatching(self):
+    """Tests when the only tests are retry expectations that fail and match."""
+    foo_result = data_types.Result('foo/test', ['win10'], 'Failure',
+                                   'pixel_tests', 'build_id')
+    expectation_map = self.GetEmptyMapForGenericRetryExpectation()
+    unmatched_results = expectation_map.AddResultList('builder', [foo_result])
+    self.assertEqual(unmatched_results, [])
+
+    expected_expectation_map = self.GetFailedMapForExpectation(
+        self.GetGenericRetryExpectation())
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testRetryFailThenPassMatching(self):
+    """Tests when there are pass and fail results for retry expectations."""
+    foo_fail_result = data_types.Result('foo/test', ['win10'], 'Failure',
+                                        'pixel_tests', 'build_id')
+    foo_pass_result = data_types.Result('foo/test', ['win10'], 'Pass',
+                                        'pixel_tests', 'build_id')
+    expectation_map = self.GetEmptyMapForGenericRetryExpectation()
+    unmatched_results = expectation_map.AddResultList(
+        'builder', [foo_fail_result, foo_pass_result])
+    self.assertEqual(unmatched_results, [])
+
+    expected_expectation_map = self.GetFailedMapForExpectation(
+        self.GetGenericRetryExpectation())
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testFailurePassMatching(self):
+    """Tests when there are pass results for failure expectations."""
+    foo_result = data_types.Result('foo/test', ['win10'], 'Pass', 'pixel_tests',
+                                   'build_id')
+    expectation_map = self.GetEmptyMapForGenericFailureExpectation()
+    unmatched_results = expectation_map.AddResultList('builder', [foo_result])
+    self.assertEqual(unmatched_results, [])
+
+    expected_expectation_map = self.GetPassedMapForExpectation(
+        self.GetGenericFailureExpectation())
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testFailureFailureMatching(self):
+    """Tests when there are failure results for failure expectations."""
+    foo_result = data_types.Result('foo/test', ['win10'], 'Failure',
+                                   'pixel_tests', 'build_id')
+    expectation_map = self.GetEmptyMapForGenericFailureExpectation()
+    unmatched_results = expectation_map.AddResultList('builder', [foo_result])
+    self.assertEqual(unmatched_results, [])
+
+    expected_expectation_map = self.GetFailedMapForExpectation(
+        self.GetGenericFailureExpectation())
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testMismatches(self):
+    """Tests that unmatched results get returned."""
+    foo_match_result = data_types.Result('foo/test', ['win10'], 'Pass',
+                                         'pixel_tests', 'build_id')
+    foo_mismatch_result = data_types.Result('foo/not_a_test', ['win10'],
+                                            'Failure', 'pixel_tests',
+                                            'build_id')
+    bar_result = data_types.Result('bar/test', ['win10'], 'Pass', 'pixel_tests',
+                                   'build_id')
+    expectation_map = self.GetEmptyMapForGenericFailureExpectation()
+    unmatched_results = expectation_map.AddResultList(
+        'builder', [foo_match_result, foo_mismatch_result, bar_result])
+    self.assertEqual(len(set(unmatched_results)), 2)
+    self.assertEqual(set(unmatched_results),
+                     set([foo_mismatch_result, bar_result]))
+
+    expected_expectation_map = self.GetPassedMapForExpectation(
+        self.GetGenericFailureExpectation())
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+
+class TestExpectationMapAddResultUnittest(unittest.TestCase):
+  def testResultMatchPassingNew(self):
+    """Test adding a passing result when no results for a builder exist."""
+    r = data_types.Result('some/test/case', ['win', 'win10'], 'Pass',
+                          'pixel_tests', 'build_id')
+    e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
+    expectation_map = data_types.TestExpectationMap({
+        'some/test/*':
+        data_types.ExpectationBuilderMap({
+            e: data_types.BuilderStepMap(),
+        }),
+    })
+    found_matching = expectation_map._AddResult(r, 'builder')
+    self.assertTrue(found_matching)
+    stats = data_types.BuildStats()
+    stats.AddPassedBuild()
+    expected_expectation_map = {
+        'some/test/*': {
+            e: {
+                'builder': {
+                    'pixel_tests': stats,
+                },
+            },
+        },
+    }
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testResultMatchFailingNew(self):
+    """Test adding a failing result when no results for a builder exist."""
+    r = data_types.Result('some/test/case', ['win', 'win10'], 'Failure',
+                          'pixel_tests', 'build_id')
+    e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
+    expectation_map = data_types.TestExpectationMap({
+        'some/test/*':
+        data_types.ExpectationBuilderMap({
+            e: data_types.BuilderStepMap(),
+        }),
+    })
+    found_matching = expectation_map._AddResult(r, 'builder')
+    self.assertTrue(found_matching)
+    stats = data_types.BuildStats()
+    stats.AddFailedBuild('build_id')
+    expected_expectation_map = {
+        'some/test/*': {
+            e: {
+                'builder': {
+                    'pixel_tests': stats,
+                },
+            }
+        }
+    }
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testResultMatchPassingExisting(self):
+    """Test adding a passing result when results for a builder exist."""
+    r = data_types.Result('some/test/case', ['win', 'win10'], 'Pass',
+                          'pixel_tests', 'build_id')
+    e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
+    stats = data_types.BuildStats()
+    stats.AddFailedBuild('build_id')
+    expectation_map = data_types.TestExpectationMap({
+        'some/test/*':
+        data_types.ExpectationBuilderMap({
+            e:
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'pixel_tests': stats,
+                }),
+            }),
+        }),
+    })
+    found_matching = expectation_map._AddResult(r, 'builder')
+    self.assertTrue(found_matching)
+    stats = data_types.BuildStats()
+    stats.AddFailedBuild('build_id')
+    stats.AddPassedBuild()
+    expected_expectation_map = {
+        'some/test/*': {
+            e: {
+                'builder': {
+                    'pixel_tests': stats,
+                },
+            },
+        },
+    }
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testResultMatchFailingExisting(self):
+    """Test adding a failing result when results for a builder exist."""
+    r = data_types.Result('some/test/case', ['win', 'win10'], 'Failure',
+                          'pixel_tests', 'build_id')
+    e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
+    stats = data_types.BuildStats()
+    stats.AddPassedBuild()
+    expectation_map = data_types.TestExpectationMap({
+        'some/test/*':
+        data_types.ExpectationBuilderMap({
+            e:
+            data_types.BuilderStepMap({
+                'builder':
+                data_types.StepBuildStatsMap({
+                    'pixel_tests': stats,
+                }),
+            }),
+        }),
+    })
+    found_matching = expectation_map._AddResult(r, 'builder')
+    self.assertTrue(found_matching)
+    stats = data_types.BuildStats()
+    stats.AddFailedBuild('build_id')
+    stats.AddPassedBuild()
+    expected_expectation_map = {
+        'some/test/*': {
+            e: {
+                'builder': {
+                    'pixel_tests': stats,
+                },
+            },
+        },
+    }
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testResultMatchMultiMatch(self):
+    """Test adding a passing result when multiple expectations match."""
+    r = data_types.Result('some/test/case', ['win', 'win10'], 'Pass',
+                          'pixel_tests', 'build_id')
+    e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
+    e2 = data_types.Expectation('some/test/case', ['win10'], 'Failure')
+    expectation_map = data_types.TestExpectationMap({
+        'some/test/*':
+        data_types.ExpectationBuilderMap({
+            e: data_types.BuilderStepMap(),
+            e2: data_types.BuilderStepMap(),
+        }),
+    })
+    found_matching = expectation_map._AddResult(r, 'builder')
+    self.assertTrue(found_matching)
+    stats = data_types.BuildStats()
+    stats.AddPassedBuild()
+    expected_expectation_map = {
+        'some/test/*': {
+            e: {
+                'builder': {
+                    'pixel_tests': stats,
+                },
+            },
+            e2: {
+                'builder': {
+                    'pixel_tests': stats,
+                },
+            }
+        }
+    }
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testResultNoMatch(self):
+    """Tests that a result is not added if no match is found."""
+    r = data_types.Result('some/test/case', ['win', 'win10'], 'Failure',
+                          'pixel_tests', 'build_id')
+    e = data_types.Expectation('some/test/*', ['win10', 'foo'], 'Failure')
+    expectation_map = data_types.TestExpectationMap({
+        'some/test/*':
+        data_types.ExpectationBuilderMap({
+            e: data_types.BuilderStepMap(),
+        })
+    })
+    found_matching = expectation_map._AddResult(r, 'builder')
+    self.assertFalse(found_matching)
+    expected_expectation_map = {'some/test/*': {e: {}}}
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+
+class TestExpectationMapSplitByStalenessUnittest(unittest.TestCase):
+  def testEmptyInput(self):
+    """Tests that nothing blows up with empty input."""
+    stale_dict, semi_stale_dict, active_dict =\
+        data_types.TestExpectationMap().SplitByStaleness()
+    self.assertEqual(stale_dict, {})
+    self.assertEqual(semi_stale_dict, {})
+    self.assertEqual(active_dict, {})
+    self.assertIsInstance(stale_dict, data_types.TestExpectationMap)
+    self.assertIsInstance(semi_stale_dict, data_types.TestExpectationMap)
+    self.assertIsInstance(active_dict, data_types.TestExpectationMap)
+
+  def testStaleExpectations(self):
+    """Tests output when only stale expectations are provided."""
+    expectation_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], ['Failure']):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(1, 0),
+                    'step2':
+                    uu.CreateStatsWithPassFails(2, 0),
+                }),
+                'bar_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(3, 0),
+                    'step2':
+                    uu.CreateStatsWithPassFails(4, 0)
+                }),
+            }),
+            data_types.Expectation('foo', ['linux'], ['RetryOnFailure']):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(5, 0),
+                    'step2':
+                    uu.CreateStatsWithPassFails(6, 0),
+                }),
+            }),
+        }),
+        'bar':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('bar', ['win'], ['Failure']):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(7, 0),
+                }),
+            }),
+        }),
+    })
+    expected_stale_dict = copy.deepcopy(expectation_map)
+    stale_dict, semi_stale_dict, active_dict =\
+        expectation_map.SplitByStaleness()
+    self.assertEqual(stale_dict, expected_stale_dict)
+    self.assertEqual(semi_stale_dict, {})
+    self.assertEqual(active_dict, {})
+
+  def testActiveExpectations(self):
+    """Tests output when only active expectations are provided."""
+    expectation_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], ['Failure']):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(0, 1),
+                    'step2':
+                    uu.CreateStatsWithPassFails(0, 2),
+                }),
+                'bar_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(0, 3),
+                    'step2':
+                    uu.CreateStatsWithPassFails(0, 4)
+                }),
+            }),
+            data_types.Expectation('foo', ['linux'], ['RetryOnFailure']):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(0, 5),
+                    'step2':
+                    uu.CreateStatsWithPassFails(0, 6),
+                }),
+            }),
+        }),
+        'bar':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('bar', ['win'], ['Failure']):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(0, 7),
+                }),
+            }),
+        }),
+    })
+    expected_active_dict = copy.deepcopy(expectation_map)
+    stale_dict, semi_stale_dict, active_dict =\
+        expectation_map.SplitByStaleness()
+    self.assertEqual(stale_dict, {})
+    self.assertEqual(semi_stale_dict, {})
+    self.assertEqual(active_dict, expected_active_dict)
+
+  def testSemiStaleExpectations(self):
+    """Tests output when only semi-stale expectations are provided."""
+    expectation_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['win'], ['Failure']):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(1, 0),
+                    'step2':
+                    uu.CreateStatsWithPassFails(2, 2),
+                }),
+                'bar_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(3, 0),
+                    'step2':
+                    uu.CreateStatsWithPassFails(0, 4)
+                }),
+            }),
+            data_types.Expectation('foo', ['linux'], ['RetryOnFailure']):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(5, 0),
+                    'step2':
+                    uu.CreateStatsWithPassFails(6, 6),
+                }),
+            }),
+        }),
+        'bar':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('bar', ['win'], ['Failure']):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(7, 0),
+                }),
+                'bar_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(0, 8),
+                }),
+            }),
+        }),
+    })
+    expected_semi_stale_dict = copy.deepcopy(expectation_map)
+    stale_dict, semi_stale_dict, active_dict =\
+        expectation_map.SplitByStaleness()
+    self.assertEqual(stale_dict, {})
+    self.assertEqual(semi_stale_dict, expected_semi_stale_dict)
+    self.assertEqual(active_dict, {})
+
+  def testAllExpectations(self):
+    """Tests output when all three types of expectations are provided."""
+    expectation_map = data_types.TestExpectationMap({
+        'foo':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo', ['stale'], 'Failure'):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(1, 0),
+                    'step2':
+                    uu.CreateStatsWithPassFails(2, 0),
+                }),
+                'bar_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(3, 0),
+                    'step2':
+                    uu.CreateStatsWithPassFails(4, 0)
+                }),
+            }),
+            data_types.Expectation('foo', ['semistale'], 'Failure'):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(1, 0),
+                    'step2':
+                    uu.CreateStatsWithPassFails(2, 2),
+                }),
+                'bar_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(3, 0),
+                    'step2':
+                    uu.CreateStatsWithPassFails(0, 4)
+                }),
+            }),
+            data_types.Expectation('foo', ['active'], 'Failure'):
+            data_types.BuilderStepMap({
+                'foo_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(1, 1),
+                    'step2':
+                    uu.CreateStatsWithPassFails(2, 2),
+                }),
+                'bar_builder':
+                data_types.StepBuildStatsMap({
+                    'step1':
+                    uu.CreateStatsWithPassFails(3, 3),
+                    'step2':
+                    uu.CreateStatsWithPassFails(0, 4)
+                }),
+            }),
+        }),
+    })
+    expected_stale = {
+        'foo': {
+            data_types.Expectation('foo', ['stale'], 'Failure'): {
+                'foo_builder': {
+                    'step1': uu.CreateStatsWithPassFails(1, 0),
+                    'step2': uu.CreateStatsWithPassFails(2, 0),
+                },
+                'bar_builder': {
+                    'step1': uu.CreateStatsWithPassFails(3, 0),
+                    'step2': uu.CreateStatsWithPassFails(4, 0)
+                },
+            },
+        },
+    }
+    expected_semi_stale = {
+        'foo': {
+            data_types.Expectation('foo', ['semistale'], 'Failure'): {
+                'foo_builder': {
+                    'step1': uu.CreateStatsWithPassFails(1, 0),
+                    'step2': uu.CreateStatsWithPassFails(2, 2),
+                },
+                'bar_builder': {
+                    'step1': uu.CreateStatsWithPassFails(3, 0),
+                    'step2': uu.CreateStatsWithPassFails(0, 4)
+                },
+            },
+        },
+    }
+    expected_active = {
+        'foo': {
+            data_types.Expectation('foo', ['active'], 'Failure'): {
+                'foo_builder': {
+                    'step1': uu.CreateStatsWithPassFails(1, 1),
+                    'step2': uu.CreateStatsWithPassFails(2, 2),
+                },
+                'bar_builder': {
+                    'step1': uu.CreateStatsWithPassFails(3, 3),
+                    'step2': uu.CreateStatsWithPassFails(0, 4)
+                },
+            },
+        },
+    }
+
+    stale_dict, semi_stale_dict, active_dict =\
+        expectation_map.SplitByStaleness()
+    self.assertEqual(stale_dict, expected_stale)
+    self.assertEqual(semi_stale_dict, expected_semi_stale)
+    self.assertEqual(active_dict, expected_active)
+
+
+class TestExpectationMapFilterOutUnusedExpectationsUnittest(unittest.TestCase):
+  def testNoUnused(self):
+    """Tests that filtering is a no-op if there are no unused expectations."""
+    expectation_map = data_types.TestExpectationMap({
+        'foo/test':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo/test', ['win'], ['Failure']):
+            data_types.BuilderStepMap({
+                'SomeBuilder':
+                data_types.StepBuildStatsMap(),
+            }),
+        })
+    })
+    expected_expectation_map = copy.deepcopy(expectation_map)
+    unused_expectations = expectation_map.FilterOutUnusedExpectations()
+    self.assertEqual(len(unused_expectations), 0)
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testUnusedButNotEmpty(self):
+    """Tests filtering if there is an unused expectation but no empty tests."""
+    expectation_map = data_types.TestExpectationMap({
+        'foo/test':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo/test', ['win'], ['Failure']):
+            data_types.BuilderStepMap({
+                'SomeBuilder':
+                data_types.StepBuildStatsMap(),
+            }),
+            data_types.Expectation('foo/test', ['linux'], ['Failure']):
+            data_types.BuilderStepMap(),
+        })
+    })
+    expected_expectation_map = data_types.TestExpectationMap({
+        'foo/test':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo/test', ['win'], ['Failure']):
+            data_types.BuilderStepMap({
+                'SomeBuilder':
+                data_types.StepBuildStatsMap(),
+            }),
+        }),
+    })
+    unused_expectations = expectation_map.FilterOutUnusedExpectations()
+    self.assertEqual(
+        unused_expectations,
+        [data_types.Expectation('foo/test', ['linux'], ['Failure'])])
+    self.assertEqual(expectation_map, expected_expectation_map)
+
+  def testUnusedAndEmpty(self):
+    """Tests filtering if there is an expectation that causes an empty test."""
+    expectation_map = data_types.TestExpectationMap({
+        'foo/test':
+        data_types.ExpectationBuilderMap({
+            data_types.Expectation('foo/test', ['win'], ['Failure']):
+            data_types.BuilderStepMap(),
+        }),
+    })
+    unused_expectations = expectation_map.FilterOutUnusedExpectations()
+    self.assertEqual(unused_expectations,
+                     [data_types.Expectation('foo/test', ['win'], ['Failure'])])
+    self.assertEqual(expectation_map, {})
+
+
 if __name__ == '__main__':
   unittest.main(verbosity=2)
diff --git a/testing/unexpected_passes_common/expectations.py b/testing/unexpected_passes_common/expectations.py
index d422689d..fbc357f 100644
--- a/testing/unexpected_passes_common/expectations.py
+++ b/testing/unexpected_passes_common/expectations.py
@@ -5,438 +5,298 @@
 
 from __future__ import print_function
 
-import collections
-import copy
 import logging
-import os
 import sys
 
-import validate_tag_consistency
-
 from typ import expectations_parser
 from unexpected_passes_common import data_types
 from unexpected_passes_common import result_output
 
-EXPECTATIONS_DIR = os.path.realpath(
-    os.path.join(os.path.dirname(__file__), '..', '..', 'content', 'test',
-                 'gpu', 'gpu_tests', 'test_expectations'))
 
 FINDER_DISABLE_COMMENT = 'finder:disable'
 FINDER_ENABLE_COMMENT = 'finder:enable'
 
-FULL_PASS = 1
-NEVER_PASS = 2
-PARTIAL_PASS = 3
 
+class Expectations(object):
+  def CreateTestExpectationMap(self, expectation_file, tests):
+    """Creates an expectation map based off a file or list of tests.
 
-def CreateTestExpectationMap(expectation_file, tests):
-  """Creates an expectation map based off a file or list of tests.
+    Args:
+      expectation_file: A filepath to an expectation file to read from, or None.
+          If a filepath is specified, |tests| must be None.
+      tests: An iterable of strings containing test names to check. If
+          specified, |expectation_file| must be None.
 
-  Args:
-    expectation_file: A filepath to an expectation file to read from, or None.
-        If a filepath is specified, |tests| must be None.
-    tests: An iterable of strings containing test names to check. If specified,
-        |expectation_file| must be None.
+    Returns:
+      A data_types.TestExpectationMap, although all its BuilderStepMap contents
+      will be empty.
+    """
+    logging.info('Creating test expectation map')
+    assert expectation_file or tests
+    assert not (expectation_file and tests)
 
-  Returns:
-    A data_types.TestExpectationMap, although all its BuilderStepMap contents
-    will be empty.
-  """
-  logging.info('Creating test expectation map')
-  assert expectation_file or tests
-  assert not (expectation_file and tests)
+    if expectation_file:
+      with open(expectation_file) as f:
+        content = f.read()
+    else:
+      content = '# results: [ RetryOnFailure ]\n'
+      for t in tests:
+        content += '%s [ RetryOnFailure ]\n' % t
 
-  if expectation_file:
+    list_parser = expectations_parser.TaggedTestListParser(content)
+    expectation_map = data_types.TestExpectationMap()
+    logging.debug('Parsed %d expectations', len(list_parser.expectations))
+    for e in list_parser.expectations:
+      if 'Skip' in e.raw_results:
+        continue
+      expectation = data_types.Expectation(e.test, e.tags, e.raw_results,
+                                           e.reason)
+      expectations_for_test = expectation_map.setdefault(
+          e.test, data_types.ExpectationBuilderMap())
+      assert expectation not in expectations_for_test
+      expectations_for_test[expectation] = data_types.BuilderStepMap()
+
+    return expectation_map
+
+  def RemoveExpectationsFromFile(self, expectations, expectation_file):
+    """Removes lines corresponding to |expectations| from |expectation_file|.
+
+    Ignores any lines that match but are within a disable block or have an
+    inline disable comment.
+
+    Args:
+      expectations: A list of data_types.Expectations to remove.
+      expectation_file: A filepath pointing to an expectation file to remove
+          lines from.
+
+    Returns:
+      A set of strings containing URLs of bugs associated with the removed
+      expectations.
+    """
+
     with open(expectation_file) as f:
-      content = f.read()
-  else:
-    content = '# results: [ RetryOnFailure ]\n'
-    for t in tests:
-      content += '%s [ RetryOnFailure ]\n' % t
+      input_contents = f.read()
 
-  list_parser = expectations_parser.TaggedTestListParser(content)
-  expectation_map = data_types.TestExpectationMap()
-  logging.debug('Parsed %d expectations', len(list_parser.expectations))
-  for e in list_parser.expectations:
-    if 'Skip' in e.raw_results:
-      continue
-    expectation = data_types.Expectation(e.test, e.tags, e.raw_results,
-                                         e.reason)
-    expectations_for_test = expectation_map.setdefault(
-        e.test, data_types.ExpectationBuilderMap())
-    assert expectation not in expectations_for_test
-    expectations_for_test[expectation] = data_types.BuilderStepMap()
+    output_contents = ''
+    in_disable_block = False
+    disable_block_reason = ''
+    removed_urls = set()
+    for line in input_contents.splitlines(True):
+      # Auto-add any comments or empty lines
+      stripped_line = line.strip()
+      if _IsCommentOrBlankLine(stripped_line):
+        output_contents += line
+        assert not (FINDER_DISABLE_COMMENT in line
+                    and FINDER_ENABLE_COMMENT in line)
+        # Handle disable/enable block comments.
+        if FINDER_DISABLE_COMMENT in line:
+          if in_disable_block:
+            raise RuntimeError(
+                'Invalid expectation file %s - contains a disable comment "%s" '
+                'that is in another disable block.' %
+                (expectation_file, stripped_line))
+          in_disable_block = True
+          disable_block_reason = _GetDisableReasonFromComment(line)
+        if FINDER_ENABLE_COMMENT in line:
+          if not in_disable_block:
+            raise RuntimeError(
+                'Invalid expectation file %s - contains an enable comment "%s" '
+                'that is outside of a disable block.' %
+                (expectation_file, stripped_line))
+          in_disable_block = False
+        continue
 
-  return expectation_map
+      current_expectation = self._CreateExpectationFromExpectationFileLine(line)
 
-
-def FilterOutUnusedExpectations(test_expectation_map):
-  """Filters out any unused Expectations from |test_expectation_map|.
-
-  An Expectation is considered unused if its corresponding dictionary is empty.
-  If removing Expectations results in a top-level test key having an empty
-  dictionary, that test entry will also be removed.
-
-  Args:
-    test_expectation_map: A data_types.TestExpectationMap. Will be modified in
-        place.
-
-  Returns:
-    A list containing any Expectations that were removed.
-  """
-  assert isinstance(test_expectation_map, data_types.TestExpectationMap)
-  logging.info('Filtering out unused expectations')
-  unused_expectations = []
-  for _, expectation, builder_map in test_expectation_map.IterBuilderStepMaps():
-    if not builder_map:
-      unused_expectations.append(expectation)
-  for unused in unused_expectations:
-    for _, expectation_map in test_expectation_map.items():
-      if unused in expectation_map:
-        del expectation_map[unused]
-  logging.debug('Found %d unused expectations', len(unused_expectations))
-
-  empty_tests = []
-  for test_name, expectation_map in test_expectation_map.items():
-    if not expectation_map:
-      empty_tests.append(test_name)
-  for empty in empty_tests:
-    del test_expectation_map[empty]
-  logging.debug('Found %d empty tests: %s', len(empty_tests), empty_tests)
-
-  return unused_expectations
-
-
-def SplitExpectationsByStaleness(test_expectation_map):
-  """Separates |test_expectation_map| based on expectation staleness.
-
-  Args:
-    test_expectation_map: A data_types.TestExpectationMap with any unused
-        expectations already filtered out.
-
-  Returns:
-    Three data_types.TestExpectationMaps (stale_dict, semi_stale_dict,
-    active_dict). All three combined contain the information of
-    |test_expectation_map|. |stale_dict| contains entries for expectations that
-    are no longer being helpful, |semi_stale_dict| contains entries for
-    expectations that might be removable or modifiable, but have at least one
-    failed test run. |active_dict| contains entries for expectations that are
-    preventing failures on all builders they're active on, and thus shouldn't be
-    removed.
-  """
-  assert isinstance(test_expectation_map, data_types.TestExpectationMap)
-
-  stale_dict = data_types.TestExpectationMap()
-  semi_stale_dict = data_types.TestExpectationMap()
-  active_dict = data_types.TestExpectationMap()
-
-  # This initially looks like a good target for using
-  # data_types.TestExpectationMap's iterators since there are many nested loops.
-  # However, we need to reset state in different loops, and the alternative of
-  # keeping all the state outside the loop and resetting under certain
-  # conditions ends up being less readable than just using nested loops.
-  for test_name, expectation_map in test_expectation_map.items():
-    for expectation, builder_map in expectation_map.items():
-      # A temporary map to hold data so we can later determine whether an
-      # expectation is stale, semi-stale, or active.
-      tmp_map = {
-          FULL_PASS: data_types.BuilderStepMap(),
-          NEVER_PASS: data_types.BuilderStepMap(),
-          PARTIAL_PASS: data_types.BuilderStepMap(),
-      }
-
-      split_stats_map = builder_map.SplitBuildStatsByPass()
-      for builder_name, (fully_passed, never_passed,
-                         partially_passed) in split_stats_map.items():
-        if fully_passed:
-          tmp_map[FULL_PASS][builder_name] = fully_passed
-        if never_passed:
-          tmp_map[NEVER_PASS][builder_name] = never_passed
-        if partially_passed:
-          tmp_map[PARTIAL_PASS][builder_name] = partially_passed
-
-      def _CopyPassesIntoBuilderMap(builder_map, pass_types):
-        for pt in pass_types:
-          for builder, steps in tmp_map[pt].items():
-            builder_map.setdefault(builder,
-                                   data_types.StepBuildStatsMap()).update(steps)
-
-      # Handle the case of a stale expectation.
-      if not (tmp_map[NEVER_PASS] or tmp_map[PARTIAL_PASS]):
-        builder_map = stale_dict.setdefault(
-            test_name, data_types.ExpectationBuilderMap()).setdefault(
-                expectation, data_types.BuilderStepMap())
-        _CopyPassesIntoBuilderMap(builder_map, [FULL_PASS])
-      # Handle the case of an active expectation.
-      elif not tmp_map[FULL_PASS]:
-        builder_map = active_dict.setdefault(
-            test_name, data_types.ExpectationBuilderMap()).setdefault(
-                expectation, data_types.BuilderStepMap())
-        _CopyPassesIntoBuilderMap(builder_map, [NEVER_PASS, PARTIAL_PASS])
-      # Handle the case of a semi-stale expectation.
-      else:
-        # TODO(crbug.com/998329): Sort by pass percentage so it's easier to find
-        # problematic builders without highlighting.
-        builder_map = semi_stale_dict.setdefault(
-            test_name, data_types.ExpectationBuilderMap()).setdefault(
-                expectation, data_types.BuilderStepMap())
-        _CopyPassesIntoBuilderMap(builder_map,
-                                  [FULL_PASS, PARTIAL_PASS, NEVER_PASS])
-  return stale_dict, semi_stale_dict, active_dict
-
-
-def RemoveExpectationsFromFile(expectations, expectation_file):
-  """Removes lines corresponding to |expectations| from |expectation_file|.
-
-  Ignores any lines that match but are within a disable block or have an inline
-  disable comment.
-
-  Args:
-    expectations: A list of data_types.Expectations to remove.
-    expectation_file: A filepath pointing to an expectation file to remove lines
-        from.
-
-  Returns:
-    A set of strings containing URLs of bugs associated with the removed
-    expectations.
-  """
-
-  with open(expectation_file) as f:
-    input_contents = f.read()
-
-  output_contents = ''
-  in_disable_block = False
-  disable_block_reason = ''
-  removed_urls = set()
-  for line in input_contents.splitlines(True):
-    # Auto-add any comments or empty lines
-    stripped_line = line.strip()
-    if _IsCommentOrBlankLine(stripped_line):
-      output_contents += line
-      assert not (FINDER_DISABLE_COMMENT in line
-                  and FINDER_ENABLE_COMMENT in line)
-      # Handle disable/enable block comments.
-      if FINDER_DISABLE_COMMENT in line:
+      # Add any lines containing expectations that don't match any of the given
+      # expectations to remove.
+      if any([e for e in expectations if e == current_expectation]):
+        # Skip any expectations that match if we're in a disable block or there
+        # is an inline disable comment.
         if in_disable_block:
-          raise RuntimeError(
-              'Invalid expectation file %s - contains a disable comment "%s" '
-              'that is in another disable block.' %
-              (expectation_file, stripped_line))
-        in_disable_block = True
-        disable_block_reason = _GetDisableReasonFromComment(line)
-      if FINDER_ENABLE_COMMENT in line:
-        if not in_disable_block:
-          raise RuntimeError(
-              'Invalid expectation file %s - contains an enable comment "%s" '
-              'that is outside of a disable block.' %
-              (expectation_file, stripped_line))
-        in_disable_block = False
-      continue
-
-    current_expectation = _CreateExpectationFromExpectationFileLine(line)
-
-    # Add any lines containing expectations that don't match any of the given
-    # expectations to remove.
-    if any([e for e in expectations if e == current_expectation]):
-      # Skip any expectations that match if we're in a disable block or there
-      # is an inline disable comment.
-      if in_disable_block:
-        output_contents += line
-        logging.info(
-            'Would have removed expectation %s, but inside a disable block '
-            'with reason %s', stripped_line, disable_block_reason)
-      elif FINDER_DISABLE_COMMENT in line:
-        output_contents += line
-        logging.info(
-            'Would have removed expectation %s, but it has an inline disable '
-            'comment with reason %s',
-            stripped_line.split('#')[0], _GetDisableReasonFromComment(line))
+          output_contents += line
+          logging.info(
+              'Would have removed expectation %s, but inside a disable block '
+              'with reason %s', stripped_line, disable_block_reason)
+        elif FINDER_DISABLE_COMMENT in line:
+          output_contents += line
+          logging.info(
+              'Would have removed expectation %s, but it has an inline disable '
+              'comment with reason %s',
+              stripped_line.split('#')[0], _GetDisableReasonFromComment(line))
+        else:
+          bug = current_expectation.bug
+          if bug:
+            removed_urls.add(bug)
       else:
-        bug = current_expectation.bug
-        if bug:
-          removed_urls.add(bug)
-    else:
-      output_contents += line
+        output_contents += line
 
-  with open(expectation_file, 'w') as f:
-    f.write(output_contents)
+    with open(expectation_file, 'w') as f:
+      f.write(output_contents)
 
-  return removed_urls
+    return removed_urls
 
+  def _CreateExpectationFromExpectationFileLine(self, line):
+    """Creates a data_types.Expectation from |line|.
 
-def _IsCommentOrBlankLine(line):
-  return (not line or line.startswith('#'))
+    Args:
+      line: A string containing a single line from an expectation file.
 
+    Returns:
+      A data_types.Expectation containing the same information as |line|.
+    """
+    header = self._GetExpectationFileTagHeader()
+    single_line_content = header + line
+    list_parser = expectations_parser.TaggedTestListParser(single_line_content)
+    assert len(list_parser.expectations) == 1
+    typ_expectation = list_parser.expectations[0]
+    return data_types.Expectation(typ_expectation.test, typ_expectation.tags,
+                                  typ_expectation.raw_results,
+                                  typ_expectation.reason)
 
-def _CreateExpectationFromExpectationFileLine(line):
-  """Creates a data_types.Expectation from |line|.
+  def _GetExpectationFileTagHeader(self):
+    """Gets the tag header used for expectation files.
 
-  Args:
-    line: A string containing a single line from an expectation file.
+    Returns:
+      A string containing an expectation file header, i.e. the comment block at
+      the top of the file defining possible tags and expected results.
+    """
+    raise NotImplementedError()
 
-  Returns:
-    A data_types.Expectation containing the same information as |line|.
-  """
-  header = validate_tag_consistency.TAG_HEADER
-  single_line_content = header + line
-  list_parser = expectations_parser.TaggedTestListParser(single_line_content)
-  assert len(list_parser.expectations) == 1
-  typ_expectation = list_parser.expectations[0]
-  return data_types.Expectation(typ_expectation.test, typ_expectation.tags,
-                                typ_expectation.raw_results,
-                                typ_expectation.reason)
+  def ModifySemiStaleExpectations(self, stale_expectation_map,
+                                  expectation_file):
+    """Modifies lines from |stale_expectation_map| in |expectation_file|.
 
+    Prompts the user for each modification and provides debug information since
+    semi-stale expectations cannot be blindly removed like fully stale ones.
 
-def ModifySemiStaleExpectations(stale_expectation_map, expectation_file):
-  """Modifies lines from |stale_expectation_map| in |expectation_file|.
+    Args:
+      stale_expectation_map: A data_types.TestExpectationMap containing stale
+          expectations.
+      expectation_file: A filepath pointing to an expectation file to remove
+          lines from. Lines are removed or kept based on interactive user
+          input; debug information for each semi-stale expectation is
+          printed to stdout.
 
-  Prompts the user for each modification and provides debug information since
-  semi-stale expectations cannot be blindly removed like fully stale ones.
+    Returns:
+      A set of strings containing URLs of bugs associated with the modified
+      (manually modified by the user or removed by the script) expectations.
+    """
+    with open(expectation_file) as infile:
+      file_contents = infile.read()
 
-  Args:
-    stale_expectation_map: A data_types.TestExpectationMap containing stale
-        expectations.
-    expectation_file: A filepath pointing to an expectation file to remove lines
-        from.
-    file_handle: An optional open file-like object to output to. If not
-        specified, stdout will be used.
+    expectations_to_remove = []
+    expectations_to_modify = []
+    for _, e, builder_map in stale_expectation_map.IterBuilderStepMaps():
+      line, line_number = self._GetExpectationLine(e, file_contents)
+      expectation_str = None
+      if not line:
+        logging.error(
+            'Could not find line corresponding to semi-stale expectation for '
+            '%s with tags %s and expected results %s', e.test, e.tags,
+            e.expected_results)
+        expectation_str = '[ %s ] %s [ %s ]' % (' '.join(
+            e.tags), e.test, ' '.join(e.expected_results))
+      else:
+        expectation_str = '%s (approx. line %d)' % (line, line_number)
 
-  Returns:
-    A set of strings containing URLs of bugs associated with the modified
-    (manually modified by the user or removed by the script) expectations.
-  """
-  with open(expectation_file) as infile:
-    file_contents = infile.read()
+      str_dict = result_output.ConvertBuilderMapToPassOrderedStringDict(
+          builder_map)
+      print('\nSemi-stale expectation:\n%s' % expectation_str)
+      result_output.RecursivePrintToFile(str_dict, 1, sys.stdout)
 
-  expectations_to_remove = []
-  expectations_to_modify = []
-  for _, e, builder_map in stale_expectation_map.IterBuilderStepMaps():
-    line, line_number = _GetExpectationLine(e, file_contents)
-    expectation_str = None
-    if not line:
-      logging.error(
-          'Could not find line corresponding to semi-stale expectation for %s '
-          'with tags %s and expected results %s' % e.test, e.tags,
-          e.expected_results)
-      expectation_str = '[ %s ] %s [ %s ]' % (' '.join(
-          e.tags), e.test, ' '.join(e.expected_results))
-    else:
-      expectation_str = '%s (approx. line %d)' % (line, line_number)
+      response = _WaitForUserInputOnModification()
+      if response == 'r':
+        expectations_to_remove.append(e)
+      elif response == 'm':
+        expectations_to_modify.append(e)
 
-    str_dict = _ConvertBuilderMapToPassOrderedStringDict(builder_map)
-    print('\nSemi-stale expectation:\n%s' % expectation_str)
-    result_output._RecursivePrintToFile(str_dict, 1, sys.stdout)
+    # It's possible that the user will introduce a typo while manually modifying
+    # an expectation, which will cause a parser error. Catch that now and give
+    # them chances to fix it so that they don't lose all of their work due to an
+    # early exit.
+    while True:
+      try:
+        with open(expectation_file) as infile:
+          file_contents = infile.read()
+        _ = expectations_parser.TaggedTestListParser(file_contents)
+        break
+      except expectations_parser.ParseError as error:
+        logging.error('Got parser error: %s', error)
+        logging.error(
+            'This probably means you introduced a typo, please fix it.')
+        _WaitForAnyUserInput()
 
-    response = _WaitForUserInputOnModification()
-    if response == 'r':
-      expectations_to_remove.append(e)
-    elif response == 'm':
-      expectations_to_modify.append(e)
+    modified_urls = self.RemoveExpectationsFromFile(expectations_to_remove,
+                                                    expectation_file)
+    for e in expectations_to_modify:
+      modified_urls.add(e.bug)
+    return modified_urls
 
-  # It's possible that the user will introduce a typo while manually modifying
-  # an expectation, which will cause a parser error. Catch that now and give
-  # them chances to fix it so that they don't lose all of their work due to an
-  # early exit.
-  while True:
-    try:
-      with open(expectation_file) as infile:
-        file_contents = infile.read()
-      _ = expectations_parser.TaggedTestListParser(file_contents)
-      break
-    except expectations_parser.ParseError as error:
-      logging.error('Got parser error: %s', error)
-      logging.error('This probably means you introduced a typo, please fix it.')
-      _WaitForAnyUserInput()
+  def _GetExpectationLine(self, expectation, file_contents):
+    """Gets the line and line number of |expectation| in |file_contents|.
 
-  modified_urls = RemoveExpectationsFromFile(expectations_to_remove,
-                                             expectation_file)
-  for e in expectations_to_modify:
-    modified_urls.add(e.bug)
-  return modified_urls
+    Args:
+      expectation: A data_types.Expectation.
+      file_contents: A string containing the contents read from an expectation
+          file.
 
+    Returns:
+      A tuple (line, line_number). |line| is a string containing the exact line
+      in |file_contents| corresponding to |expectation|. |line_number| is an int
+      corresponding to where |line| is in |file_contents|. |line_number| may be
+      off if the file on disk has changed since |file_contents| was read. If a
+      corresponding line cannot be found, both |line| and |line_number| are
+      None.
+    """
+    # We have all the information necessary to recreate the expectation line and
+    # line number can be pulled during the initial expectation parsing. However,
+    # the information we have is not necessarily in the same order as the
+    # text file (e.g. tag ordering), and line numbers can change pretty
+    # dramatically between the initial parse and now due to stale expectations
+    # being removed. So, parse this way in order to improve the user experience.
+    file_lines = file_contents.splitlines()
+    for line_number, line in enumerate(file_lines):
+      if _IsCommentOrBlankLine(line.strip()):
+        continue
+      current_expectation = self._CreateExpectationFromExpectationFileLine(line)
+      if expectation == current_expectation:
+        return line, line_number + 1
+    return None, None
 
-def _GetExpectationLine(expectation, file_contents):
-  """Gets the line and line number of |expectation| in |file_contents|.
+  def FindOrphanedBugs(self, affected_urls):
+    """Finds cases where expectations for bugs no longer exist.
 
-  Args:
-    expectation: A data_types.Expectation.
-    file_contents: A string containing the contents read from an expectation
-        file.
+    Args:
+      affected_urls: An iterable of affected bug URLs, as returned by functions
+          such as RemoveExpectationsFromFile.
 
-  Returns:
-    A tuple (line, line_number). |line| is a string containing the exact line
-    in |file_contents| corresponding to |expectation|. |line_number| is an int
-    corresponding to where |line| is in |file_contents|. |line_number| may be
-    off if the file on disk has changed since |file_contents| was read. If a
-    corresponding line cannot be found, both |line| and |line_number| are None.
-  """
-  # We have all the information necessary to recreate the expectation line and
-  # line number can be pulled during the initial expectation parsing. However,
-  # the information we have is not necessarily in the same order as the
-  # text file (e.g. tag ordering), and line numbers can change pretty
-  # dramatically between the initial parse and now due to stale expectations
-  # being removed. So, parse this way in order to improve the user experience.
-  file_lines = file_contents.splitlines()
-  for line_number, line in enumerate(file_lines):
-    if _IsCommentOrBlankLine(line.strip()):
-      continue
-    current_expectation = _CreateExpectationFromExpectationFileLine(line)
-    if expectation == current_expectation:
-      return line, line_number + 1
-  return None, None
+    Returns:
+      A set containing a subset of |affected_urls| who no longer have any
+      associated expectations in any expectation files.
+    """
+    seen_bugs = set()
 
+    expectation_files = self._GetExpectationFilepaths()
 
-def _ConvertBuilderMapToPassOrderedStringDict(builder_map):
-  """Converts |builder_map| into an ordered dict split by pass type.
+    for ef in expectation_files:
+      with open(ef) as infile:
+        contents = infile.read()
+      for url in affected_urls:
+        if url in seen_bugs:
+          continue
+        if url in contents:
+          seen_bugs.add(url)
+    return set(affected_urls) - seen_bugs
 
-  Args:
-    builder_map: A data_types.BuildStepMap.
+  def _GetExpectationFilepaths(self):
+    """Gets all the filepaths to expectation files of interest.
 
-  Returns:
-    A collections.OrderedDict in the following format:
-    {
-      result_output.FULL_PASS: {
-        builder_name: [
-          step_name (total passes / total builds)
-        ],
-      },
-      result_output.NEVER_PASS: {
-        builder_name: [
-          step_name (total passes / total builds)
-        ],
-      },
-      result_output.PARTIAL_PASS: {
-        builder_name: [
-          step_name (total passes / total builds): [
-            failure links,
-          ],
-        ],
-      },
-    }
-
-    The ordering and presence of the top level keys is guaranteed.
-  """
-  # This is similar to what we do in
-  # result_output._ConvertTestExpectationMapToStringDict, but we want the
-  # top-level grouping to be by pass type rather than by builder, so we can't
-  # re-use the code from there.
-  # Ordered dict used to ensure that order is guaranteed when printing out.
-  str_dict = collections.OrderedDict()
-  str_dict[result_output.FULL_PASS] = {}
-  str_dict[result_output.NEVER_PASS] = {}
-  str_dict[result_output.PARTIAL_PASS] = {}
-  for builder_name, step_name, stats in builder_map.IterBuildStats():
-    step_str = result_output.AddStatsToStr(step_name, stats)
-    if stats.did_fully_pass:
-      str_dict[result_output.FULL_PASS].setdefault(builder_name,
-                                                   []).append(step_str)
-    elif stats.did_never_pass:
-      str_dict[result_output.NEVER_PASS].setdefault(builder_name,
-                                                    []).append(step_str)
-    else:
-      str_dict[result_output.PARTIAL_PASS].setdefault(
-          builder_name, {})[step_str] = list(stats.failure_links)
-  return str_dict
+    Returns:
+      A list of strings, each element being a filepath pointing towards an
+      expectation file.
+    """
+    raise NotImplementedError()
 
 
 def _WaitForAnyUserInput():
@@ -466,147 +326,12 @@
   return response
 
 
-def MergeExpectationMaps(base_map, merge_map, reference_map=None):
-  """Merges |merge_map| into |base_map|.
-
-  Args:
-    base_map: A data_types.TestExpectationMap to be updated with the contents of
-        |merge_map|. Will be modified in place.
-    merge_map: A data_types.TestExpectationMap whose contents will be merged
-        into |base_map|.
-    reference_map: A dict containing the information that was originally in
-        |base_map|. Used for ensuring that a single expectation/builder/step
-        combination is only ever updated once. If None, a copy of |base_map|
-        will be used.
-  """
-  # We only enforce that we're starting with a TestExpectationMap when this is
-  # initially called, not on the recursive calls.
-  if reference_map is None:
-    assert isinstance(base_map, data_types.TestExpectationMap)
-    assert isinstance(merge_map, data_types.TestExpectationMap)
-  # We should only ever encounter a single updated BuildStats for an
-  # expectation/builder/step combination. Use the reference map to determine
-  # if a particular BuildStats has already been updated or not.
-  reference_map = reference_map or copy.deepcopy(base_map)
-  for key, value in merge_map.items():
-    if key not in base_map:
-      base_map[key] = value
-    else:
-      if isinstance(value, dict):
-        MergeExpectationMaps(base_map[key], value, reference_map.get(key, {}))
-      else:
-        assert isinstance(value, data_types.BuildStats)
-        # Ensure we haven't updated this BuildStats already. If the reference
-        # map doesn't have a corresponding BuildStats, then base_map shouldn't
-        # have initially either, and thus it would have been added before
-        # reaching this point. Otherwise, the two values must match, meaning
-        # that base_map's BuildStats hasn't been updated yet.
-        reference_stats = reference_map.get(key, None)
-        assert reference_stats is not None
-        assert reference_stats == base_map[key]
-        base_map[key] = value
-
-
-def AddResultListToMap(expectation_map, builder, results):
-  """Adds |results| to |expectation_map|.
-
-  Args:
-    expectation_map: A data_types.TestExpectationMap. Will be modified in-place.
-    builder: A string containing the builder |results| came from. Should be
-        prefixed with something to distinguish between identically named CI and
-        try builders.
-    results: A list of data_types.Result objects corresponding to the ResultDB
-        data queried for |builder|.
-
-  Returns:
-    A list of data_types.Result objects who did not have a matching expectation
-    in |expectation_map|.
-  """
-  assert isinstance(expectation_map, data_types.TestExpectationMap)
-  failure_results = set()
-  pass_results = set()
-  unmatched_results = []
-  for r in results:
-    if r.actual_result == 'Pass':
-      pass_results.add(r)
-    else:
-      failure_results.add(r)
-
-  # Remove any cases of failure -> pass from the passing set. If a test is
-  # flaky, we get both pass and failure results for it, so we need to remove the
-  # any cases of a pass result having a corresponding, earlier failure result.
-  modified_failing_retry_results = set()
-  for r in failure_results:
-    modified_failing_retry_results.add(
-        data_types.Result(r.test, r.tags, 'Pass', r.step, r.build_id))
-  pass_results -= modified_failing_retry_results
-
-  for r in pass_results | failure_results:
-    found_matching = _AddResultToMap(r, builder, expectation_map)
-    if not found_matching:
-      unmatched_results.append(r)
-
-  return unmatched_results
-
-
-def _AddResultToMap(result, builder, test_expectation_map):
-  """Adds a single |result| to |test_expectation_map|.
-
-  Args:
-    result: A data_types.Result object to add.
-    builder: A string containing the name of the builder |result| came from.
-    test_expectation_map: A data_types.TestExpectationMap. Will be modified
-        in-place.
-
-  Returns:
-    True if an expectation in |test_expectation_map| applied to |result|,
-    otherwise False.
-  """
-  assert isinstance(test_expectation_map, data_types.TestExpectationMap)
-  found_matching_expectation = False
-  # We need to use fnmatch since wildcards are supported, so there's no point in
-  # checking the test name key right now. The AppliesToResult check already does
-  # an fnmatch check.
-  for _, expectation, builder_map in test_expectation_map.IterBuilderStepMaps():
-    if expectation.AppliesToResult(result):
-      found_matching_expectation = True
-      step_map = builder_map.setdefault(builder, data_types.StepBuildStatsMap())
-      stats = step_map.setdefault(result.step, data_types.BuildStats())
-      if result.actual_result == 'Pass':
-        stats.AddPassedBuild()
-      else:
-        stats.AddFailedBuild(result.build_id)
-  return found_matching_expectation
-
-
 def _GetDisableReasonFromComment(line):
   return line.split(FINDER_DISABLE_COMMENT, 1)[1].strip()
 
 
-def FindOrphanedBugs(affected_urls):
-  """Finds cases where expectations for bugs no longer exist.
-
-  Args:
-    affected_urls: An iterable of affected bug URLs, as returned by functions
-        such as RemoveExpectationsFromFile.
-
-  Returns:
-    A set containing a subset of |affected_urls| who no longer have any
-    associated expectations in any expectation files.
-  """
-  seen_bugs = set()
-
-  for expectation_file in os.listdir(EXPECTATIONS_DIR):
-    if not expectation_file.endswith('_expectations.txt'):
-      continue
-    with open(os.path.join(EXPECTATIONS_DIR, expectation_file)) as infile:
-      contents = infile.read()
-    for url in affected_urls:
-      if url in seen_bugs:
-        continue
-      if url in contents:
-        seen_bugs.add(url)
-  return set(affected_urls) - seen_bugs
+def _IsCommentOrBlankLine(line):
+  return (not line or line.startswith('#'))
 
 
 def _get_input(prompt):
diff --git a/testing/unexpected_passes_common/expectations_unittest.py b/testing/unexpected_passes_common/expectations_unittest.py
index 4df7181..0146b8a 100755
--- a/testing/unexpected_passes_common/expectations_unittest.py
+++ b/testing/unexpected_passes_common/expectations_unittest.py
@@ -5,8 +5,6 @@
 
 from __future__ import print_function
 
-import collections
-import copy
 import os
 import sys
 import tempfile
@@ -19,11 +17,8 @@
 
 from pyfakefs import fake_filesystem_unittest
 
-import validate_tag_consistency
-
 from unexpected_passes_common import data_types
 from unexpected_passes_common import expectations
-from unexpected_passes_common import result_output
 from unexpected_passes_common import unittest_utils as uu
 
 FAKE_EXPECTATION_FILE_CONTENTS = """\
@@ -52,20 +47,21 @@
 class CreateTestExpectationMapUnittest(fake_filesystem_unittest.TestCase):
   def setUp(self):
     self.setUpPyfakefs()
+    self.instance = expectations.Expectations()
 
   def testExclusiveOr(self):
     """Tests that only one input can be specified."""
     with self.assertRaises(AssertionError):
-      expectations.CreateTestExpectationMap(None, None)
+      self.instance.CreateTestExpectationMap(None, None)
     with self.assertRaises(AssertionError):
-      expectations.CreateTestExpectationMap('foo', ['bar'])
+      self.instance.CreateTestExpectationMap('foo', ['bar'])
 
   def testExpectationFile(self):
     """Tests reading expectations from an expectation file."""
     with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
       filename = f.name
       f.write(FAKE_EXPECTATION_FILE_CONTENTS)
-    expectation_map = expectations.CreateTestExpectationMap(filename, None)
+    expectation_map = self.instance.CreateTestExpectationMap(filename, None)
     # Skip expectations should be omitted, but everything else should be
     # present.
     # yapf: disable
@@ -86,7 +82,7 @@
 
   def testIndividualTests(self):
     """Tests reading expectations from a list of tests."""
-    expectation_map = expectations.CreateTestExpectationMap(
+    expectation_map = self.instance.CreateTestExpectationMap(
         None, ['foo/test', 'bar/*'])
     expected_expectation_map = {
         'foo/test': {
@@ -100,363 +96,17 @@
     self.assertIsInstance(expectation_map, data_types.TestExpectationMap)
 
 
-class FilterOutUnusedExpectationsUnittest(unittest.TestCase):
-  def testNoUnused(self):
-    """Tests that filtering is a no-op if there are no unused expectations."""
-    expectation_map = data_types.TestExpectationMap({
-        'foo/test':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo/test', ['win'], ['Failure']):
-            data_types.BuilderStepMap({
-                'SomeBuilder':
-                data_types.StepBuildStatsMap(),
-            }),
-        })
-    })
-    expected_expectation_map = copy.deepcopy(expectation_map)
-    unused_expectations = expectations.FilterOutUnusedExpectations(
-        expectation_map)
-    self.assertEqual(len(unused_expectations), 0)
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testUnusedButNotEmpty(self):
-    """Tests filtering if there is an unused expectation but no empty tests."""
-    expectation_map = data_types.TestExpectationMap({
-        'foo/test':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo/test', ['win'], ['Failure']):
-            data_types.BuilderStepMap({
-                'SomeBuilder':
-                data_types.StepBuildStatsMap(),
-            }),
-            data_types.Expectation('foo/test', ['linux'], ['Failure']):
-            data_types.BuilderStepMap(),
-        })
-    })
-    expected_expectation_map = data_types.TestExpectationMap({
-        'foo/test':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo/test', ['win'], ['Failure']):
-            data_types.BuilderStepMap({
-                'SomeBuilder':
-                data_types.StepBuildStatsMap(),
-            }),
-        }),
-    })
-    unused_expectations = expectations.FilterOutUnusedExpectations(
-        expectation_map)
-    self.assertEqual(
-        unused_expectations,
-        [data_types.Expectation('foo/test', ['linux'], ['Failure'])])
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testUnusedAndEmpty(self):
-    """Tests filtering if there is an expectation that causes an empty test."""
-    expectation_map = data_types.TestExpectationMap({
-        'foo/test':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo/test', ['win'], ['Failure']):
-            data_types.BuilderStepMap(),
-        }),
-    })
-    unused_expectations = expectations.FilterOutUnusedExpectations(
-        expectation_map)
-    self.assertEqual(unused_expectations,
-                     [data_types.Expectation('foo/test', ['win'], ['Failure'])])
-    self.assertEqual(expectation_map, {})
-
-
-class SplitExpectationsByStalenessUnittest(unittest.TestCase):
-  def testEmptyInput(self):
-    """Tests that nothing blows up with empty input."""
-    stale_dict, semi_stale_dict, active_dict =\
-        expectations.SplitExpectationsByStaleness(
-            data_types.TestExpectationMap())
-    self.assertEqual(stale_dict, {})
-    self.assertEqual(semi_stale_dict, {})
-    self.assertEqual(active_dict, {})
-    self.assertIsInstance(stale_dict, data_types.TestExpectationMap)
-    self.assertIsInstance(semi_stale_dict, data_types.TestExpectationMap)
-    self.assertIsInstance(active_dict, data_types.TestExpectationMap)
-
-  def testStaleExpectations(self):
-    """Tests output when only stale expectations are provided."""
-    expectation_map = data_types.TestExpectationMap({
-        'foo':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo', ['win'], ['Failure']):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(1, 0),
-                    'step2':
-                    uu.CreateStatsWithPassFails(2, 0),
-                }),
-                'bar_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(3, 0),
-                    'step2':
-                    uu.CreateStatsWithPassFails(4, 0)
-                }),
-            }),
-            data_types.Expectation('foo', ['linux'], ['RetryOnFailure']):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(5, 0),
-                    'step2':
-                    uu.CreateStatsWithPassFails(6, 0),
-                }),
-            }),
-        }),
-        'bar':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('bar', ['win'], ['Failure']):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(7, 0),
-                }),
-            }),
-        }),
-    })
-    expected_stale_dict = copy.deepcopy(expectation_map)
-    stale_dict, semi_stale_dict, active_dict =\
-        expectations.SplitExpectationsByStaleness(expectation_map)
-    self.assertEqual(stale_dict, expected_stale_dict)
-    self.assertEqual(semi_stale_dict, {})
-    self.assertEqual(active_dict, {})
-
-  def testActiveExpectations(self):
-    """Tests output when only active expectations are provided."""
-    expectation_map = data_types.TestExpectationMap({
-        'foo':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo', ['win'], ['Failure']):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(0, 1),
-                    'step2':
-                    uu.CreateStatsWithPassFails(0, 2),
-                }),
-                'bar_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(0, 3),
-                    'step2':
-                    uu.CreateStatsWithPassFails(0, 4)
-                }),
-            }),
-            data_types.Expectation('foo', ['linux'], ['RetryOnFailure']):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(0, 5),
-                    'step2':
-                    uu.CreateStatsWithPassFails(0, 6),
-                }),
-            }),
-        }),
-        'bar':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('bar', ['win'], ['Failure']):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(0, 7),
-                }),
-            }),
-        }),
-    })
-    expected_active_dict = copy.deepcopy(expectation_map)
-    stale_dict, semi_stale_dict, active_dict =\
-        expectations.SplitExpectationsByStaleness(expectation_map)
-    self.assertEqual(stale_dict, {})
-    self.assertEqual(semi_stale_dict, {})
-    self.assertEqual(active_dict, expected_active_dict)
-
-  def testSemiStaleExpectations(self):
-    """Tests output when only semi-stale expectations are provided."""
-    expectation_map = data_types.TestExpectationMap({
-        'foo':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo', ['win'], ['Failure']):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(1, 0),
-                    'step2':
-                    uu.CreateStatsWithPassFails(2, 2),
-                }),
-                'bar_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(3, 0),
-                    'step2':
-                    uu.CreateStatsWithPassFails(0, 4)
-                }),
-            }),
-            data_types.Expectation('foo', ['linux'], ['RetryOnFailure']):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(5, 0),
-                    'step2':
-                    uu.CreateStatsWithPassFails(6, 6),
-                }),
-            }),
-        }),
-        'bar':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('bar', ['win'], ['Failure']):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(7, 0),
-                }),
-                'bar_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(0, 8),
-                }),
-            }),
-        }),
-    })
-    expected_semi_stale_dict = copy.deepcopy(expectation_map)
-    stale_dict, semi_stale_dict, active_dict =\
-        expectations.SplitExpectationsByStaleness(expectation_map)
-    self.assertEqual(stale_dict, {})
-    self.assertEqual(semi_stale_dict, expected_semi_stale_dict)
-    self.assertEqual(active_dict, {})
-
-  def testAllExpectations(self):
-    """Tests output when all three types of expectations are provided."""
-    expectation_map = data_types.TestExpectationMap({
-        'foo':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo', ['stale'], 'Failure'):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(1, 0),
-                    'step2':
-                    uu.CreateStatsWithPassFails(2, 0),
-                }),
-                'bar_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(3, 0),
-                    'step2':
-                    uu.CreateStatsWithPassFails(4, 0)
-                }),
-            }),
-            data_types.Expectation('foo', ['semistale'], 'Failure'):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(1, 0),
-                    'step2':
-                    uu.CreateStatsWithPassFails(2, 2),
-                }),
-                'bar_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(3, 0),
-                    'step2':
-                    uu.CreateStatsWithPassFails(0, 4)
-                }),
-            }),
-            data_types.Expectation('foo', ['active'], 'Failure'):
-            data_types.BuilderStepMap({
-                'foo_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(1, 1),
-                    'step2':
-                    uu.CreateStatsWithPassFails(2, 2),
-                }),
-                'bar_builder':
-                data_types.StepBuildStatsMap({
-                    'step1':
-                    uu.CreateStatsWithPassFails(3, 3),
-                    'step2':
-                    uu.CreateStatsWithPassFails(0, 4)
-                }),
-            }),
-        }),
-    })
-    expected_stale = {
-        'foo': {
-            data_types.Expectation('foo', ['stale'], 'Failure'): {
-                'foo_builder': {
-                    'step1': uu.CreateStatsWithPassFails(1, 0),
-                    'step2': uu.CreateStatsWithPassFails(2, 0),
-                },
-                'bar_builder': {
-                    'step1': uu.CreateStatsWithPassFails(3, 0),
-                    'step2': uu.CreateStatsWithPassFails(4, 0)
-                },
-            },
-        },
-    }
-    expected_semi_stale = {
-        'foo': {
-            data_types.Expectation('foo', ['semistale'], 'Failure'): {
-                'foo_builder': {
-                    'step1': uu.CreateStatsWithPassFails(1, 0),
-                    'step2': uu.CreateStatsWithPassFails(2, 2),
-                },
-                'bar_builder': {
-                    'step1': uu.CreateStatsWithPassFails(3, 0),
-                    'step2': uu.CreateStatsWithPassFails(0, 4)
-                },
-            },
-        },
-    }
-    expected_active = {
-        'foo': {
-            data_types.Expectation('foo', ['active'], 'Failure'): {
-                'foo_builder': {
-                    'step1': uu.CreateStatsWithPassFails(1, 1),
-                    'step2': uu.CreateStatsWithPassFails(2, 2),
-                },
-                'bar_builder': {
-                    'step1': uu.CreateStatsWithPassFails(3, 3),
-                    'step2': uu.CreateStatsWithPassFails(0, 4)
-                },
-            },
-        },
-    }
-
-    stale_dict, semi_stale_dict, active_dict =\
-        expectations.SplitExpectationsByStaleness(expectation_map)
-    self.assertEqual(stale_dict, expected_stale)
-    self.assertEqual(semi_stale_dict, expected_semi_stale)
-    self.assertEqual(active_dict, expected_active)
-
-
 class RemoveExpectationsFromFileUnittest(fake_filesystem_unittest.TestCase):
   def setUp(self):
+    self.instance = uu.CreateGenericExpectations()
+    self.header = self.instance._GetExpectationFileTagHeader()
     self.setUpPyfakefs()
     with tempfile.NamedTemporaryFile(delete=False) as f:
       self.filename = f.name
 
   def testExpectationRemoval(self):
     """Tests that expectations are properly removed from a file."""
-    contents = validate_tag_consistency.TAG_HEADER + """
+    contents = self.header + """
 
 # This is a test comment
 crbug.com/1234 [ win ] foo/test [ Failure ]
@@ -473,7 +123,7 @@
         data_types.Expectation('bar/test', ['linux'], ['RetryOnFailure'])
     ]
 
-    expected_contents = validate_tag_consistency.TAG_HEADER + """
+    expected_contents = self.header + """
 
 # This is a test comment
 crbug.com/2345 [ win ] foo/test [ RetryOnFailure ]
@@ -485,7 +135,7 @@
     with open(self.filename, 'w') as f:
       f.write(contents)
 
-    removed_urls = expectations.RemoveExpectationsFromFile(
+    removed_urls = self.instance.RemoveExpectationsFromFile(
         stale_expectations, self.filename)
     self.assertEqual(removed_urls, set(['crbug.com/1234']))
     with open(self.filename) as f:
@@ -493,7 +143,7 @@
 
   def testNestedBlockComments(self):
     """Tests that nested disable block comments throw exceptions."""
-    contents = validate_tag_consistency.TAG_HEADER + """
+    contents = self.header + """
 # finder:disable
 # finder:disable
 crbug.com/1234 [ win ] foo/test [ Failure ]
@@ -503,20 +153,20 @@
     with open(self.filename, 'w') as f:
       f.write(contents)
     with self.assertRaises(RuntimeError):
-      expectations.RemoveExpectationsFromFile([], self.filename)
+      self.instance.RemoveExpectationsFromFile([], self.filename)
 
-    contents = validate_tag_consistency.TAG_HEADER + """
+    contents = self.header + """
 # finder:enable
 crbug.com/1234 [ win ] foo/test [ Failure ]
 """
     with open(self.filename, 'w') as f:
       f.write(contents)
     with self.assertRaises(RuntimeError):
-      expectations.RemoveExpectationsFromFile([], self.filename)
+      self.instance.RemoveExpectationsFromFile([], self.filename)
 
   def testBlockComments(self):
     """Tests that expectations in a disable block comment are not removed."""
-    contents = validate_tag_consistency.TAG_HEADER + """
+    contents = self.header + """
 crbug.com/1234 [ win ] foo/test [ Failure ]
 # finder:disable
 crbug.com/2345 [ win ] foo/test [ Failure ]
@@ -534,7 +184,7 @@
         data_types.Expectation('foo/test', ['win'], ['Failure'],
                                'crbug.com/4567'),
     ]
-    expected_contents = validate_tag_consistency.TAG_HEADER + """
+    expected_contents = self.header + """
 # finder:disable
 crbug.com/2345 [ win ] foo/test [ Failure ]
 crbug.com/3456 [ win ] foo/test [ Failure ]
@@ -542,7 +192,7 @@
 """
     with open(self.filename, 'w') as f:
       f.write(contents)
-    removed_urls = expectations.RemoveExpectationsFromFile(
+    removed_urls = self.instance.RemoveExpectationsFromFile(
         stale_expectations, self.filename)
     self.assertEqual(removed_urls, set(['crbug.com/1234', 'crbug.com/4567']))
     with open(self.filename) as f:
@@ -550,7 +200,7 @@
 
   def testInlineComments(self):
     """Tests that expectations with inline disable comments are not removed."""
-    contents = validate_tag_consistency.TAG_HEADER + """
+    contents = self.header + """
 crbug.com/1234 [ win ] foo/test [ Failure ]
 crbug.com/2345 [ win ] foo/test [ Failure ]  # finder:disable
 crbug.com/3456 [ win ] foo/test [ Failure ]
@@ -563,12 +213,12 @@
         data_types.Expectation('foo/test', ['win'], ['Failure'],
                                'crbug.com/3456'),
     ]
-    expected_contents = validate_tag_consistency.TAG_HEADER + """
+    expected_contents = self.header + """
 crbug.com/2345 [ win ] foo/test [ Failure ]  # finder:disable
 """
     with open(self.filename, 'w') as f:
       f.write(contents)
-    removed_urls = expectations.RemoveExpectationsFromFile(
+    removed_urls = self.instance.RemoveExpectationsFromFile(
         stale_expectations, self.filename)
     self.assertEqual(removed_urls, set(['crbug.com/1234', 'crbug.com/3456']))
     with open(self.filename) as f:
@@ -585,579 +235,14 @@
         ), 'foo')
 
 
-class AddResultToMapUnittest(unittest.TestCase):
-  def testResultMatchPassingNew(self):
-    """Test adding a passing result when no results for a builder exist."""
-    r = data_types.Result('some/test/case', ['win', 'win10'], 'Pass',
-                          'pixel_tests', 'build_id')
-    e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
-    expectation_map = data_types.TestExpectationMap({
-        'some/test/*':
-        data_types.ExpectationBuilderMap({
-            e: data_types.BuilderStepMap(),
-        }),
-    })
-    found_matching = expectations._AddResultToMap(r, 'builder', expectation_map)
-    self.assertTrue(found_matching)
-    stats = data_types.BuildStats()
-    stats.AddPassedBuild()
-    expected_expectation_map = {
-        'some/test/*': {
-            e: {
-                'builder': {
-                    'pixel_tests': stats,
-                },
-            },
-        },
-    }
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testResultMatchFailingNew(self):
-    """Test adding a failing result when no results for a builder exist."""
-    r = data_types.Result('some/test/case', ['win', 'win10'], 'Failure',
-                          'pixel_tests', 'build_id')
-    e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
-    expectation_map = data_types.TestExpectationMap({
-        'some/test/*':
-        data_types.ExpectationBuilderMap({
-            e: data_types.BuilderStepMap(),
-        }),
-    })
-    found_matching = expectations._AddResultToMap(r, 'builder', expectation_map)
-    self.assertTrue(found_matching)
-    stats = data_types.BuildStats()
-    stats.AddFailedBuild('build_id')
-    expected_expectation_map = {
-        'some/test/*': {
-            e: {
-                'builder': {
-                    'pixel_tests': stats,
-                },
-            }
-        }
-    }
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testResultMatchPassingExisting(self):
-    """Test adding a passing result when results for a builder exist."""
-    r = data_types.Result('some/test/case', ['win', 'win10'], 'Pass',
-                          'pixel_tests', 'build_id')
-    e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
-    stats = data_types.BuildStats()
-    stats.AddFailedBuild('build_id')
-    expectation_map = data_types.TestExpectationMap({
-        'some/test/*':
-        data_types.ExpectationBuilderMap({
-            e:
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'pixel_tests': stats,
-                }),
-            }),
-        }),
-    })
-    found_matching = expectations._AddResultToMap(r, 'builder', expectation_map)
-    self.assertTrue(found_matching)
-    stats = data_types.BuildStats()
-    stats.AddFailedBuild('build_id')
-    stats.AddPassedBuild()
-    expected_expectation_map = {
-        'some/test/*': {
-            e: {
-                'builder': {
-                    'pixel_tests': stats,
-                },
-            },
-        },
-    }
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testResultMatchFailingExisting(self):
-    """Test adding a failing result when results for a builder exist."""
-    r = data_types.Result('some/test/case', ['win', 'win10'], 'Failure',
-                          'pixel_tests', 'build_id')
-    e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
-    stats = data_types.BuildStats()
-    stats.AddPassedBuild()
-    expectation_map = data_types.TestExpectationMap({
-        'some/test/*':
-        data_types.ExpectationBuilderMap({
-            e:
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'pixel_tests': stats,
-                }),
-            }),
-        }),
-    })
-    found_matching = expectations._AddResultToMap(r, 'builder', expectation_map)
-    self.assertTrue(found_matching)
-    stats = data_types.BuildStats()
-    stats.AddFailedBuild('build_id')
-    stats.AddPassedBuild()
-    expected_expectation_map = {
-        'some/test/*': {
-            e: {
-                'builder': {
-                    'pixel_tests': stats,
-                },
-            },
-        },
-    }
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testResultMatchMultiMatch(self):
-    """Test adding a passing result when multiple expectations match."""
-    r = data_types.Result('some/test/case', ['win', 'win10'], 'Pass',
-                          'pixel_tests', 'build_id')
-    e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
-    e2 = data_types.Expectation('some/test/case', ['win10'], 'Failure')
-    expectation_map = data_types.TestExpectationMap({
-        'some/test/*':
-        data_types.ExpectationBuilderMap({
-            e: data_types.BuilderStepMap(),
-            e2: data_types.BuilderStepMap(),
-        }),
-    })
-    found_matching = expectations._AddResultToMap(r, 'builder', expectation_map)
-    self.assertTrue(found_matching)
-    stats = data_types.BuildStats()
-    stats.AddPassedBuild()
-    expected_expectation_map = {
-        'some/test/*': {
-            e: {
-                'builder': {
-                    'pixel_tests': stats,
-                },
-            },
-            e2: {
-                'builder': {
-                    'pixel_tests': stats,
-                },
-            }
-        }
-    }
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testResultNoMatch(self):
-    """Tests that a result is not added if no match is found."""
-    r = data_types.Result('some/test/case', ['win', 'win10'], 'Failure',
-                          'pixel_tests', 'build_id')
-    e = data_types.Expectation('some/test/*', ['win10', 'foo'], 'Failure')
-    expectation_map = data_types.TestExpectationMap({
-        'some/test/*':
-        data_types.ExpectationBuilderMap({
-            e: data_types.BuilderStepMap(),
-        })
-    })
-    found_matching = expectations._AddResultToMap(r, 'builder', expectation_map)
-    self.assertFalse(found_matching)
-    expected_expectation_map = {'some/test/*': {e: {}}}
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-
-class AddResultListToMapUnittest(unittest.TestCase):
-  def GetGenericRetryExpectation(self):
-    return data_types.Expectation('foo/test', ['win10'], 'RetryOnFailure')
-
-  def GetGenericFailureExpectation(self):
-    return data_types.Expectation('foo/test', ['win10'], 'Failure')
-
-  def GetEmptyMapForGenericRetryExpectation(self):
-    foo_expectation = self.GetGenericRetryExpectation()
-    return data_types.TestExpectationMap({
-        'foo/test':
-        data_types.ExpectationBuilderMap({
-            foo_expectation:
-            data_types.BuilderStepMap(),
-        }),
-    })
-
-  def GetEmptyMapForGenericFailureExpectation(self):
-    foo_expectation = self.GetGenericFailureExpectation()
-    return data_types.TestExpectationMap({
-        'foo/test':
-        data_types.ExpectationBuilderMap({
-            foo_expectation:
-            data_types.BuilderStepMap(),
-        }),
-    })
-
-  def GetPassedMapForExpectation(self, expectation):
-    stats = data_types.BuildStats()
-    stats.AddPassedBuild()
-    return self.GetMapForExpectationAndStats(expectation, stats)
-
-  def GetFailedMapForExpectation(self, expectation):
-    stats = data_types.BuildStats()
-    stats.AddFailedBuild('build_id')
-    return self.GetMapForExpectationAndStats(expectation, stats)
-
-  def GetMapForExpectationAndStats(self, expectation, stats):
-    return data_types.TestExpectationMap({
-        expectation.test:
-        data_types.ExpectationBuilderMap({
-            expectation:
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'pixel_tests': stats,
-                }),
-            }),
-        }),
-    })
-
-  def testRetryOnlyPassMatching(self):
-    """Tests when the only tests are retry expectations that pass and match."""
-    foo_result = data_types.Result('foo/test', ['win10'], 'Pass', 'pixel_tests',
-                                   'build_id')
-    expectation_map = self.GetEmptyMapForGenericRetryExpectation()
-    unmatched_results = expectations.AddResultListToMap(expectation_map,
-                                                        'builder', [foo_result])
-    self.assertEqual(unmatched_results, [])
-
-    expected_expectation_map = self.GetPassedMapForExpectation(
-        self.GetGenericRetryExpectation())
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testRetryOnlyFailMatching(self):
-    """Tests when the only tests are retry expectations that fail and match."""
-    foo_result = data_types.Result('foo/test', ['win10'], 'Failure',
-                                   'pixel_tests', 'build_id')
-    expectation_map = self.GetEmptyMapForGenericRetryExpectation()
-    unmatched_results = expectations.AddResultListToMap(expectation_map,
-                                                        'builder', [foo_result])
-    self.assertEqual(unmatched_results, [])
-
-    expected_expectation_map = self.GetFailedMapForExpectation(
-        self.GetGenericRetryExpectation())
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testRetryFailThenPassMatching(self):
-    """Tests when there are pass and fail results for retry expectations."""
-    foo_fail_result = data_types.Result('foo/test', ['win10'], 'Failure',
-                                        'pixel_tests', 'build_id')
-    foo_pass_result = data_types.Result('foo/test', ['win10'], 'Pass',
-                                        'pixel_tests', 'build_id')
-    expectation_map = self.GetEmptyMapForGenericRetryExpectation()
-    unmatched_results = expectations.AddResultListToMap(
-        expectation_map, 'builder', [foo_fail_result, foo_pass_result])
-    self.assertEqual(unmatched_results, [])
-
-    expected_expectation_map = self.GetFailedMapForExpectation(
-        self.GetGenericRetryExpectation())
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testFailurePassMatching(self):
-    """Tests when there are pass results for failure expectations."""
-    foo_result = data_types.Result('foo/test', ['win10'], 'Pass', 'pixel_tests',
-                                   'build_id')
-    expectation_map = self.GetEmptyMapForGenericFailureExpectation()
-    unmatched_results = expectations.AddResultListToMap(expectation_map,
-                                                        'builder', [foo_result])
-    self.assertEqual(unmatched_results, [])
-
-    expected_expectation_map = self.GetPassedMapForExpectation(
-        self.GetGenericFailureExpectation())
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testFailureFailureMatching(self):
-    """Tests when there are failure results for failure expectations."""
-    foo_result = data_types.Result('foo/test', ['win10'], 'Failure',
-                                   'pixel_tests', 'build_id')
-    expectation_map = self.GetEmptyMapForGenericFailureExpectation()
-    unmatched_results = expectations.AddResultListToMap(expectation_map,
-                                                        'builder', [foo_result])
-    self.assertEqual(unmatched_results, [])
-
-    expected_expectation_map = self.GetFailedMapForExpectation(
-        self.GetGenericFailureExpectation())
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-  def testMismatches(self):
-    """Tests that unmatched results get returned."""
-    foo_match_result = data_types.Result('foo/test', ['win10'], 'Pass',
-                                         'pixel_tests', 'build_id')
-    foo_mismatch_result = data_types.Result('foo/not_a_test', ['win10'],
-                                            'Failure', 'pixel_tests',
-                                            'build_id')
-    bar_result = data_types.Result('bar/test', ['win10'], 'Pass', 'pixel_tests',
-                                   'build_id')
-    expectation_map = self.GetEmptyMapForGenericFailureExpectation()
-    unmatched_results = expectations.AddResultListToMap(
-        expectation_map, 'builder',
-        [foo_match_result, foo_mismatch_result, bar_result])
-    self.assertEqual(len(set(unmatched_results)), 2)
-    self.assertEqual(set(unmatched_results),
-                     set([foo_mismatch_result, bar_result]))
-
-    expected_expectation_map = self.GetPassedMapForExpectation(
-        self.GetGenericFailureExpectation())
-    self.assertEqual(expectation_map, expected_expectation_map)
-
-
-class MergeExpectationMapsUnittest(unittest.TestCase):
-  maxDiff = None
-
-  def testEmptyBaseMap(self):
-    """Tests that a merge with an empty base map copies the merge map."""
-    base_map = data_types.TestExpectationMap()
-    merge_map = data_types.TestExpectationMap({
-        'foo':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo', ['win'], 'Failure'):
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'step': data_types.BuildStats(),
-                }),
-            }),
-        }),
-    })
-    original_merge_map = copy.deepcopy(merge_map)
-    expectations.MergeExpectationMaps(base_map, merge_map)
-    self.assertEqual(base_map, merge_map)
-    self.assertEqual(merge_map, original_merge_map)
-
-  def testEmptyMergeMap(self):
-    """Tests that a merge with an empty merge map is a no-op."""
-    base_map = data_types.TestExpectationMap({
-        'foo':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo', ['win'], 'Failure'):
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'step': data_types.BuildStats(),
-                }),
-            }),
-        }),
-    })
-    merge_map = data_types.TestExpectationMap()
-    original_base_map = copy.deepcopy(base_map)
-    expectations.MergeExpectationMaps(base_map, merge_map)
-    self.assertEqual(base_map, original_base_map)
-    self.assertEqual(merge_map, {})
-
-  def testMissingKeys(self):
-    """Tests that missing keys are properly copied to the base map."""
-    base_map = data_types.TestExpectationMap({
-        'foo':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo', ['win'], 'Failure'):
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'step': data_types.BuildStats(),
-                }),
-            }),
-        }),
-    })
-    merge_map = data_types.TestExpectationMap({
-        'foo':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo', ['win'], 'Failure'):
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'step2': data_types.BuildStats(),
-                }),
-                'builder2':
-                data_types.StepBuildStatsMap({
-                    'step': data_types.BuildStats(),
-                }),
-            }),
-            data_types.Expectation('foo', ['mac'], 'Failure'):
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'step': data_types.BuildStats(),
-                })
-            })
-        }),
-        'bar':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('bar', ['win'], 'Failure'):
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'step': data_types.BuildStats(),
-                }),
-            }),
-        }),
-    })
-    expected_base_map = {
-        'foo': {
-            data_types.Expectation('foo', ['win'], 'Failure'): {
-                'builder': {
-                    'step': data_types.BuildStats(),
-                    'step2': data_types.BuildStats(),
-                },
-                'builder2': {
-                    'step': data_types.BuildStats(),
-                },
-            },
-            data_types.Expectation('foo', ['mac'], 'Failure'): {
-                'builder': {
-                    'step': data_types.BuildStats(),
-                }
-            }
-        },
-        'bar': {
-            data_types.Expectation('bar', ['win'], 'Failure'): {
-                'builder': {
-                    'step': data_types.BuildStats(),
-                },
-            },
-        },
-    }
-    expectations.MergeExpectationMaps(base_map, merge_map)
-    self.assertEqual(base_map, expected_base_map)
-
-  def testMergeBuildStats(self):
-    """Tests that BuildStats for the same step are merged properly."""
-    base_map = data_types.TestExpectationMap({
-        'foo':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo', ['win'], 'Failure'):
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'step': data_types.BuildStats(),
-                }),
-            }),
-        }),
-    })
-    merge_stats = data_types.BuildStats()
-    merge_stats.AddFailedBuild('1')
-    merge_map = data_types.TestExpectationMap({
-        'foo':
-        data_types.ExpectationBuilderMap({
-            data_types.Expectation('foo', ['win'], 'Failure'):
-            data_types.BuilderStepMap({
-                'builder':
-                data_types.StepBuildStatsMap({
-                    'step': merge_stats,
-                }),
-            }),
-        }),
-    })
-    expected_stats = data_types.BuildStats()
-    expected_stats.AddFailedBuild('1')
-    expected_base_map = {
-        'foo': {
-            data_types.Expectation('foo', ['win'], 'Failure'): {
-                'builder': {
-                    'step': expected_stats,
-                },
-            },
-        },
-    }
-    expectations.MergeExpectationMaps(base_map, merge_map)
-    self.assertEqual(base_map, expected_base_map)
-
-  def testInvalidMerge(self):
-    """Tests that updating a BuildStats instance twice is an error."""
-    base_map = {
-        'foo': {
-            data_types.Expectation('foo', ['win'], 'Failure'): {
-                'builder': {
-                    'step': data_types.BuildStats(),
-                },
-            },
-        },
-    }
-    merge_stats = data_types.BuildStats()
-    merge_stats.AddFailedBuild('1')
-    merge_map = {
-        'foo': {
-            data_types.Expectation('foo', ['win'], 'Failure'): {
-                'builder': {
-                    'step': merge_stats,
-                },
-            },
-        },
-    }
-    original_base_map = copy.deepcopy(base_map)
-    expectations.MergeExpectationMaps(base_map, merge_map, original_base_map)
-    with self.assertRaises(AssertionError):
-      expectations.MergeExpectationMaps(base_map, merge_map, original_base_map)
-
-
-class ConvertBuilderMapToPassOrderedStringDictUnittest(unittest.TestCase):
-  def testEmptyInput(self):
-    """Tests that an empty input doesn't cause breakage."""
-    output = expectations._ConvertBuilderMapToPassOrderedStringDict(
-        data_types.BuilderStepMap())
-    expected_output = collections.OrderedDict()
-    expected_output[result_output.FULL_PASS] = {}
-    expected_output[result_output.NEVER_PASS] = {}
-    expected_output[result_output.PARTIAL_PASS] = {}
-    self.assertEqual(output, expected_output)
-
-  def testBasic(self):
-    """Tests that a map is properly converted."""
-    builder_map = data_types.BuilderStepMap({
-        'fully pass':
-        data_types.StepBuildStatsMap({
-            'step1': uu.CreateStatsWithPassFails(1, 0),
-        }),
-        'never pass':
-        data_types.StepBuildStatsMap({
-            'step3': uu.CreateStatsWithPassFails(0, 1),
-        }),
-        'partial pass':
-        data_types.StepBuildStatsMap({
-            'step5': uu.CreateStatsWithPassFails(1, 1),
-        }),
-        'mixed':
-        data_types.StepBuildStatsMap({
-            'step7': uu.CreateStatsWithPassFails(1, 0),
-            'step8': uu.CreateStatsWithPassFails(0, 1),
-            'step9': uu.CreateStatsWithPassFails(1, 1),
-        }),
-    })
-    output = expectations._ConvertBuilderMapToPassOrderedStringDict(builder_map)
-
-    expected_output = collections.OrderedDict()
-    expected_output[result_output.FULL_PASS] = {
-        'fully pass': [
-            'step1 (1/1)',
-        ],
-        'mixed': [
-            'step7 (1/1)',
-        ],
-    }
-    expected_output[result_output.NEVER_PASS] = {
-        'never pass': [
-            'step3 (0/1)',
-        ],
-        'mixed': [
-            'step8 (0/1)',
-        ],
-    }
-    expected_output[result_output.PARTIAL_PASS] = {
-        'partial pass': {
-            'step5 (1/2)': [
-                'http://ci.chromium.org/b/build_id0',
-            ],
-        },
-        'mixed': {
-            'step9 (1/2)': [
-                'http://ci.chromium.org/b/build_id0',
-            ],
-        },
-    }
-    self.assertEqual(output, expected_output)
-
-
 class GetExpectationLineUnittest(unittest.TestCase):
+  def setUp(self):
+    self.instance = uu.CreateGenericExpectations()
+
   def testNoMatchingExpectation(self):
     """Tests that the case of no matching expectation is handled."""
     expectation = data_types.Expectation('foo', ['win'], 'Failure')
-    line, line_number = expectations._GetExpectationLine(
+    line, line_number = self.instance._GetExpectationLine(
         expectation, FAKE_EXPECTATION_FILE_CONTENTS)
     self.assertIsNone(line)
     self.assertIsNone(line_number)
@@ -1166,7 +251,7 @@
     """Tests that matching expectations are found."""
     expectation = data_types.Expectation('foo/test', ['win'], 'Failure',
                                          'crbug.com/1234')
-    line, line_number = expectations._GetExpectationLine(
+    line, line_number = self.instance._GetExpectationLine(
         expectation, FAKE_EXPECTATION_FILE_CONTENTS)
     self.assertEqual(line, 'crbug.com/1234 [ win ] foo/test [ Failure ]')
     self.assertEqual(line_number, 3)
@@ -1175,6 +260,7 @@
 class ModifySemiStaleExpectationsUnittest(fake_filesystem_unittest.TestCase):
   def setUp(self):
     self.setUpPyfakefs()
+    self.instance = uu.CreateGenericExpectations()
 
     self._input_patcher = mock.patch.object(expectations,
                                             '_WaitForUserInputOnModification')
@@ -1187,7 +273,7 @@
 
   def testEmptyExpectationMap(self):
     """Tests that an empty expectation map results in a no-op."""
-    modified_urls = expectations.ModifySemiStaleExpectations(
+    modified_urls = self.instance.ModifySemiStaleExpectations(
         data_types.TestExpectationMap(), self.filename)
     self.assertEqual(modified_urls, set())
     self._input_mock.assert_not_called()
@@ -1207,7 +293,7 @@
         }),
     })
     # yapf: enable
-    modified_urls = expectations.ModifySemiStaleExpectations(
+    modified_urls = self.instance.ModifySemiStaleExpectations(
         test_expectation_map, self.filename)
     self.assertEqual(modified_urls, set(['crbug.com/1234']))
     expected_file_contents = """\
@@ -1235,7 +321,7 @@
         }),
     })
     # yapf: enable
-    modified_urls = expectations.ModifySemiStaleExpectations(
+    modified_urls = self.instance.ModifySemiStaleExpectations(
         test_expectation_map, self.filename)
     self.assertEqual(modified_urls, set(['crbug.com/1234']))
     with open(self.filename) as f:
@@ -1254,7 +340,7 @@
         }),
     })
     # yapf: enable
-    modified_urls = expectations.ModifySemiStaleExpectations(
+    modified_urls = self.instance.ModifySemiStaleExpectations(
         test_expectation_map, self.filename)
     self.assertEqual(modified_urls, set())
     with open(self.filename) as f:
@@ -1286,8 +372,8 @@
           }),
       })
       # yapf: enable
-      expectations.ModifySemiStaleExpectations(test_expectation_map,
-                                               self.filename)
+      self.instance.ModifySemiStaleExpectations(test_expectation_map,
+                                                self.filename)
       any_input_mock.assert_called_once()
       with open(self.filename) as infile:
         self.assertEqual(infile.read(), FAKE_EXPECTATION_FILE_CONTENTS)
@@ -1304,10 +390,15 @@
       self.fs.CreateFile(*args, **kwargs)
 
   def setUp(self):
-    expectations_dir = expectations.EXPECTATIONS_DIR
-    # Make sure our fake expectations are where the real ones actually are.
-    self.assertTrue(os.path.exists(expectations_dir))
+    expectations_dir = os.path.join(os.path.dirname(__file__), 'expectations')
     self.setUpPyfakefs()
+    self.instance = expectations.Expectations()
+    self.filepath_patcher = mock.patch.object(
+        self.instance,
+        '_GetExpectationFilepaths',
+        return_value=[os.path.join(expectations_dir, 'real_expectations.txt')])
+    self.filepath_mock = self.filepath_patcher.start()
+    self.addCleanup(self.filepath_patcher.stop)
 
     real_contents = 'crbug.com/1\ncrbug.com/2'
     skipped_contents = 'crbug.com/4'
@@ -1318,11 +409,11 @@
 
   def testNoOrphanedBugs(self):
     bugs = ['crbug.com/1', 'crbug.com/2']
-    self.assertEqual(expectations.FindOrphanedBugs(bugs), set())
+    self.assertEqual(self.instance.FindOrphanedBugs(bugs), set())
 
   def testOrphanedBugs(self):
     bugs = ['crbug.com/1', 'crbug.com/3', 'crbug.com/4']
-    self.assertEqual(expectations.FindOrphanedBugs(bugs),
+    self.assertEqual(self.instance.FindOrphanedBugs(bugs),
                      set(['crbug.com/3', 'crbug.com/4']))
 
 
diff --git a/testing/unexpected_passes_common/queries.py b/testing/unexpected_passes_common/queries.py
index a779c4e..13545cbd 100644
--- a/testing/unexpected_passes_common/queries.py
+++ b/testing/unexpected_passes_common/queries.py
@@ -245,8 +245,7 @@
     all_unmatched_results = {}
 
     for (unmatched_results, prefixed_builder_name, merge_map) in results:
-      expectations.MergeExpectationMaps(tmp_expectation_map, merge_map,
-                                        expectation_map)
+      tmp_expectation_map.Merge(merge_map, expectation_map)
       if unmatched_results:
         all_unmatched_results[prefixed_builder_name] = unmatched_results
 
@@ -260,19 +259,18 @@
 
     Args:
       inputs: An iterable of inputs for QueryBuilder() and
-          expectations.AddResultListToMap(). Should be in the order:
+          data_types.TestExpectationMap.AddResultList(). Should be in the order:
           builder builder_type expectation_map
 
     Returns:
-      The output of expectations.AddResultListToMap().
+      The output of data_types.TestExpectationMap.AddResultList().
     """
     builder, builder_type, expectation_map = inputs
     results = self.QueryBuilder(builder, builder_type)
 
     prefixed_builder_name = '%s:%s' % (builder_type, builder)
-    unmatched_results = expectations.AddResultListToMap(expectation_map,
-                                                        prefixed_builder_name,
-                                                        results)
+    unmatched_results = expectation_map.AddResultList(prefixed_builder_name,
+                                                      results)
 
     return unmatched_results, prefixed_builder_name, expectation_map
 
diff --git a/testing/unexpected_passes_common/result_output.py b/testing/unexpected_passes_common/result_output.py
index 062baab..07a7cfd 100644
--- a/testing/unexpected_passes_common/result_output.py
+++ b/testing/unexpected_passes_common/result_output.py
@@ -206,20 +206,20 @@
     file_handle = file_handle or sys.stdout
     if stale_dict:
       file_handle.write(SECTION_STALE + '\n')
-      _RecursivePrintToFile(stale_str_dict, 0, file_handle)
+      RecursivePrintToFile(stale_str_dict, 0, file_handle)
     if semi_stale_dict:
       file_handle.write(SECTION_SEMI_STALE + '\n')
-      _RecursivePrintToFile(semi_stale_str_dict, 0, file_handle)
+      RecursivePrintToFile(semi_stale_str_dict, 0, file_handle)
     if active_dict:
       file_handle.write(SECTION_ACTIVE + '\n')
-      _RecursivePrintToFile(active_str_dict, 0, file_handle)
+      RecursivePrintToFile(active_str_dict, 0, file_handle)
 
     if unused_expectations_str_list:
       file_handle.write('\n' + SECTION_UNUSED + '\n')
-      _RecursivePrintToFile(unused_expectations_str_list, 0, file_handle)
+      RecursivePrintToFile(unused_expectations_str_list, 0, file_handle)
     if unmatched_results_str_dict:
       file_handle.write('\n' + SECTION_UNMATCHED + '\n')
-      _RecursivePrintToFile(unmatched_results_str_dict, 0, file_handle)
+      RecursivePrintToFile(unmatched_results_str_dict, 0, file_handle)
 
   elif output_format == 'html':
     should_close_file = False
@@ -253,7 +253,7 @@
     raise RuntimeError('Unsupported output format %s' % output_format)
 
 
-def _RecursivePrintToFile(element, depth, file_handle):
+def RecursivePrintToFile(element, depth, file_handle):
   """Recursively prints |element| as text to |file_handle|.
 
   Args:
@@ -267,11 +267,11 @@
     file_handle.write(('  ' * depth) + element + '\n')
   elif isinstance(element, dict):
     for k, v in element.items():
-      _RecursivePrintToFile(k, depth, file_handle)
-      _RecursivePrintToFile(v, depth + 1, file_handle)
+      RecursivePrintToFile(k, depth, file_handle)
+      RecursivePrintToFile(v, depth + 1, file_handle)
   elif isinstance(element, list):
     for i in element:
-      _RecursivePrintToFile(i, depth, file_handle)
+      RecursivePrintToFile(i, depth, file_handle)
   else:
     raise RuntimeError('Given unhandled type %s' % type(element))
 
@@ -581,3 +581,54 @@
     output_str += AddBugTypeToOutputString(orphaned_urls, 'Fixed:')
 
   file_handle.write('Affected bugs for CL description:\n%s' % output_str)
+
+
+def ConvertBuilderMapToPassOrderedStringDict(builder_map):
+  """Converts |builder_map| into an ordered dict split by pass type.
+
+  Args:
+    builder_map: A data_types.BuilderStepMap.
+
+  Returns:
+    A collections.OrderedDict in the following format:
+    {
+      result_output.FULL_PASS: {
+        builder_name: [
+          step_name (total passes / total builds)
+        ],
+      },
+      result_output.NEVER_PASS: {
+        builder_name: [
+          step_name (total passes / total builds)
+        ],
+      },
+      result_output.PARTIAL_PASS: {
+        builder_name: {
+          step_name (total passes / total builds): [
+            failure links,
+          ],
+        },
+      },
+    }
+
+    The ordering and presence of the top-level keys are guaranteed.
+  """
+  # This is similar to what we do in
+  # result_output._ConvertTestExpectationMapToStringDict, but we want the
+  # top-level grouping to be by pass type rather than by builder, so we can't
+  # re-use the code from there.
+  # Ordered dict used to ensure that order is guaranteed when printing out.
+  str_dict = collections.OrderedDict()
+  str_dict[FULL_PASS] = {}
+  str_dict[NEVER_PASS] = {}
+  str_dict[PARTIAL_PASS] = {}
+  for builder_name, step_name, stats in builder_map.IterBuildStats():
+    step_str = AddStatsToStr(step_name, stats)
+    if stats.did_fully_pass:
+      str_dict[FULL_PASS].setdefault(builder_name, []).append(step_str)
+    elif stats.did_never_pass:
+      str_dict[NEVER_PASS].setdefault(builder_name, []).append(step_str)
+    else:
+      str_dict[PARTIAL_PASS].setdefault(builder_name, {})[step_str] = list(
+          stats.failure_links)
+  return str_dict
diff --git a/testing/unexpected_passes_common/result_output_unittest.py b/testing/unexpected_passes_common/result_output_unittest.py
index aead599..2aa06c42 100755
--- a/testing/unexpected_passes_common/result_output_unittest.py
+++ b/testing/unexpected_passes_common/result_output_unittest.py
@@ -5,6 +5,7 @@
 
 from __future__ import print_function
 
+import collections
 import itertools
 import sys
 import tempfile
@@ -13,7 +14,6 @@
 from pyfakefs import fake_filesystem_unittest
 
 from unexpected_passes_common import data_types
-from unexpected_passes_common import expectations
 from unexpected_passes_common import result_output
 from unexpected_passes_common import unittest_utils as uu
 
@@ -377,7 +377,7 @@
     self._filepath = self._file_handle.name
 
   def testRecursivePrintToFileExpectationMap(self):
-    """Tests _RecursivePrintToFile() with an expectation map as input."""
+    """Tests RecursivePrintToFile() with an expectation map as input."""
     expectation_map = {
         'foo': {
             '"RetryOnFailure" expectation on "win intel"': {
@@ -397,7 +397,7 @@
             },
         },
     }
-    result_output._RecursivePrintToFile(expectation_map, 0, self._file_handle)
+    result_output.RecursivePrintToFile(expectation_map, 0, self._file_handle)
     self._file_handle.close()
 
     # TODO(crbug.com/1198237): Keep the Python 3 version once we are fully
@@ -432,7 +432,7 @@
       self.assertEqual(f.read(), expected_output)
 
   def testRecursivePrintToFileUnmatchedResults(self):
-    """Tests _RecursivePrintToFile() with unmatched results as input."""
+    """Tests RecursivePrintToFile() with unmatched results as input."""
     unmatched_results = {
         'foo': {
             'builder': {
@@ -448,7 +448,7 @@
             },
         },
     }
-    result_output._RecursivePrintToFile(unmatched_results, 0, self._file_handle)
+    result_output.RecursivePrintToFile(unmatched_results, 0, self._file_handle)
     self._file_handle.close()
     # pylint: disable=line-too-long
     # Order is not guaranteed, so create permutations.
@@ -531,8 +531,7 @@
         data_types.Expectation('foo', ['linux'], 'RetryOnFailure')
     ]
 
-    stale, semi_stale, active = expectations.SplitExpectationsByStaleness(
-        expectation_map)
+    stale, semi_stale, active = expectation_map.SplitByStaleness()
 
     result_output.OutputResults(stale, semi_stale, active, {}, [], 'print',
                                 self._file_handle)
@@ -705,6 +704,73 @@
                                   'Fixed: 1, 2\n'))
 
 
+class ConvertBuilderMapToPassOrderedStringDictUnittest(unittest.TestCase):
+  def testEmptyInput(self):
+    """Tests that an empty input doesn't cause breakage."""
+    output = result_output.ConvertBuilderMapToPassOrderedStringDict(
+        data_types.BuilderStepMap())
+    expected_output = collections.OrderedDict()
+    expected_output[result_output.FULL_PASS] = {}
+    expected_output[result_output.NEVER_PASS] = {}
+    expected_output[result_output.PARTIAL_PASS] = {}
+    self.assertEqual(output, expected_output)
+
+  def testBasic(self):
+    """Tests that a map is properly converted."""
+    builder_map = data_types.BuilderStepMap({
+        'fully pass':
+        data_types.StepBuildStatsMap({
+            'step1': uu.CreateStatsWithPassFails(1, 0),
+        }),
+        'never pass':
+        data_types.StepBuildStatsMap({
+            'step3': uu.CreateStatsWithPassFails(0, 1),
+        }),
+        'partial pass':
+        data_types.StepBuildStatsMap({
+            'step5': uu.CreateStatsWithPassFails(1, 1),
+        }),
+        'mixed':
+        data_types.StepBuildStatsMap({
+            'step7': uu.CreateStatsWithPassFails(1, 0),
+            'step8': uu.CreateStatsWithPassFails(0, 1),
+            'step9': uu.CreateStatsWithPassFails(1, 1),
+        }),
+    })
+    output = result_output.ConvertBuilderMapToPassOrderedStringDict(builder_map)
+
+    expected_output = collections.OrderedDict()
+    expected_output[result_output.FULL_PASS] = {
+        'fully pass': [
+            'step1 (1/1)',
+        ],
+        'mixed': [
+            'step7 (1/1)',
+        ],
+    }
+    expected_output[result_output.NEVER_PASS] = {
+        'never pass': [
+            'step3 (0/1)',
+        ],
+        'mixed': [
+            'step8 (0/1)',
+        ],
+    }
+    expected_output[result_output.PARTIAL_PASS] = {
+        'partial pass': {
+            'step5 (1/2)': [
+                'http://ci.chromium.org/b/build_id0',
+            ],
+        },
+        'mixed': {
+            'step9 (1/2)': [
+                'http://ci.chromium.org/b/build_id0',
+            ],
+        },
+    }
+    self.assertEqual(output, expected_output)
+
+
 def _Dedent(s):
   output = ''
   for line in s.splitlines(True):
diff --git a/testing/unexpected_passes_common/unittest_utils.py b/testing/unexpected_passes_common/unittest_utils.py
index 7b9ed4bb..b43e9544 100644
--- a/testing/unexpected_passes_common/unittest_utils.py
+++ b/testing/unexpected_passes_common/unittest_utils.py
@@ -6,6 +6,7 @@
 from __future__ import print_function
 
 from unexpected_passes_common import builders
+from unexpected_passes_common import expectations
 from unexpected_passes_common import data_types
 from unexpected_passes_common import queries
 
@@ -114,3 +115,18 @@
 
 def RegisterGenericBuildersImplementation():
   builders.RegisterInstance(GenericBuilders())
+
+
+class GenericExpectations(expectations.Expectations):
+  def _GetExpectationFilepaths(self):
+    return []
+
+  def _GetExpectationFileTagHeader(self):
+    return """\
+# tags: [ linux mac win ]
+# results: [ Failure RetryOnFailure Skip ]
+"""
+
+
+def CreateGenericExpectations():
+  return GenericExpectations()