On Mon, 2010-10-18 at 16:50 -0200, Lucas Meneghel Rodrigues wrote:
> This should catch scenarios wherein writeback doesn't flush
> data to disk that is older than 30 seconds.

Hi Akshay, I took time to review your test, please see comments below.
Please let me know what you think about it.

> Notes: I (lmr) am re-mailing this test because the patch
> mailer had previously eaten most of it. Also, the first patch
> Akshay had send for this code has been merged into this patch,
> so hopefully this is the full version of the code he intended
> to contribute.
> 
> Signed-off-by: Akshay Lal <[email protected]>
> ---
>  client/tests/wb_kupdate/common.py              |    8 +
>  client/tests/wb_kupdate/control                |   47 ++++
>  client/tests/wb_kupdate/wb_kupdate.py          |  300 
> ++++++++++++++++++++++++
>  client/tests/wb_kupdate/wb_kupdate_unittest.py |  105 +++++++++
>  4 files changed, 460 insertions(+), 0 deletions(-)
>  create mode 100644 client/tests/wb_kupdate/__init__.py
>  create mode 100644 client/tests/wb_kupdate/common.py
>  create mode 100644 client/tests/wb_kupdate/control
>  create mode 100644 client/tests/wb_kupdate/wb_kupdate.py
>  create mode 100644 client/tests/wb_kupdate/wb_kupdate_unittest.py
> 
> diff --git a/client/tests/wb_kupdate/__init__.py 
> b/client/tests/wb_kupdate/__init__.py
> new file mode 100644
> index 0000000..e69de29
> diff --git a/client/tests/wb_kupdate/common.py 
> b/client/tests/wb_kupdate/common.py
> new file mode 100644
> index 0000000..c505ee4
> --- /dev/null
> +++ b/client/tests/wb_kupdate/common.py
> @@ -0,0 +1,8 @@
> +import os, sys
> +dirname = os.path.dirname(sys.modules[__name__].__file__)
> +autotest_dir = os.path.abspath(os.path.join(dirname, "..", "..", ".."))
> +client_dir = os.path.join(autotest_dir, "client")
> +sys.path.insert(0, client_dir)
> +import setup_modules
> +sys.path.pop(0)
> +setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
> diff --git a/client/tests/wb_kupdate/control b/client/tests/wb_kupdate/control
> new file mode 100644
> index 0000000..6cd65d8
> --- /dev/null
> +++ b/client/tests/wb_kupdate/control
> @@ -0,0 +1,47 @@
> +AUTHOR = "Akshay Lal <[email protected]>"
> +NAME = "wb_kupdate"
> +TEST_CATEGORY = "Functional"
> +TEST_CLASS = "General"
> +TEST_TYPE = "client"
> +TIME = 'MEDIUM'
> +DOC='''
> +This test checks the wb_kupdate code path by writing data to a sparse file
> +mounted on a loopback device formatted with a user-specified filesystem,
> +and waiting a maximum of 'max_flush_time' for writeback to flush the dirty
> +data from the cache to disk.
> +
> +At the end of the test a keyval is generated which states per iteration the 
> time
> +taken to write the user specified amount of data to disk.
> +'''
> +
> +import os
> +# Required Parameters:
> +# --------------------
> +duration=2                       # In minutes.
> +mount_point='/export/wb_kupdate' # Absolute path.
> +file_count=1
> +write_size=1                     # In MB.
> +
> +# Optional Parameters:
> +# --------------------
> +max_flush_time=1                # In minutes.
> +file_system='ext4'              # mkfs.<file_system> must already exist on
> +                                # the machine. To avoid device initialization
> +                                # set to None.
> +remove_previous=False           # Boolean.
> +sparse_file=os.path.join(       # Absolute path to the sparse file.
> +        os.getcwd(),
> +        'sparse_file')
> +
> +# Beginning execution of the wb_kupdate test:
> +# ------------------------------------
> +job.run_test('wb_kupdate',
> +             duration=int(duration),
> +             mount_point=mount_point,
> +             file_count=int(file_count),
> +             write_size=int(write_size),
> +             max_flush_time=int(max_flush_time),
> +             file_system=file_system,
> +             remove_previous=remove_previous,
> +             sparse_file=sparse_file,
> +             tag='wb_kupdate_execution')
> diff --git a/client/tests/wb_kupdate/wb_kupdate.py 
> b/client/tests/wb_kupdate/wb_kupdate.py
> new file mode 100644
> index 0000000..5ed5af2
> --- /dev/null
> +++ b/client/tests/wb_kupdate/wb_kupdate.py
> @@ -0,0 +1,300 @@
> +import commands
> +import datetime
> +import logging
> +import os
> +from autotest_lib.client.bin import test
> +from autotest_lib.client.bin import utils
> +from autotest_lib.client.common_lib import error
> +
> +class wb_kupdate(test.test):
> +    version = 1
> +
> +
> +    def _execute(self, cmd, error_check_skip=False):

^ About this _execute function - we do have utility code that executes
subcommands, such as utils.system and utils.run. So why not just use the
default autotest functions?

> +        """Execute any command on the console.
> +
> +        @param cmd: the command to execute.
> +        @param error_check_skip: skip proactively checking for errors. 
> Default
> +        is false.
> +
> +        @returns: a list containing the return status and message of the
> +        command.
> +
> +        @raises error.TestError: if an exception is caught during command
> +        execution.
> +        """
> +        # Log the command.
> +        logging.debug('Command to execute: %s', cmd)
> +        try:
> +            # Execute the command.
> +            ret, output = commands.getstatusoutput(cmd)
> +
> +            # Check the output from the command.
> +            if error_check_skip:
> +                return output
> +
> +            if ret:
> +                logging.error('Error seen on stderr PIPE. Return Code: %s. '
> +                              'Message: %s', ret, output)
> +                raise OSError(output)

^ Some not-so-well-behaved commands do write to stderr even when there's
no actual error, and vice-versa, so this error message might be
misleading. Again, it'd be better to use utils.run or utils.system, and
avoid using shell commands for stuff that the python API covers.

> +            # Log the output.
> +            logging.debug('Command output: %s', output)
> +        except OSError, err:
> +            raise error.TestError('Exception caught while executing: %s. '
> +                                  'Error msg: %s' % (cmd, err.strerror))
> +
> +        return ret,output
> +
> +
> +    def _check_parameters(self, mount_point, write_size, file_count):
> +        """Check all test parameters.
> +
> +        @param mount_point: the path to the desired mount_point.
> +        @param write_size: the size of data in MB to write.
> +        @param file_count: the number of files to write.
> +        """
> +        # Check mount_point.
> +        if not os.path.exists(mount_point):
> +            logging.info('%s does not exist. Creating directory.', 
> mount_point)
> +            self._execute('mkdir -p %s' % mount_point)

^ Here I had added the -p flag to ensure the mount point will be indeed
created. Or better yet, we should use os.makedirs() instead.

> +        else:
> +            raise error.TestError('Mount point: %s already exists.' %
> +                                  mount_point)
> +
> +        # Check write_size > 0.
> +        if not (write_size > 0):
> +            raise error.TestError('Write size should be a positive integer.')
> +
> +        # Check file_count > 0.
> +        if not (file_count > 0) :
> +            raise error.TestError('File count should be a positive 
> integer.')
> +
> +
> +    def _reset_device(self, mount_point, file_system):
> +        """Reset the test. Reinitialize sparse file.
> +
> +        @param mount_point: the absolute path to the mount_point.
> +        @param file_system: the type of file_system.
> +        """
> +        # Umount device.
> +        self._execute('umount %s' % mount_point, error_check_skip=True)
> +
> +        # Remove sparse_file.
> +        self._execute('rm -rf %s' % self.sparse_file)
> +
> +        # Recreate sparse_file.
> +        self._execute('dd if=/dev/zero of=%s bs=1M seek=1000 count=1' %
> +                      self.sparse_file)
> +
> +        # Format sparse_file.
> +        self._execute('echo "y" |  mkfs -t %s %s' %
> +                      (file_system, self.sparse_file), error_check_skip=True)
> +
> +        # Mount sparse_file.
> +        self._execute('mount -o loop -t %s %s %s' % (file_system,
> +                                                     self.sparse_file,
> +                                                     mount_point))

^ We do have code in client/bin/partition.py to do the above. In
initialize, we could do:

def initialize(self):
    self._create_partition()

def _create_partition(self):
    self.partition = job.partition(self.sparse_file, 1024, job.tmpdir)
    self.partition.mkfs(fstype=self.file_system)
    self.partition.mount()

def _reset_device(self):
    self.partition.umount()
    os.remove(self.sparse_file)
    self._create_partition()

Or something like that, I think it'd avoid to write code for something
that we already have good code written for.

> +
> +        # Flush cache.
> +        self._flush_cache()
> +
> +
> +    def _flush_cache(self):
> +        """Flush both read and write cache.
> +        """
> +        # Flushing read and write cache.
> +        self._execute(self._SYSTEM_FLUSH)

^ we do have

from autotest_lib.client.bin import utils

utils.drop_caches()

That does pretty much the same

> +
> +    def _needs_more_time(self, start_time, duration, _now=None):
> +        """Checks to see if the test has run its course.
> +
> +        @param start_time: a datetime object specifying the start time of the
> +        test.
> +        @param duration: test duration in minutes.
> +        @param _now: used mostly for testing - ensures that the function 
> returns
> +        pass/fail depending on the value of _now.
> +
> +        @return: True if the test still needs to run longer.
> +               False if the test has run for 'duration' minutes.
> +        """
> +        if not _now:
> +            time_diff = datetime.datetime.now() - start_time
> +        else:
> +            time_diff = _now - start_time
> +        return time_diff <= datetime.timedelta(seconds=duration*60)
> +
> +    def _write_data(self, destination, counter, write_size):
> +        """Writes data to the cache/memory.
> +
> +        @param destination: the absolute path to where the data needs to be
> +        written.
> +        @param counter: the file counter.
> +        @param write_size: the size of data to be written.
> +
> +        @returns: the time when the write completed as a datetime object.
> +        """
> +        # Write data to disk.
> +        file_name = os.path.join(destination, 'test_file_%s' % counter)
> +        write_cmd = ('dd if=/dev/zero of=%s bs=1M count=%s' %
> +                     (file_name, write_size))
> +        self._execute(write_cmd)
> +
> +        # Time the write operation.
> +        write_completion_time = datetime.datetime.now()
> +
> +        # Return write completion time.
> +        return write_completion_time
> +
> +
> +    def _get_disk_usage(self, file_name):
> +        """Returns the disk usage of given file.
> +
> +        @param file_name: the name of the file.
> +
> +        @returns: the disk usage as an integer.
> +        """
> +        # Check du stats.
> +        cmd = '%s %s' % (self._DU_CMD, file_name)
> +
> +        # Expected value for  output = '1028\tfoo'
> +        ret,output = self._execute(cmd)
> +
> +        # Desired output = (1028, foo)
> +        output = output.split('\t')
> +
> +        return int(output[0])

^ It would be simpler to use os.stat, it seems.

> +
> +    def _wait_until_data_flushed(self, start_time, max_wait_time):
> +        """Check to see if the sparse file size increases.
> +
> +        @param start_time: the time when data was actually written into the
> +        cache.
> +        @param max_wait_time: the max amount of time to wait.
> +
> +        @returns: time waited as a datetime.timedelta object.
> +        """
> +        current_size = self._get_disk_usage(self.sparse_file)
> +        flushed_size = current_size
> +
> +        # Keep checking until du value changes.
> +        while current_size == flushed_size:
> +            # Get flushed_size.
> +            flushed_size = self._get_disk_usage(self.sparse_file)
> +
> +            # Check if data has been synced to disk.
> +            if not self._needs_more_time(start_time, max_wait_time):
> +                raise error.TestError('Data not flushed. Waited for %s 
> minutes '
> +                                      'for data to flush out.' % 
> max_wait_time)
> +
> +        # Return time waited.
> +        return datetime.datetime.now() - start_time
> +
> +
> +    def initialize(self):
> +        """Initialize all private and global member variables.
> +        """
> +        self._SYSTEM_FLUSH = 'sync; echo 3 > /proc/sys/vm/drop_caches'
> +        self._DU_CMD = 'du'
> +        self.mount_point = ''
> +        self.sparse_file = ''
> +        self.result_map = {}
> +
> +
> +    def run_once(self, duration, mount_point, file_count, write_size,
> +                 max_flush_time=1, file_system=None, remove_previous=False,
> +                 sparse_file=os.path.join(os.getcwd(),'sparse_file')):
> +        """Control execution of the test.
> +
> +        @param duration: an integer defining the duration of test test in
> +        minutes.
> +        @param mount_point: the absolute path to the mount point.
> +        @param file_count: the number of files to write.
> +        @param write_size: the size of each file in MB.
> +        @param max_flush_time: the maximum time to wait for the writeback to
> +        flush dirty data to disk. Default = 1 minute.
> +        @param file_system: the new file system to be mounted, if any.
> +        Default = None.
> +        @param remove_previous: boolean that allows the removal of previous
> +        files before creating a new one. Default = False.
> +        @param sparse_file: the absolute path to the sparse file.
> +        """
> +        # Check validity of parameters.
> +        self._check_parameters(mount_point, write_size, file_count)
> +        self.mount_point = mount_point
> +        self.sparse_file = sparse_file
> +
> +        # Start iterations.
> +        logging.info('Starting test operations.')
> +        test_start_time = datetime.datetime.now()
> +        counter = 1
> +
> +        # Continue testing until options.duration runs out.
> +        while self._needs_more_time(test_start_time, int(duration)):

^ We do have built in support for what you want to do here:
job.run_test() accepts a test_length parameter (s) that indicates how
many full iterations we should run in order to cover the given test
length. If you are interested in the implementation details you can look
at

http://autotest.kernel.org/browser/trunk/client/common_lib/test.py#L215


> +            # Check the current file_count.
> +            if counter % file_count == 0 or counter == 1:
> +                if file_system:
> +                    # Setting up device.
> +                    logging.info('Re-initializing device.')
> +                    self._reset_device(self.mount_point, file_system)
> +                else:
> +                    # Skipping reimage.
> +                    logging.info('Skipping device initialization.')
> +
> +            logging.info('Iteration %s.', counter)
> +
> +            # Write data to disk.
> +            write_completion_time = self._write_data(self.mount_point, 
> counter,
> +                                                     write_size)
> +            logging.debug('Write time:%s',
> +                          write_completion_time.strftime("%H:%M:%S"))
> +
> +            # Wait until data get synced to disk.
> +            time_taken = self._wait_until_data_flushed(write_completion_time,
> +                                                       max_flush_time)
> +
> +            # Log time statistics.
> +            logging.info('Time taken to flush data: %s seconds.',
> +                         time_taken.seconds)
> +
> +            # Check if there is a need to remove the previously written file.
> +            if remove_previous:
> +                logging.debug('Removing previous file instance.')
> +                rm_cmd = str('rm -rf %s;' % file_name)
> +                self._execute(rm_cmd)
> +            else:
> +                logging.debug('Not removing previous file instance.')
> +
> +            # Flush cache.
> +            logging.debug('Flush cache between iterations.')
> +            self._flush_cache()
> +
> +           # Update the result map.
> +            self.result_map[counter] = time_taken.seconds
> +
> +            # Increment the counter.
> +            counter += 1
> +
> +
> +    def postprocess(self):
> +        """Cleanup routine.
> +        """
> +        # Write out keyval map.
> +        self.write_perf_keyval(self.result_map)
> +
> +        # Unmount partition.
> +        logging.debug('Cleanup - unmounting loopback device.')
> +        self._execute('umount %s' % self.mount_point, error_check_skip=True)
> +
> +        # Removing the sparse file.
> +        logging.debug('Cleanup - removing sparse file.')
> +        self._execute('rm -rf %s' % self.sparse_file)
> +
> +        # Removing the mount_point.
> +        logging.debug('Cleanup - removing the mount_point.')
> +        self._execute('rmdir %s' % self.mount_point)
> +
> +        logging.info('Test operations completed.')
> diff --git a/client/tests/wb_kupdate/wb_kupdate_unittest.py 
> b/client/tests/wb_kupdate/wb_kupdate_unittest.py
> new file mode 100644
> index 0000000..84092ed
> --- /dev/null
> +++ b/client/tests/wb_kupdate/wb_kupdate_unittest.py
> @@ -0,0 +1,105 @@
> +#!/usr/bin/python
> +
> +import common
> +import datetime
> +import logging
> +import os
> +import time
> +import unittest

^ General comment - the imports are not following the coding style:

http://autotest.kernel.org/browser/trunk/CODING_STYLE

> +from autotest_lib.client.bin import test
> +from autotest_lib.client.bin import utils
> +from autotest_lib.client.common_lib import error
> +from autotest_lib.client.common_lib.test_utils import mock
> +from autotest_lib.client.tests.wb_kupdate import wb_kupdate
> +
> +class WbKupdateUnitTest(unittest.TestCase):
> +    def setUp(self):
> +        """Set up all required variables for the Unittest.
> +        """

^ Same for the doc strings.

> +        self._logger = logging.getLogger()
> +        self._wbkupdate_obj = WbKupdateSubclass()
> +        self._god = mock.mock_god()
> +
> +    def test_needs_more_time(self):
> +        """Tests the _needs_more_time method.
> +        """
> +        self._logger.info('Testing the "_needs_more_time" method.')
> +
> +        # Obvious failure - since start_time < start_time + 1.
> +        self.assertTrue(self._wbkupdate_obj._needs_more_time(
> +                start_time=datetime.datetime.now(),
> +                duration=1))
> +
> +        # Check if 1 minute has elapsed since start_time.
> +        self.assertFalse(self._wbkupdate_obj._needs_more_time(
> +                start_time=datetime.datetime.now(),
> +                duration=1,
> +                _now=datetime.datetime.now() + 
> datetime.timedelta(seconds=60)))
> +
> +    def test_wait_until_data_flushed_pass(self):
> +        """Tests the _wait_until_data_flushed method.
> +
> +        This tests the "success" code path.
> +        """
> +        self._logger.info('Testing the "_wait_until_data_flushed" method - '
> +                          'Success code path.')
> +
> +        # Creating stubs for required methods.
> +        self._god.stub_function(self._wbkupdate_obj,
> +                                "_get_disk_usage")
> +
> +        # Setting default return values for stub functions.
> +        # Setting the initial size of the file.
> +        self._wbkupdate_obj._get_disk_usage.expect_call('').and_return(10)
> +        # Returning the same file size - forcing code path to enter loop.
> +        self._wbkupdate_obj._get_disk_usage.expect_call('').and_return(10)
> +        # Returning a greater file size - exiting the while loop.
> +        self._wbkupdate_obj._get_disk_usage.expect_call('').and_return(11)
> +
> +        # Call the method.
> +        self._wbkupdate_obj._wait_until_data_flushed(datetime.datetime.now(),
> +                                                     1)
> +
> +        # Ensure all stubbed methods called.
> +        self._god.check_playback()
> +
> +
> +    def test_wait_until_data_flushed_fail(self):
> +        """Tests the _wait_until_data_flushed method.
> +
> +        This tests the "failure" code path.
> +        """
> +        self._logger.info('Testing the "_wait_until_data_flushed" method - '
> +                          'Failure code path.')
> +        # Creating stubs for required methods.
> +        self._god.stub_function(self._wbkupdate_obj,
> +                                "_get_disk_usage")
> +
> +        # Setting default return values for stub functions.
> +        # Setting the initial size of the file.
> +        self._wbkupdate_obj._get_disk_usage.expect_call('').and_return(10)
> +        # Returning the same file size - forcing code path to enter loop.
> +        self._wbkupdate_obj._get_disk_usage.expect_call('').and_return(10)
> +
> +        # Call the method.
> +        self.assertRaises(error.TestError,
> +                          self._wbkupdate_obj._wait_until_data_flushed,
> +                          start_time=datetime.datetime.now(),
> +                          max_wait_time=0)
> +
> +        # Ensure all stubbed methods called.
> +        self._god.check_playback()
> +
> +
> +class WbKupdateSubclass(wb_kupdate.wb_kupdate):
> +    """Sub-classing the wb_kupdate class.
> +    """
> +    def __init__(self):
> +        """Empty constructor.
> +        """
> +        # Create all test defaults.
> +        self.initialize()
> +
> +
> +if __name__ == '__main__':
> +    unittest.main()


_______________________________________________
Autotest mailing list
[email protected]
http://test.kernel.org/cgi-bin/mailman/listinfo/autotest

Reply via email to