aboutsummaryrefslogtreecommitdiff
path: root/doc
diff options
context:
space:
mode:
Diffstat (limited to 'doc')
-rw-r--r--doc/changes.rst212
-rw-r--r--doc/conf.py212
-rw-r--r--doc/index.rst81
-rw-r--r--doc/installation.rst58
-rw-r--r--doc/reference.rst76
-rw-r--r--doc/tests.rst275
-rw-r--r--doc/todo.rst6
-rw-r--r--doc/usage.rst413
8 files changed, 1333 insertions, 0 deletions
diff --git a/doc/changes.rst b/doc/changes.rst
new file mode 100644
index 0000000..5476058
--- /dev/null
+++ b/doc/changes.rst
@@ -0,0 +1,212 @@
+Version History
+***************
+
+.. _version_0_16.2:
+
+Version 0.16.2
+==============
+
+* Fix NameError in ShellTestProvider.
+
+Version 0.16.1
+==============
+
+* Add some options to 0xbench, skia, v8 benchmarks to configure number of iterations
+
+.. _version_0_16:
+
+Version 0.16
+============
+* Added ashmem test
+* Added binder test
+
+.. _version_0_15:
+
+Version 0.15
+============
+* Add support for canvas test in methanol
+* fix hostshell-workload test for upstream builds
+
+.. _version_0_14:
+
+Version 0.14
+============
+* added private test support for bigLITTLE workload suite
+
+.. _version_0_13:
+
+Version 0.13
+============
+* update git URL for methanol
+* delete the copy media files source because this is done with the android_install_cts_medias action
+
+.. _version_0_12:
+
+Version 0.12
+============
+* add sched_tests.py
+* add task_placement test
+* fixes to sdcard mount and vellamo tests
+
+.. _version_0_11:
+
+Version 0.11
+============
+* methanol test fix
+* add support the android black box harness
+
+.. _version_0_10:
+
+Version 0.10
+============
+* update to methanol
+* add MonkeyRunner Parser
+
+.. _version_0_9_1:
+
+Version 0.9.1
+=============
+* Fix bug 1037936 caused by revno 189.3.18
+
+.. _version_0_9:
+
+Version 0.9
+===========
+* Make mmtest logs less verbose.
+* Wait longer for wifi to turn off.
+* add extract-attachments sub command
+* big_LITTLE: add support for selecting testsuite through $(OPTIONS)
+* add methanol test
+
+.. _version_0_8:
+
+Version 0.8
+===========
+* added a test wrapper to make it easier to add shell script type tests
+* updated CTS to use latest Google version
+* added wifi connection test
+* updates to pm-qa
+
+.. _version_0_7:
+
+Version 0.7
+===========
+
+* add a test to disable android wallpaper
+* input method service test
+* monkey runner updates
+* pm-qa test support
+* big-LITTLE test support
+
+.. _version_0_6:
+
+Version 0.6
+===========
+
+* added new sleep test
+* added a hello world example test
+* increase number of repeats for skia benchmark
+* update URL's for test data in some tests
+
+.. _version_0_5:
+
+Version 0.5
+===========
+
+* new tests:
+ * cache-coherency
+ * iozone
+ * memtester
+* support running monkeyrunner tests
+
+.. _version_0_4:
+
+Version 0.4
+===========
+* new wifi test
+* update bluetooth test
+* update test bundle format to 1.3
+
+.. _version_0_3:
+
+Version 0.3
+===========
+* new tjbench test
+* new bluetooth test
+* remove dependency on linaro_json
+
+.. _version_0_2:
+
+Version 0.2
+===========
+* new gator test
+* update mmtest script
+* Bug #962094: error occurred when no parser specified for run-custom command
+* Bug #962096: the test_id generated is longer than 64
+
+.. _version_0_0.10:
+
+Version 0.0.10
+==============
+* new CTS test
+* new v8 test
+* new skia test
+* new glmark2 test
+* add support for install option
+* add support for multiple ids for delete and parse commands
+* remove external tools
+
+.. _version_0_0.9:
+
+Version 0.0.9
+=============
+* add unit test
+* fix LP: #902161 by removing dependency on pexpect.
+
+.. _version_0_0.8:
+
+Version 0.0.8
+=============
+* fix the logical of makedirs Bug LP:#891326
+* modify mmtest to use the built-in MediaFramework
+
+.. _version_0_0.7:
+
+Version 0.0.7
+=============
+* add new mmtest for Multimedia Test
+
+.. _version_0_0.6:
+
+Version 0.0.6
+=============
+* fix install options to go through install method rather than test loader
+
+.. _version_0_0.5:
+
+Version 0.0.5
+=============
+* add support for install option of install subcommand
+* change monkey to always return 0
+
+.. _version_0_0.4:
+
+Version 0.0.4
+=============
+* update for 0xbench's package name modification
+
+.. _version_0_0.3:
+
+Version 0.0.3
+=============
+* add function to collect package information and screen shot after test
+* add support for two more instances to be executed simultaneously
+* add check for the existence of adb command
+* modify MANIFEST.in to make files in test_definitions be installed successfully
+
+.. _version_0_0.1:
+
+Version 0.0.1
+=============
+
+* Initial release
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 0000000..3900b83
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+#
+# Linaro JSON documentation build configuration file, created by
+# sphinx-quickstart on Mon Dec 27 16:39:47 2010.
+#
+# This file is execfile() with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.append(os.path.abspath('..'))
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.viewcode']
+
+# Configuration for sphinx.ext.todo
+
+todo_include_todos = True
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = []
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'LAVA Android Test'
+copyright = u'2010-2012, Linaro Limited'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+import versiontools
+import lava_android_test
+version = "%d.%d" % lava_android_test.__version__[0:2]
+# The full version, including alpha/beta/rc tags.
+release = versiontools.format_version(lava_android_test.__version__)
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be
+# searched for source files.
+exclude_trees = []
+
+# The rest default role (used for this markup: `text`) to use for
+# all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that
+# come with Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'LAVAAndroidTestDocumentation'
+
+
+# -- Options for LaTeX output ------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples (source
+# start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'LAVA Android Test.tex', u'LAVA Android Test Documentation',
+ u'Yongqin Liu', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..d4ec0e1
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,81 @@
+===============================
+LAVA Android Test Documentation
+===============================
+
+LAVA Android Test is a wrapper framework exposing unified API and command line
+interface for running arbitrary tests and storing the results in a structured
+manner.
+
+LAVA Android Test is a part of the LAVA stack and can be used with other LAVA
+components, most notably the dispatcher (for setting up the test environment
+and controlling execution of multiple tests) and the dashboard (for storing
+results).
+
+.. seealso:: To learn more about LAVA see https://launchpad.net/lava
+
+Indices and tables
+==================
+
+.. toctree::
+ :maxdepth: 2
+
+ installation.rst
+ usage.rst
+ tests.rst
+ reference.rst
+ changes.rst
+ todo.rst
+
+Features
+========
+
+* Ability to enumerate, install, run and remove tests on a Linux-based system.
+* Support for benchmarks as well as pass/fail tests.
+* Support for capturing android environment information such as installed packages and
+ hardware information and recording that in a machine-readable manner.
+* Store results in raw form (log files) as well as Linaro Dashboard Bundle
+ format that can be uploaded to the LAVA Dashboard for archiving and analysis.
+* Extensible API for adding new tests (:class:`~lava_android_test.api.ITest`)
+* Ever-growing collection of freely available and generic tests and benchmarks
+
+Quickstart
+==========
+
+This example will run on Ubuntu Lucid and beyond. It is better to install it
+in a virtual environment, so that it will not interfere with the system environment::
+
+ $ virtualenv ${workspace} ### create the virtual environment
+ $ source ${workspace}/bin/activate ### enter the virtual environment
+ $ pip install --upgrade lava-android-test ### install the latest package to the virtual environment
+ $ lava-android-test install monkey ### install the test named monkey
+ $ lava-android-test run monkey ### run the monkey test
+ $ deactivate ### exit the virtual environment
+
+.. seealso:: For a more thorough description see :ref:`usage`
+.. seealso:: For detailed installation instructions see :ref:`installation`
+
+Latest documentation
+====================
+
+This documentation may be out of date, see
+http://validation.linaro.org/static/docs/lava-android-test/ or the
+Documentation link on any LAVA instance to learn more.
+
+
+Source code, bugs and patches
+=============================
+
+The project is maintained on Launchpad at http://launchpad.net/lava-android-test/.
+
+You can get the source code with bazaar using ``bzr branch lp:lava-android-test``.
+Patches can be submitted using Launchpad merge proposals (for introduction to
+this and topic see https://help.launchpad.net/Code/Review).
+
+Please report all bugs at https://bugs.launchpad.net/lava-android-test/+filebug.
+
+Most of the team is usually available in ``#linaro`` on ``irc.freenode.net``.
+Feel free to drop by to chat and ask questions.
+
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/doc/installation.rst b/doc/installation.rst
new file mode 100644
index 0000000..3b30cb4
--- /dev/null
+++ b/doc/installation.rst
@@ -0,0 +1,58 @@
+
+.. _installation:
+
+Installation
+============
+
+Prerequisites
+^^^^^^^^^^^^^
+
+The following debian packages are needed to use LAVA Test:
+
+* python-setuptools
+* python-apt
+* usbutils
+* python-testrepository - for running unit tests
+* python-sphinx - for building documentation
+
+Installation Options
+^^^^^^^^^^^^^^^^^^^^
+
+There are several installation options available:
+
+Using Python Package Index
+--------------------------
+
+This package is being actively maintained and published in the `Python Package
+Index <http://pypi.python.org>`_. You can install it if you have `pip
+<http://pip.openplans.org/>`_ tool using just one line::
+
+ pip install lava-android-test
+
+
+Using source tarball
+--------------------
+
+To install from source you must first obtain a source tarball from either pypi
+or from `Launchpad <http://launchpad.net/>`_. To install the package unpack the
+tarball and run::
+
+ python setup.py install
+
+You can pass ``--user`` if you prefer to do a local (non system-wide)
+installation. Note that executable programs are placed in ``~/.local/bin/`` and
+this directory is not on ``PATH`` by default.
+
+Installing for development
+--------------------------
+
+An easy way to set things up for local development is to create a python
+virtualenv. You can create the virtualenv anywhere in your filesystem. In
+the example below, it's simply put under the bzr repo where development
+is being done from::
+
+ bzr branch lp:lava-android-test
+ cd lava-android-test
+ virtualenv .venv ; . ./.venv/bin/activate
+ pip install keyring
+ ./setup.py develop
diff --git a/doc/reference.rst b/doc/reference.rst
new file mode 100644
index 0000000..aa0cbcb
--- /dev/null
+++ b/doc/reference.rst
@@ -0,0 +1,76 @@
+.. _reference:
+
+=========
+Reference
+=========
+
+.. _command_reference:
+
+Command Reference
+=================
+
+.. automodule:: lava_android_test.commands
+ :members:
+
+.. todo::
+
+ * Describe basic commands
+ * Describe arguments and options to each command in detail
+
+Pathnames and files
+===================
+
+LAVA Android Test uses the following files:
+
+* ``/tmp/lava-android-test/`` -- temporary directory to put temporary files of each lava-android-test instance.
+
+.. _code_reference:
+
+Code reference
+==============
+
+.. todo::
+
+ * Describe general code layout
+ * Describe key API integration points (on a separate page if needed for clarity)
+ * Provide an example test and walk the reader through the meaning of each part
+
+Abstract Interfaces
+^^^^^^^^^^^^^^^^^^^
+
+.. automodule:: lava_android_test.api
+ :members:
+
+Test definitions and test providers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. automodule:: lava_android_test.test_definitions
+ :members:
+
+Test components (installers, runners and parsers)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. automodule:: lava_android_test.testdef
+ :members:
+
+Core Modules
+^^^^^^^^^^^^
+
+.. automodule:: lava_android_test.config
+ :members:
+
+Environment Scanners
+^^^^^^^^^^^^^^^^^^^^
+
+.. automodule:: lava_android_test.hwprofile
+ :members:
+
+.. automodule:: lava_android_test.swprofile
+ :members:
+
+Utilities
+^^^^^^^^^
+
+.. automodule:: lava_android_test.utils
+ :members:
+
diff --git a/doc/tests.rst b/doc/tests.rst
new file mode 100644
index 0000000..71a11dd
--- /dev/null
+++ b/doc/tests.rst
@@ -0,0 +1,275 @@
+.. _test:
+
+===============
+Supported Tests
+===============
+
+The following tests are currently supported in LAVA Android Test:
+
+ * `0xbench`_
+ * `bctest`_
+ * `big_LITTLE`_
+ * `blackbox`_
+ * `bluetooth`_
+ * `busybox`_
+ * `cache_coherency`_
+ * `commands`_
+ * `command-example`_
+ * `command-linaro_android_kernel_test`_
+ * `command-tjunittest`_
+ * `cts`_
+ * `gatortest`_
+ * `glmark2`_
+ * `helloworld`_
+ * `hostshells`_
+ * `hostshell-connect-lab-wifi`_
+ * `hostshell-example`_
+ * `hostshell-sdcard-mounted`_
+ * `hostshell-workload`_
+ * `ime`_
+ * `install_prep_4bench`_
+ * `instruments`_
+ * `instrument-example`_
+ * `iozone`_
+ * `memtester`_
+ * `methanol`_
+ * `mmtest`_
+ * `monkey`_
+ * `monkey_long_run`_
+ * `pm_qa`_
+ * `sched_tests`_
+ * `shells`_
+ * `shell-binder`_
+ * `shell-dalvik-vm-unit-tests`_
+ * `shell-example`_
+ * `skia`_
+ * `sleep`_
+ * `task_placement`_
+ * `tjbench`_
+ * `usbhardware`_
+ * `v8`_
+ * `monkeyrunner(system)`_
+ * `monkeyrunner(third-party-benchmarks)`_
+
+0xbench
++++++++
+.. automodule:: lava_android_test.test_definitions.0xbench
+
+bctest
+++++++
+.. automodule:: lava_android_test.test_definitions.bctest
+
+big_LITTLE
+++++++++++
+.. automodule:: lava_android_test.test_definitions.big_LITTLE
+
+blackbox
+++++++++
+.. automodule:: lava_android_test.test_definitions.blackbox
+
+bluetooth
++++++++++
+.. automodule:: lava_android_test.test_definitions.bluetooth
+
+busybox
++++++++
+.. automodule:: lava_android_test.test_definitions.busybox
+
+cache_coherency
++++++++++++++++
+.. automodule:: lava_android_test.test_definitions.cache_coherency
+
+commands
+++++++++
+.. automodule:: lava_android_test.test_definitions.commands
+
+command-example
++++++++++++++++
+.. automodule:: lava_android_test.test_definitions.commands.example
+
+command-linaro_android_kernel_test
+++++++++++++++++++++++++++++++++++
+.. automodule:: lava_android_test.test_definitions.commands.linaro_android_kernel_test
+
+command-tjunittest
+++++++++++++++++++
+.. automodule:: lava_android_test.test_definitions.commands.tjunittest
+
+cts
++++
+.. automodule:: lava_android_test.test_definitions.cts
+
+gatortest
++++++++++
+.. automodule:: lava_android_test.test_definitions.gatortest
+
+glmark2
++++++++
+.. automodule:: lava_android_test.test_definitions.glmark2
+
+helloworld
+++++++++++
+.. automodule:: lava_android_test.test_definitions.helloworld
+
+hostshells
+++++++++++
+.. automodule:: lava_android_test.test_definitions.hostshells
+
+hostshell-connect-lab-wifi
+++++++++++++++++++++++++++
+Try to connect to the wifi in the lava-lab for other tests; it can also be used to test whether the wifi works
+
+**URL:** None
+
+**Default options:** None
+
+hostshell-example
++++++++++++++++++
+Example of how to integrate a test which is a host shell script into lava-android-test
+
+**URL:** None
+
+**Default options:** None
+
+hostshell-sdcard-mounted
+++++++++++++++++++++++++
+Check if the sdcard is mounted when android boots up by checking the output of the mount command
+
+**URL:** None
+
+**Default options:** None
+
+hostshell-workload
+++++++++++++++++++
+Test of Automatic Workload Automation for big LITTLE Systems from ARM
+
+**URL:** https://linaro-private.git.linaro.org/gitweb?p=workload-automation.git;a=summary
+
+**Default options:** None
+
+ime
++++
+.. automodule:: lava_android_test.test_definitions.ime
+
+install_prep_4bench
++++++++++++++++++++
+.. automodule:: lava_android_test.test_definitions.install_prep_4bench
+
+instruments
++++++++++++
+.. automodule:: lava_android_test.test_definitions.instruments
+
+instrument-example
+++++++++++++++++++
+.. automodule:: lava_android_test.test_definitions.instruments.example
+
+iozone
+++++++
+.. automodule:: lava_android_test.test_definitions.iozone
+
+memtester
++++++++++
+.. automodule:: lava_android_test.test_definitions.memtester
+
+methanol
+++++++++
+.. automodule:: lava_android_test.test_definitions.methanol
+
+mmtest
+++++++
+.. automodule:: lava_android_test.test_definitions.mmtest
+
+monkey
+++++++
+.. automodule:: lava_android_test.test_definitions.monkey
+
+monkey_long_run
++++++++++++++++
+.. automodule:: lava_android_test.test_definitions.monkey_long_run
+
+pm_qa
++++++
+.. automodule:: lava_android_test.test_definitions.pm_qa
+
+sched_tests
++++++++++++
+.. automodule:: lava_android_test.test_definitions.sched_tests
+
+shells
+++++++
+.. automodule:: lava_android_test.test_definitions.shells
+
+shell-binder
+++++++++++++
+Measures the rate at which a short binder IPC operation can be
+performed. The operation consists of the client sending a parcel
+that contains two integers. For each parcel that the server
+receives, it adds the two integers and sends the sum back to
+the client.
+
+**URL:** http://android.git.linaro.org/gitweb?p=platform/system/extras.git;a=blob;f=tests/binder/benchmarks/binderAddInts.cpp
+
+**Default options:** None
+
+shell-dalvik-vm-unit-tests
+++++++++++++++++++++++++++
+Run the unit tests for dalvik vm.
+
+**URL:** http://android.git.linaro.org/gitweb?p=platform/dalvik.git;a=blob;f=unit-tests/dvmHumanReadableDescriptor_test.cpp
+
+**Default options:** None
+
+shell-example
++++++++++++++
+Example of how to integrate a test which is a shell script into lava-android-test
+
+**URL:** None
+
+**Default options:** None
+
+skia
+++++
+.. automodule:: lava_android_test.test_definitions.skia
+
+sleep
++++++
+.. automodule:: lava_android_test.test_definitions.sleep
+
+task_placement
+++++++++++++++
+.. automodule:: lava_android_test.test_definitions.task_placement
+
+tjbench
++++++++
+.. automodule:: lava_android_test.test_definitions.tjbench
+
+usbhardware
++++++++++++
+.. automodule:: lava_android_test.test_definitions.usbhardware
+
+v8
+++
+.. automodule:: lava_android_test.test_definitions.v8
+
+monkeyrunner(system)
+++++++++++++++++++++
+Run some system tests with the monkeyrunner scripts.
+but this part should be recreated with uiautomator scripts
+
+**URL:** http://android.git.linaro.org/gitweb?p=test/linaro/android/system.git;a=summary
+
+**Default options:** None
+
+monkeyrunner(third-party-benchmarks)
+++++++++++++++++++++++++++++++++++++
+Support for running the third-party benchmark applications automatically,
+and collect the test result automatically.
+The supported third party benchmark applications include:
+andebench/antutu/caffeinemark/geekbench/glbenchmark/linpack/nbench/quadrant/vellamo
+
+**URL of apks:** https://linaro-private.git.linaro.org/gitweb?p=people/yongqinliu/benchmark-apks.git;a=summary
+
+**URL of scripts:** http://android.git.linaro.org/gitweb?p=platform/external/thirdparty-benchmarks.git;a=summary
+
+**Default options:** None
+
diff --git a/doc/todo.rst b/doc/todo.rst
new file mode 100644
index 0000000..620f67e
--- /dev/null
+++ b/doc/todo.rst
@@ -0,0 +1,6 @@
+List of items that need work
+============================
+
+.. todolist::
+
+Add support for out-of-tree test.
diff --git a/doc/usage.rst b/doc/usage.rst
new file mode 100644
index 0000000..3f199ff
--- /dev/null
+++ b/doc/usage.rst
@@ -0,0 +1,413 @@
+.. _usage:
+
+=====
+Usage
+=====
+
+Workflow Overview
+=================
+
+LAVA Android Test can be used in several different ways. Most notably those are
+standalone (without the LAVA dispatcher) and managed (when LAVA Android Test is
+installed and controlled by the LAVA dispatcher).
+
+Standalone usage
+^^^^^^^^^^^^^^^^
+
+In standalone mode a human operator installs LAVA Android Test on some device
+(laptop or computer or a virtual machine), installs the tests that are to be
+executed and then executes them manually (by manually running LAVA Android test,
+the actual tests are non-interactive).
+
+Using LAVA to develop and run new tests
++++++++++++++++++++++++++++++++++++++++
+
+This mode is useful for test development (adding new tests, developing custom
+tests especially tailored for LAVA, etc.). Here the typical cycle depends on
+how the test is wrapped for usage by LAVA and what the test developer is
+focusing on.
+
+While developing the actual test the typical set of commands might look like
+this::
+
+ $ lava-android-test install my-custom-test
+ $ lava-android-test run my-custom-test
+ $ lava-android-test uninstall my-custom-test
+
+Here the developer could observe changes to the test program (that is
+presumably compiled and copied somewhere by the install stage).
+
+Using LAVA to analyze test results
+++++++++++++++++++++++++++++++++++
+
+Developing the test is only half of the story. The other half is developing
+LAVA Android Test integration code, most importantly the artefact parser / analyzer.
+This part has to be implemented in python (unlike the test program that can be
+implemented in any language and technology). Here the developer is focusing on
+refining the parser to see if the outcome is as intended. Assuming that earlier
+the developer ran the test at least once and wrote down the result identifier.
+The set of commands one might use is::
+
+ $ lava-android-test parse result-id
+
+The result id is used to locate leftovers from running that specific test
+at some previous point in time.
+
+By default parse will print the bundle to standard output for inspection.
+It should be redirected to a pager for easier verification.
+
+.. note::
+
+ While the syntax of the bundle created with `lava-android-test parse` is always
+ correct (or, if the parser does something really, really strange, a
+ detailed error is reported), the actual contents may not be what you
+ intended it to be. Parsers are ultimately fragile as they mostly deal with
+ unstructured or semi-structured free-form text that most test programs seem
+ to produce. The ultimate goal of a developer should be to produce
+ unambiguous, machine readable format. This level of integration would allow
+ to wrap a whole class of tests in one go (such as all xUnit-XML speaking
+ test frameworks).
+
+Usage with the dispatcher
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The dispatcher is useful for automating LAVA Android Test environment setup, describing
+test scenarios (the list of tests to invoke) and finally storing the results in
+the LAVA dashboard.
+
+Typically this mode is based on the following sequence of commands:
+
+#. Install lava-android-test along with the required dependencies.
+#. Install the test or tests.
+#. Run, parse and store in one.
+
+Here the whole setup is non-interactive and at the end the dispatcher can copy
+the output bundle for additional processing.
+
+Automation considerations
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. _wrapping_existing_test_or_benchmark:
+
+Wrapping existing test or benchmark
+===================================
+
+LAVA Android Test can be extended in several different ways. There is no best method,
+each has some pros and cons. In general we welcome any freely redistributable,
+generic tests. Those enrich the LAVA ecosystem by providing useful
+out-of-the-box features to our users.
+
+Technically all tests are hidden behind a set of abstract interfaces that tell
+LAVA Android Test what to do in response to operator or dispatcher actions. The primary
+interface is :class:`~lava_android_test.api.ITest` and the three principal
+methods: :meth:`~lava_android_test.api.ITest.install`,
+:meth:`~lava_android_test.api.ITest.run`,
+:meth:`~lava_android_test.api.ITest.parse`.
+
+In practice it is usually much easier to instantiate our pluggable delegate
+test (:class:`lava_android_test.testdef.Test`) and define the three delegates that
+know how to install, run and parse. Again for each step we have a base class
+that can be easily customized or even used directly as is. Those classes are
+:class:`~lava_android_test.testdef.TestInstaller`,
+:class:`~lava_android_test.testdef.TestRunner` and
+:class:`~lava_android_test.testdef.TestParser`. They all implement well-defined
+interfaces so if you wish to customize them you should become familiar with
+the API requirements first.
+
+Contributing new tests to LAVA
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The most direct way to add a new test is to contribute patches to LAVA Android Test
+itself. This method will simply add a new test definition to the collection of
+available tests.
+
+This method is recommended for generic tests that rarely change and are
+suitable for a wide variety of hardware and software for Android.
+
+The advantage is that those tests can be invoked out of the box and will be
+maintained by the LAVA team. The disadvantage is that all changes to those
+tests need to follow Linaro development work flow, get reviewed and finally
+merged. Depending on your situation this may be undesired.
+
+Steps to integrate an Android test to LAVA
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. Checkout the lava-android-test
+
+With the following command::
+
+ bzr branch lp:lava-android-test
+
+2. About wrapper script
+
+If the test tools are just commands that can be run on the Android system,
+and the output is well formatted, then congratulations, you can go
+directly to step 6. You don't need to write a wrapper script.
+
+3. Four types of simple tests
+
+ 1. Instrumentation test, run like "adb shell am instrument ..."
+
+ create a file like lava_android_test/test_definitions/instruments/example.py,
+ and put it under lava_android_test/test_definitions/instruments::
+
+ cmd = ("am instrument -r -w "
+ "com.android.emulator.connectivity.test/"
+ "android.test.InstrumentationTestRunner")
+ RUN_ADB_SHELL_STEPS = [cmd]
+
+
+ 2. Android command test, run like "adb shell test-command"
+
+ create a file like lava_android_test/test_definitions/commands/example.py
+ and put it under lava_android_test/test_definitions/commands::
+
+ RUN_ADB_SHELL_STEPS = ['tjunittest']
+ PATTERN = ("^\s*(?P<test_case_id>.+)\s+\.\.\.\s+(?P<result>\w+)\."
+ "\s+(?P<measurement>[\d\.]+)\s+(?P<units>\w+)\s*$")
+
+
+ 3. Android shell test, need to write a shell and run like "adb shell script.sh"
+
+ create a file like lava_android_test/test_definitions/shells/example.py
+ and put it under lava_android_test/test_definitions/shells::
+
+ #!/system/bin/sh
+
+ echo "test_case_fail=fail"
+ echo "test_case_pass=pass"
+
+ 4. Host shell test, need to write a shell and run like "bash script.sh"
+
+ create a file like lava_android_test/test_definitions/hostshells/example.sh
+    and put it under lava_android_test/test_definitions/hostshells::
+
+ #!/bin/bash
+ echo "hostshells-example-fail=fail"
+ echo "hostshells-example-pass=pass"
+
+ this shell script will be passed "-s $(SERIAL) $(OPTIONS)" as arguments
+
+4. About test scripts/tools
+
+Put the actual test tools in some place, normally they are
+in a sub directory of test_definitions, like the busybox test, i.e.
+the actual test tool is busybox_test.sh, and it is put in the
+lava_android_test/test_definitions/busybox directory.
+
+.. note::
+ In this case, we should modify the MANIFEST.in file in the root source directory.
+ Otherwise the contents in that directory won’t be installed into the system python library.
+ Like:
+
+    recursive-include lava_android_test/test_definitions/busybox *
+
+5. Add a test wrapper script for your test into the test_definitions directory.
+
+The content of the wrapper script should be something like below,
+Normally, you just need to redefine the relevant variables in the template below::
+
+ import os
+ import lava_android_test.testdef
+
+ test_name = 'test_sample'
+
+ #linux commands that will be run on the host before INSTALL_STEPS_ADB_PRE"
+ INSTALL_STEPS_HOST_PRE = []
+ #adb commands that will be run before install apk file into android
+ INSTALL_STEPS_ADB_PRE = []
+    #APK file path list that will be installed into android
+    APKS= []
+    #adb commands that will be run after installing the apk files into android
+    INSTALL_STEPS_ADB_POST = []
+ #linux commands that will be run on the host after INSTALL_STEPS_ADB_POST
+ INSTALL_STEPS_HOST_POST = []
+
+ #linux commands that will be run on the host before RUN_STEPS_ADB_PRE
+ RUN_STEPS_HOST_PRE = []
+    #adb commands that will be run before the adb shell steps
+    RUN_STEPS_ADB_PRE = []
+    #commands that will be run on android
+    ADB_SHELL_STEPS = []
+    #adb commands that will be run after the adb shell steps
+    RUN_STEPS_ADB_POST = []
+ #linux commands that will be run on the host after RUN_STEPS_ADB_POST
+ RUN_STEPS_HOST_POST = []
+
+ #pattern to parse the command output to generate the test result.
+ PATTERN = "^\s*(?P<test_case_id>\w+)=(?P<result>\w+)\s*$"
+
+ inst = lava_android_test.testdef.AndroidTestInstaller(steps_host_pre=INSTALL_STEPS_HOST_PRE,
+ steps_adb_pre=INSTALL_STEPS_ADB_PRE,
+ apks=APKS,
+ steps_adb_post=INSTALL_STEPS_ADB_POST,
+ steps_host_post=INSTALL_STEPS_HOST_POST)
+ run = lava_android_test.testdef.AndroidTestRunner(steps_host_pre=RUN_STEPS_HOST_PRE,
+ steps_adb_pre=RUN_STEPS_ADB_PRE,
+ adbshell_steps=ADB_SHELL_STEPS,
+ steps_adb_post=RUN_STEPS_ADB_POST,
+ steps_host_post=RUN_STEPS_HOST_POST)
+ parser = lava_android_test.testdef.AndroidTestParser(PATTERN)
+ testobj = lava_android_test.testdef.AndroidTest(testname=test_name,
+ installer=inst,
+ runner=run,
+ parser=parser)
+
+
+And in the command part, you can use "$(SERIAL)" to represent the device serial number, like::
+
+ RUN_STEPS_HOST_POST = [ 'python %s/android-0xbenchmark/android_0xbenchmark_wait.py $(SERIAL)' % curdir]
+
+and "$(OPTIONS)" to represent the option string passed from command line, like::
+
+    INSTALL_STEPS_HOST_PRE = [ 'echo $(OPTIONS)']
+    RUN_STEPS_HOST_PRE = [ 'echo $(OPTIONS)']
+
+then you can run lava-android-test install -o "install options string" or lava-android-test run -O "run options string"
+
+.. note::
+
+    Because lava-android-test will be run in the LAVA lab, where multiple devices may be connected simultaneously,
+    we should consider passing the device serial number to each test tool.
+ If the test tools is defined for steps_adb_pre/adbshell_steps/steps_adb_post,
+ then there is no need to pass the device serial number, lava-android-test will do this for you.
+
+
+6. Verify the test. You can:
+
+Here is a blog about install/test lava-android-test that you can reference:
+ http://www.linaro.org/linaro-blog/2011/12/01/local-lava-testing-of-android-ics
+
+* use "lava-android-test list-tests" to check if the test wrapper created can be recognized,
+* use "lava-android-test install ${test_name}" to install the test,
+* use "lava-android-test run ${test_name}" to execute the test,
+* use "lava-android-test show ${result_id}" to show the output of the executed test,
+* use "lava-android-test parse ${result_id}" to generate the result bundle for the executed test.
+
+7. Integrate Into Lava
+
+When you have done the above steps and verified your test that works well,
+then you can integrate it in LAVA with the android-build.
+
+Here is a description about that:
+ https://wiki.linaro.org/Platform/Android/AndroidBuild-LavaIntegration
+
+8. Add Document
+
+At last don’t forget to add an entry and some document in the doc/tests.rst file. Like::
+
+ busybox
+ +++++++
+ .. automodule:: lava_android_test.test_definitions.busybox
+
+Then the information will be listed in the below url:
+ http://lava-android-test.readthedocs.org/en/latest/tests.html
+
+9. Commit Modification
+
+In lava-android-test directory, run the following commands::
+
+ bzr launchpad-login ${your-lauchpad-id}
+    bzr commit -m '${commit msg}'
+ bzr push lp:~${your-launchpad-id}/lava-android-test/${branch-name}
+
+Then you can see your branch in the following page:
+ https://code.launchpad.net/lava-android-test
+
+Click your branch, and click the “Propose for merging” link in your branch page to submit a merge proposal.
+In the proposal page, please set Reviewer: to linaro-validation.
+
+Adding Results Parsing
+++++++++++++++++++++++
+
+Because every test has its own way of displaying results, there is no common,
+enforced way of interpreting the results from any given test. That means that
+every test definition also has to define a parser so that LAVA Android Test can
+understand how to pick out the most useful bits of information from the output.
+What we've tried to do, is make this as simple as possible for the most common
+cases, while providing the tools necessary to handle more complex output.
+
+To start off, there are some fields you are always going to want to either pull
+from the results, or define. For all tests:
+
+* test_case_id - This is just a field that uniquely identifies the test.
+ This can contain letters, numbers, underscores, dashes, or periods.
+ If you use any illegal characters, they will automatically be dropped
+ by the TestParser base class before parsing the results. Spaces will be
+ automatically converted to underscores. If you wish to change this behaviour,
+ make sure that you either handle fixing the test_case_id in your parser,
+ or override the TestParser.fixids() method.
+* result - result is simply the result of the test. This applies to both qualitative
+ as well as quantitative tests, and the meaning is specific to the test itself.
+ The valid values for result are: "pass", "fail", "skip", or "unknown".
+
+For performance tests, you will also want to have the following two fields:
+
+* measurement - the "score" or resulting measurement from the benchmark.
+* units - a string defining the units represented by the measurement in some way
+ that will be meaningful to someone looking at the results later.
+
+For results parsing, it's probably easier to look at some examples. Several
+tests have already been defined in the lava-android-test test_definitions directory
+that serve as useful examples.
+
+Defining a simple test
+++++++++++++++++++++++
+
+**Example 1** The tjunittest example might look something like this::
+
+ import os
+ import lava_android_test.testdef
+
+ test_name = 'tjunittest'
+
+ #linux commands that will be run on the host before INSTALL_STEPS_ADB_PRE"
+ INSTALL_STEPS_HOST_PRE = []
+ #adb commands that will be run before install apk file into android
+ INSTALL_STEPS_ADB_PRE = []
+    #APK file path list that will be installed into android
+    APKS= []
+    #adb commands that will be run after installing the apk files into android
+    INSTALL_STEPS_ADB_POST = []
+ #linux commands that will be run on the host after INSTALL_STEPS_ADB_POST
+ INSTALL_STEPS_HOST_POST = []
+
+ #linux commands that will be run on the host before RUN_STEPS_ADB_PRE
+ RUN_STEPS_HOST_PRE = []
+    #adb commands that will be run before the adb shell steps
+    RUN_STEPS_ADB_PRE = []
+    #commands that will be run on android
+    ADB_SHELL_STEPS = ['tjunittest']
+    #adb commands that will be run after the adb shell steps
+    RUN_STEPS_ADB_POST = []
+ #linux commands that will be run on the host after RUN_STEPS_ADB_POST
+ RUN_STEPS_HOST_POST = []
+
+ #pattern to parse the command output to generate the test result.
+ PATTERN = "^\s*(?P<test_case_id>.+)\s+\.\.\.\s+(?P<result>\w+)\.\s+(?P<measurement>[\d\.]+)\s+(?P<units>\w+)\s*$"
+
+ inst = lava_android_test.testdef.AndroidTestInstaller(steps_host_pre=INSTALL_STEPS_HOST_PRE,
+ steps_adb_pre=INSTALL_STEPS_ADB_PRE,
+ apks=APKS,
+ steps_adb_post=INSTALL_STEPS_ADB_POST,
+ steps_host_post=INSTALL_STEPS_HOST_POST)
+ run = lava_android_test.testdef.AndroidTestRunner(steps_host_pre=RUN_STEPS_HOST_PRE,
+ steps_adb_pre=RUN_STEPS_ADB_PRE,
+ adbshell_steps=ADB_SHELL_STEPS,
+ steps_adb_post=RUN_STEPS_ADB_POST,
+ steps_host_post=RUN_STEPS_HOST_POST)
+ parser = lava_android_test.testdef.AndroidTestParser(PATTERN)
+ testobj = lava_android_test.testdef.AndroidTest(testname=test_name,
+ installer=inst,
+ runner=run,
+ parser=parser)
+
+In this example, we just simply defined the tjunittest command in ADB_SHELL_STEPS variable,
+and defined the PATTERN variable used by AndroidTestParser.
+
+If you were to save this under the test_definitions directory as 'tjunittest.py',
+then run 'lava-android-test install tjunittest' and 'lava-android-test run tjunittest',
+you would have a test result with the result id shown to you.
+And you can also run 'lava-android-test parse ${result_id}' to get the test result in the json format,
+which you can submit to lava.
+