Author: buildbot
Date: Wed May  2 18:42:25 2018
New Revision: 1029317

Log:
Staging update by buildbot for climate

Added:
    
websites/staging/climate/trunk/content/api/current/_sources/config/config_overview.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/config/config_writer.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/config/dataset_information.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/config/evaluation_settings.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/config/metrics_information.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/config/plots_settings.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/data_source/data_sources.rst.txt
    websites/staging/climate/trunk/content/api/current/_sources/index.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset_loader.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset_processor.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/ocw/evaluation.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/ocw/metrics.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/ocw/overview.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/ocw/plotter.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/ocw/statistical_downscaling.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/ocw/utils.rst.txt
    
websites/staging/climate/trunk/content/api/current/_sources/ui-backend/backend.rst.txt
    websites/staging/climate/trunk/content/api/current/_static/jquery-3.1.0.js
    
websites/staging/climate/trunk/content/api/current/ocw/statistical_downscaling.html
Modified:
    websites/staging/climate/trunk/content/   (props changed)
    websites/staging/climate/trunk/content/api/current/_static/alabaster.css
    websites/staging/climate/trunk/content/api/current/_static/basic.css
    
websites/staging/climate/trunk/content/api/current/_static/comment-bright.png
    websites/staging/climate/trunk/content/api/current/_static/comment-close.png
    websites/staging/climate/trunk/content/api/current/_static/comment.png
    websites/staging/climate/trunk/content/api/current/_static/doctools.js
    websites/staging/climate/trunk/content/api/current/_static/down-pressed.png
    websites/staging/climate/trunk/content/api/current/_static/down.png
    websites/staging/climate/trunk/content/api/current/_static/file.png
    websites/staging/climate/trunk/content/api/current/_static/jquery.js
    websites/staging/climate/trunk/content/api/current/_static/minus.png
    websites/staging/climate/trunk/content/api/current/_static/plus.png
    websites/staging/climate/trunk/content/api/current/_static/pygments.css
    websites/staging/climate/trunk/content/api/current/_static/searchtools.js
    websites/staging/climate/trunk/content/api/current/_static/up-pressed.png
    websites/staging/climate/trunk/content/api/current/_static/up.png
    websites/staging/climate/trunk/content/api/current/_static/websupport.js
    
websites/staging/climate/trunk/content/api/current/config/config_overview.html
    websites/staging/climate/trunk/content/api/current/config/config_writer.html
    
websites/staging/climate/trunk/content/api/current/config/dataset_information.html
    
websites/staging/climate/trunk/content/api/current/config/evaluation_settings.html
    
websites/staging/climate/trunk/content/api/current/config/metrics_information.html
    
websites/staging/climate/trunk/content/api/current/config/plots_settings.html
    
websites/staging/climate/trunk/content/api/current/data_source/data_sources.html
    websites/staging/climate/trunk/content/api/current/genindex.html
    websites/staging/climate/trunk/content/api/current/http-routingtable.html
    websites/staging/climate/trunk/content/api/current/index.html
    websites/staging/climate/trunk/content/api/current/objects.inv
    websites/staging/climate/trunk/content/api/current/ocw/dataset.html
    websites/staging/climate/trunk/content/api/current/ocw/dataset_loader.html
    
websites/staging/climate/trunk/content/api/current/ocw/dataset_processor.html
    websites/staging/climate/trunk/content/api/current/ocw/evaluation.html
    websites/staging/climate/trunk/content/api/current/ocw/metrics.html
    websites/staging/climate/trunk/content/api/current/ocw/overview.html
    websites/staging/climate/trunk/content/api/current/ocw/plotter.html
    websites/staging/climate/trunk/content/api/current/ocw/utils.html
    websites/staging/climate/trunk/content/api/current/py-modindex.html
    websites/staging/climate/trunk/content/api/current/search.html
    websites/staging/climate/trunk/content/api/current/searchindex.js
    websites/staging/climate/trunk/content/api/current/ui-backend/backend.html

Propchange: websites/staging/climate/trunk/content/
------------------------------------------------------------------------------
--- cms:source-revision (original)
+++ cms:source-revision Wed May  2 18:42:25 2018
@@ -1 +1 @@
-1830783
+1830784

Added: 
websites/staging/climate/trunk/content/api/current/_sources/config/config_overview.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/config/config_overview.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/config/config_overview.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,85 @@
+Configuration File Overview
+===========================
+
+Apache Open Climate Workbench includes tools for creating and reading 
configuration files. Below is an explanation of the general configuration file 
structure, an in-depth look at the various configuration options, and 
explanations of how to use configuration files in an evaluation.
+
+Getting Started
+---------------
+
+OCW configuration files are written in `YAML <http://yaml.org/>`_ with type 
annotations that are supported by the `PyYAML library 
<http://pyyaml.org/wiki/PyYAMLDocumentation>`_. Let's look at an example 
configuration file to get started.
+
+.. code::
+
+    evaluation:
+        temporal_time_delta: 365
+        spatial_regrid_lats: !!python/tuple [-20, 20, 1]
+        spatial_regrid_lons: !!python/tuple [-20, 20, 1]
+
+    datasets:
+        reference:
+            data_source: local
+            file_count: 1
+            path: 
/tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+            variable: tasmax
+
+        targets:
+            - data_source: local
+              file_count: 1
+              path: 
/tmp/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
+              variable: tasmax
+    metrics:
+        - Bias
+
+    plots:
+        - type: contour
+          results_indeces:
+              - !!python/tuple [0, 0]
+          lats:
+              range_min: -20
+              range_max: 20
+              range_step: 1
+          lons:
+              range_min: -20
+              range_max: 20
+              range_step: 1
+          output_name: wrf_bias_compared_to_knmi
+          optional_args:
+              gridshape: !!python/tuple [6, 6]
+    
+There are 4 main categories for configuration settings: Evaluation, Datasets, 
Metrics, and Plots.
+
+Evaluation Settings
+-------------------
+
+This is where you will set evaluation specific settings such as temporal and 
spatial bin sizes to use during dataset preparation. Visit the :doc:`Evaluation 
Settings <evaluation_settings>` page for additional information.
+
+Dataset Information
+-------------------
+
+The datasets section is where you specify all the datasets to use for an 
evaluation. You can specify what the reference dataset should be as well as 
giving a list of target datasets. Visit the :doc:`Dataset Information 
<dataset_information>` page for additional information.
+
+Metrics Information
+-------------------
+
+You will need to load some metrics if you want to get anything useful out of 
your evaluation. Visit the :doc:`Metrics Information <metrics_information>` 
page to learn how to specify the metrics that should be used in the evaluation.
+
+Plots Settings
+--------------
+
+This is where you specify what plots to make after running the evaluation. The 
:doc:`Plots Settings <plots_settings>` page provides examples for the supported 
plot types.
+
+Example Run
+-----------
+
+If you have tried the **simple_model_to_model_bias.py** example in the primary 
toolkit examples you can run the same evaluation but use a config file to do so 
instead of direct API scripting. From the **climate/ocw-config-runner/** 
directory run the following command to run the example::
+
+    python ocw_evaluation_from_config.py 
example/simple_model_to_model_bias.yaml
+
+.. note::
+
+    If you haven't run the previous example which downloads the necessary 
datasets this evaluation will fail. The necessary local files will not have 
been downloaded!
+
+Writing a Config File
+---------------------
+
+You can export an :class:`evaluation.Evaluation` object to a configuration 
file for easily repeatable evaluations. Checkout the documentation on the 
:doc:`configuration file writer API <config_writer>` for additional information.

Added: 
websites/staging/climate/trunk/content/api/current/_sources/config/config_writer.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/config/config_writer.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/config/config_writer.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,5 @@
+Configuration File Writer API
+=============================
+
+.. automodule:: ocw_config_runner.configuration_writer
+    :members:

Added: 
websites/staging/climate/trunk/content/api/current/_sources/config/dataset_information.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/config/dataset_information.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/config/dataset_information.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,89 @@
+Dataset Information
+===================
+
+Dataset configuration information is passed in the **datasets** section of the 
configuration file. You can specify one reference dataset and one or more 
target datasets for your evaluation::
+
+    datasets:
+        reference:
+            data_source: local
+            file_count: 1
+            path: 
/tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+            variable: tasmax
+
+        targets:
+            - data_source: local
+              file_count: 1
+              path: 
/tmp/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
+              variable: tasmax
+            - data_source: local
+              file_count: 1
+              path: 
/tmp/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
+              variable: tasmax
+
+Each **data_source** module requires datasets to be passed in a slightly 
different manner. Below is an explanation of the format for each of the 
supported data sources.
+
+Local Dataset
+-------------
+.. code::
+
+    data_source: local
+    file_count: 1
+    path: /tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+    variable: tasmax
+
+The **path** flag is the location where the dataset is located on your 
computer. The **variable** flag is the variable that should be pulled out of 
the NetCDF file once it has been opened. You pass any optional flags that are 
accepted by :func:`local.load_file` by using the **optional_args** flag::
+
+    data_source: local
+    file_count: 1
+    path: /tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+    variable: tasmax
+    optional_args:
+        elevation_index=0,
+        name='foo'
+
+.. note::
+
+    The **file_count** flag is currently not used. It is there to support 
planned future functionality. However, you still need to specify it! Leave it 
as 1.
+
+
+RCMED Dataset
+-------------
+
+.. code::
+    
+    data_source: rcmed
+    dataset_id: 4
+    parameter_id: 32
+    min_lat: -10
+    max_lat: 10
+    min_lon: -20
+    max_lon: 20
+    start_time: 1997-01-01
+    end_time: 2000-01-01
+
+To load a dataset from the Jet Propulsion Laboratory's RCMED you will need to 
specify the above flags. The **dataset_id** and **parameter_id** are dataset 
specific and can be looked up on the `RCMES project website 
<https://rcmes.jpl.nasa.gov/content/rcmes-and-data>`_. Pretty much any common 
time format will be accepted for the start and end times. However, just to be 
safe you should try to stick with something very standard such as `ISO-8601 
<http://www.w3.org/TR/NOTE-datetime>`_ formatted time values. You may also pass 
any optional parameters that are accepted by :func:`rcmed.parameter_dataset` 
with the **optional_args** flag.
+
+ESGF Dataset
+------------
+
+In order to load an ESGF dataset you will need to specify the following 
parameters in addition to having an ESGF login::
+
+    data_source: esgf
+    dataset_id: obs4MIPs.CNES.AVISO.mon.v1|esg-datanode.jpl.nasa.gov
+    variable: zosStderr
+    esgf_password: totallynotmypassword
+    esgf_username: totallynotmyusername
+
+The **dataset_id** and **variable** flags are ESGF dataset specific. You can 
locate them through an ESGF node's search page. You may also pass any optional 
parameters that are accepted by :func:`esgf.load_dataset` with the 
**optional_args** flag.
+
+
+OpenDAP Dataset
+---------------
+
+A dataset can be downloaded from an OpenDAP URL with the following settings::
+
+    data_source: dap
+    url: http://test.opendap.org/dap/data/nc/sst.mnmean.nc.gz
+    variable: sst
+
+You may also pass any optional parameters that are accepted by 
:func:`dap.load` with the **optional_args** flag.

Added: 
websites/staging/climate/trunk/content/api/current/_sources/config/evaluation_settings.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/config/evaluation_settings.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/config/evaluation_settings.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,56 @@
+Evaluation Settings
+===================
+
+The evaluation settings section of the configuration file allows you to set 
attributes that are critical for making adjustments to the loaded datasets 
prior to an evaluation run. Here is an example evaluation settings section of a 
configuration file. Below, we'll look at each of the configuration options in 
detail.
+
+.. code::
+
+    evaluation:
+        temporal_time_delta: 365
+        spatial_regrid_lats: !!python/tuple [-20, 20, 1]
+        spatial_regrid_lons: !!python/tuple [-20, 20, 1]
+
+Temporal Rebin
+--------------
+
+It is often necessary to temporally rebin datasets prior to an evaluation. The 
**temporal_time_delta** flag is where you can set the **temporal_resolution** 
parameter for :func:`dataset_processor.temporal_rebin`. The value that you pass 
here is interpreted as the number of days to assign to a 
:class:`datetime.timedelta` object before running the 
:func:`dataset_processor.temporal_rebin` function.
+
+.. note::
+
+    This attribute is only useful if you use the configuration data to create 
an :class:`evaluation.Evaluation` object with the 
:func:`evaluation_creation.generate_evaluation_from_config` config parser 
function.
+
+Spatial Regrid
+--------------
+
+.. note::
+
+    Some functionality here is still in development. Specifically, passing the 
spatial_regrid_* flags as lists of values.
+
+If you need to regrid your datasets onto a new lat/lon grid you will need to 
set the **spatial_regrid_lats** and **spatial_regrid_lons** options. These will 
be passed to the :func:`dataset_processor.spatial_regrid` function along with 
each dataset. There are two valid ways to pass these parameters. First, you can 
pass them as a list of all values::
+
+    evaluation:
+        spatial_regrid_lats: [-10, -5, 0, 5, 10]
+        spatial_regrid_lons: [-10, -5, 0, 5, 10]
+
+This is generally useful if you only need to pass a few parameters or if the 
sequence isn't easy to define as a valid **range** in Python. The other option 
is to pass **range** information as a tuple. This requires you to use `PyYAML's 
Python Type Annotations 
<http://pyyaml.org/wiki/PyYAMLDocumentation#YAMLtagsandPythontypes>`_ but 
provides a far more compact representation::
+
+    evaluation:
+        spatial_regrid_lats: !!python/tuple [-20, 20, 1]
+        spatial_regrid_lons: !!python/tuple [-20, 20, 1]
+
+Using this style directly maps to a call to :func:`numpy.arange`::
+
+    # spatial_regrid_lats: !!python/tuple [-20, 20, 1] becomes
+    lats = numpy.arange(-20, 20, 1)
+
+Be sure to pay special attention to the end value for your interval. The 
:func:`numpy.arange` function does not include the end value in the returned 
interval.
+
+Subset Bounds
+-------------
+
+In order to subset the datasets down to an area of interest you will need to 
pass bounds information::
+
+    evaluation:
+        subset: [-10, 10, -20, 20, "1997-01-01", "2000-01-01"]
+
+Here you're passing the bounding lat/lon box with the first 4 values as well 
as the valid temporal range bounds with the starting and end time values. 
Pretty much any common time format will be accepted. However, just to be safe 
you should try to stick with something very standard such as `ISO-8601 
<http://www.w3.org/TR/NOTE-datetime>`_ formatted time values.

Added: 
websites/staging/climate/trunk/content/api/current/_sources/config/metrics_information.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/config/metrics_information.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/config/metrics_information.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,12 @@
+Metrics Information
+===================
+
+.. note::
+
+    At the moment, you can only load metrics that are in :mod:`ocw.metrics`. 
In the future you will also be able to specify user defined metrics here as 
well. However, as a work around you can define your custom metrics in the 
:mod:`ocw.metrics` module.
+
+You can set the metrics you want to use in the evaluation in the **metrics** 
section of the config. You simply need to supply a list of the metric class 
names that you want to be used::
+
+    metrics:
+        - Bias
+        - TemporalStdDev

Added: 
websites/staging/climate/trunk/content/api/current/_sources/config/plots_settings.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/config/plots_settings.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/config/plots_settings.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,44 @@
+Plots Settings
+==============
+
+Plotting configuration information is passed in the **plots** section of the 
configuration file::
+
+    plots:
+        - type: contour
+          results_indeces:
+              - !!python/tuple [0, 0]
+          lats:
+              range_min: -20
+              range_max: 20
+              range_step: 1
+          lons:
+              range_min: -20
+              range_max: 20
+              range_step: 1
+          output_name: wrf_bias_compared_to_knmi
+          optional_args:
+              gridshape: !!python/tuple [6, 6]
+
+Each supported plot type has a different expected configuration format. Each 
of these are explained below. Note, most of these will require you to specify 
what result data you want included in the plots with the **results_indeces** 
flag. This relates to the format that an Evaluation object outputs results in. 
Check the :class:`evaluation.Evaluation` documentation for more details.
+
+Contour Maps
+-------------
+
+The contour maps config configures data for OCW's contour plotter 
:func:`plotting.draw_contour_map`::
+
+    type: contour
+          results_indeces:
+              - !!python/tuple [0, 0]
+          lats:
+              range_min: -20
+              range_max: 20
+              range_step: 1
+          lons:
+              range_min: -20
+              range_max: 20
+              range_step: 1
+          output_name: wrf_bias_compared_to_knmi
+          optional_args:
+              gridshape: !!python/tuple [6, 6]
+
+The **lat** and **lon** parameters are specified as a range of values. Be 
aware that the **range_max** element is not included in the output range so you 
may need to adjust it slightly if you want a particular value included. The 
**output_name** parameter is the name of the resulting output graph. You may 
also pass any optional parameters that are supported by the 
:func:`plotting.draw_contour_map` function with the **optional_args** flag.

Added: 
websites/staging/climate/trunk/content/api/current/_sources/data_source/data_sources.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/data_source/data_sources.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/data_source/data_sources.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,27 @@
+Data Sources
+************
+
+Local Module
+============
+.. automodule:: local
+    :members:
+
+RCMED Module
+============
+.. automodule:: rcmed
+    :members:
+
+DAP Module
+==========
+.. automodule:: dap
+    :members:
+
+ESGF Module
+===========
+.. automodule:: esgf
+    :members:
+
+PODAAC Module
+=============
+.. automodule:: podaac_datasource
+    :members:

Added: websites/staging/climate/trunk/content/api/current/_sources/index.rst.txt
==============================================================================
--- websites/staging/climate/trunk/content/api/current/_sources/index.rst.txt 
(added)
+++ websites/staging/climate/trunk/content/api/current/_sources/index.rst.txt 
Wed May  2 18:42:25 2018
@@ -0,0 +1,37 @@
+.. Apache Open Climate Workbench documentation master file, created by
+   sphinx-quickstart on Fri Oct 25 07:58:45 2013.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to Apache Open Climate Workbench's documentation!
+=========================================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 4
+
+   ocw/overview
+   ocw/dataset
+   ocw/dataset_loader
+   ocw/dataset_processor
+   ocw/evaluation
+   ocw/metrics
+   ocw/plotter
+   ocw/statistical_downscaling
+   ocw/utils
+   data_source/data_sources
+   ui-backend/backend
+   config/config_overview
+   config/config_writer
+   config/dataset_information
+   config/evaluation_settings
+   config/metrics_information
+   config/plots_settings
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`

Added: 
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset.rst.txt 
(added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset.rst.txt 
Wed May  2 18:42:25 2018
@@ -0,0 +1,12 @@
+Dataset Module
+**************
+
+Bounds
+======
+.. autoclass:: dataset.Bounds
+    :members:
+
+Dataset
+=======
+.. autoclass:: dataset.Dataset
+    :members:

Added: 
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset_loader.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset_loader.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset_loader.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,5 @@
+Dataset Loader Module
+*********************
+
+.. automodule:: dataset_loader
+    :members:

Added: 
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset_processor.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset_processor.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/ocw/dataset_processor.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,5 @@
+Dataset Processor Module
+************************
+
+.. automodule:: dataset_processor
+   :members:

Added: 
websites/staging/climate/trunk/content/api/current/_sources/ocw/evaluation.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/ocw/evaluation.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/ocw/evaluation.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,5 @@
+Evaluation Module
+*****************
+
+.. autoclass:: evaluation.Evaluation
+    :members:

Added: 
websites/staging/climate/trunk/content/api/current/_sources/ocw/metrics.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/ocw/metrics.rst.txt 
(added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/ocw/metrics.rst.txt 
Wed May  2 18:42:25 2018
@@ -0,0 +1,5 @@
+Metrics Module
+**************
+
+.. automodule:: metrics
+    :members:

Added: 
websites/staging/climate/trunk/content/api/current/_sources/ocw/overview.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/ocw/overview.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/ocw/overview.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,179 @@
+Overview
+========
+
+The Apache Open Climate Workbench toolkit aims to provide a suite of tools to 
make Climate Scientists lives easier. It does this by providing tools for 
loading and manipulating datasets, running evaluations, and plotting results. 
Below is a breakdown of many of the OCW components with an explanation of how 
to use them. An OCW evaluation usually has the following steps:
+
+1. Load one or more datasets
+2. Perform dataset manipulations (subset, temporal/spatial rebin, etc.)
+3. Load various metrics
+4. Instantiate and run the evaluation
+5. Plot results
+
+Common Data Abstraction
+-----------------------
+
+The OCW :class:`dataset.Dataset` class is the primary data abstraction used 
throughout OCW. It facilitates the uniform handling of data throughout the 
toolkit and provides a few useful helper functions such as 
:func:`dataset.Dataset.spatial_boundaries` and 
:func:`dataset.Dataset.temporal_boundaries`. Creating a new dataset object is 
straightforward but generally you will want to use an OCW data source to load 
the data for you.
+
+Data Sources
+------------
+
+OCW data sources allow users to easily load :class:`dataset.Dataset` objects 
from a number of places. These data sources help with step 1 of an evaluation 
above. In general the primary file format that is supported is NetCDF. For 
instance, the :mod:`local`, :mod:`dap` and :mod:`esgf` data sources only 
support loading NetCDF files from your local machine, an OpenDAP URL, and the 
ESGF respectively. Some data sources, such as :mod:`rcmed`, point to externally 
supported data sources. In the case of the RCMED data source, the Regional 
Climate Model Evaluation Database is run by NASA's Jet Propulsion Laboratory. 
+
+Adding additional data sources is quite simple. The only API limitation that 
we have on a data source is that it returns a valid :class:`dataset.Dataset` 
object. Please feel free to send patches for adding more data sources. 
+
+A simple example using the :mod:`local` data source to load a NetCDF file from 
your local machine::
+
+>>> import ocw.data_source.local as local
+>>> ds = local.load_file('/tmp/some_dataset.nc', 'SomeVarInTheDataset')
+
+Dataset Manipulations
+---------------------
+
+All :class:`dataset.Dataset` manipulations are handled by the 
:mod:`dataset_processor` module. In general, an evaluation will include calls 
to :func:`dataset_processor.subset`, :func:`dataset_processor.spatial_regrid`, 
and :func:`dataset_processor.temporal_rebin` to ensure that the datasets can 
actually be compared. :mod:`dataset_processor` functions take a 
:class:`dataset.Dataset` object and some various parameters and return a 
modified :class:`dataset.Dataset` object. The original dataset is never 
manipulated in the process.
+
+Subsetting is a great way to speed up your processing and keep useless data 
out of your plots. Notice that we're using a :class:`dataset.Bounds` object to 
represent the area of interest::
+
+>>> import ocw.dataset_processor as dsp
+>>> new_bounds = Bounds(min_lat, max_lat, min_lon, max_lon, start_time, 
end_time)
+>>> knmi_dataset = dsp.subset(knmi_dataset, new_bounds)
+
+Temporally re-binning a dataset is great when the time step of the data is too 
fine-grained for the desired use. For instance, perhaps we want to see a yearly 
trend but we have daily data. We would need to make the following call to 
adjust our dataset::
+
+>>> knmi_dataset = dsp.temporal_rebin(knmi_dataset, 
datetime.timedelta(days=365))
+
+It is critically necessary for our datasets to be on the same lat/lon grid 
before we try to compare them. That's where spatial re-gridding comes in 
helpful. Here we re-grid our example dataset onto a 1-degree lat/lon grid 
within the range that we subsetted the dataset previously::
+
+>>> new_lons = np.arange(min_lon, max_lon, 1)
+>>> new_lats = np.arange(min_lat, max_lat, 1)
+>>> knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
+
+Metrics
+-------
+
+Metrics are the backbone of an evaluation. You'll find a number of (hopefully) 
useful "default" metrics in the :mod:`metrics` module in the toolkit. In 
general you won't be too likely to use a metric outside of an evaluation, 
however you could run a metric manually if you so desired.::
+
+>>> import ocw.metrics
+>>> # Load 2 datasets
+>>> bias = ocw.metrics.Bias()
+>>> print bias.run(dataset1, dataset2)
+
+While this might be exactly what you need to get the job done, it is far more 
likely that you'll need to run a number of metrics over a number of datasets. 
That's where running an evaluation comes in, but we'll get to that shortly.
+
+There are two "types" of metrics that the toolkit supports. A unary metric 
acts on a single dataset and returns a result. A binary metric acts on a target 
and reference dataset and returns a result. This is helpful to know if you 
decide that the included metrics aren't sufficient. We've attempted to make 
adding a new metric as simple as possible. You simply create a new class that 
inherits from either the unary or binary base classes and override the `run` 
function. At this point your metric will behave exactly like the included 
metrics in the toolkit. Below is an example of how one of the included metrics 
is implemented. If you need further assistance with your own metrics be sure to 
email the project's mailing list!::
+
+>>> class Bias(BinaryMetric):
+>>>     '''Calculate the bias between a reference and target dataset.'''
+>>> 
+>>>     def run(self, ref_dataset, target_dataset):
+>>>         '''Calculate the bias between a reference and target dataset.
+>>> 
+>>>         .. note::
+>>>            Overrides BinaryMetric.run()
+>>> 
+>>>         :param ref_dataset: The reference dataset to use in this metric 
run.
+>>>         :type ref_dataset: ocw.dataset.Dataset object
+>>>         :param target_dataset: The target dataset to evaluate against the
+>>>             reference dataset in this metric run.
+>>>         :type target_dataset: ocw.dataset.Dataset object
+>>> 
+>>>         :returns: The difference between the reference and target datasets.
+>>>         :rtype: Numpy Array
+>>>         '''
+>>>         return ref_dataset.values - target_dataset.values
+
+While this might look a bit scary at first, if we take out all the 
documentation you'll see that it's really extremely simple.::
+
+>>> # Our new Bias metric inherits from the Binary Metric base class
+>>> class Bias(BinaryMetric):
+>>>     # Since our new metric is a binary metric we need to override
+>>>     # the run function in the BinaryMetric base class.
+>>>     def run(self, ref_dataset, target_dataset):
+>>>         # To implement the bias metric we simply return the difference
+>>>         # between the reference and target dataset's values arrays.
+>>>         return ref_dataset.values - target_dataset.values
+
+It is very important to note that you shouldn't change the datasets that are 
passed into the metric that you're implementing. If you do you might cause 
unexpected results in future parts of the evaluation. If you need to do 
manipulations, copy the data first and do manipulations on the copy. Leave the 
original dataset alone!
+
+Handling an Evaluation
+----------------------
+
+We saw above that it is easy enough to run a metric over a few datasets 
manually. However, when we have a lot of datasets and/or a lot of metrics to 
run that can become tedious and error prone. This is where the 
:class:`evaluation.Evaluation` class comes in handy. It ensures that all the 
metrics that you choose are run over all combinations of the datasets that you 
input. Consider the following simple example::
+
+>>> import ocw.evaluation as eval
+>>> import ocw.data_source.local as local
+>>> import ocw.metrics as metrics
+>>> 
+>>> # Load a few datasets
+>>> ref_dataset = local.load_file(...)
+>>> target1 = local.load_file(...)
+>>> target2 = local.load_file(...)
+>>> target_datasets = [target1, target2]
+>>>
+>>> # Do some dataset manipulations here such as subsetting and regridding
+>>>
+>>> # Load a few metrics
+>>> bias = metrics.Bias()
+>>> tstd = metrics.TemporalStdDev()
+>>> metrics = [bias, tstd]
+>>>
+>>> new_eval = eval.Evaluation(ref_dataset, target_datasets, metrics)
+>>> new_eval.run()
+>>> print new_eval.results
+>>> print new_eval.unary_results
+
+First we load all of our datasets and do any manipulations (which we leave out 
for brevity). Then we load the metrics that we want to run, namely Bias and 
TemporalStdDev. We then load our evaluation object.::
+
+>>> new_eval = eval.Evaluation(ref_dataset, target_datasets, metrics)
+
+Notice two things about this. First, we're splitting the datasets into a 
reference dataset (ref_dataset) and a list of target datasets 
(target_datasets). Second, one of the metrics that we loaded 
(:class:`metrics.TemporalStdDev`) is a unary metric. The reference/target 
dataset split is necessary for handling binary metrics. When an evaluation is 
run, all the binary metrics are run against every (reference, target) dataset 
pair. So the above evaluation could be replaced with the following calls. Of 
course this wouldn't handle the unary metric, but we'll get to that in a 
second.::
+
+>>> result1 = bias.run(ref_dataset, target1)
+>>> result2 = bias.run(ref_dataset, target2)
+
+Unary metrics are handled slightly differently but they're still simple. Each 
unary metric passed into the evaluation is run against *every* dataset in the 
evaluation. So we could replace the above evaluation with the following calls::
+
+>>> unary_result1 = tstd(ref_dataset)
+>>> unary_result2 = tstd(target1)
+>>> unary_result3 = tstd(target2)
+
+The only other part that we need to explore to fully understand the 
:class:`evaluation.Evaluation` class is how the results are stored internally 
from the run. The `results` list is a multidimensional array holding all the 
binary metric results and the `unary_results` is a list holding all the unary 
metric results. To more accurately replace the above evaluation with manual 
calls we would write the following::
+
+>>> results = [
+>>>     # Results for target1
+>>>     [
+>>>         bias.run(ref_dataset, target1)
+>>>         # If there were other binary metrics, the results would be here.
+>>>     ],
+>>>     # Results for target2
+>>>     [
+>>>         bias.run(ref_dataset, target2)
+>>>         # If there were other binary metrics, the results would be here.
+>>>     ]
+>>> ]
+>>>
+>>> unary_results = [
+>>>     # Results for TemporalStdDev
+>>>     [
+>>>         tstd(ref_dataset),
+>>>         tstd(target1),
+>>>         tstd(target2)
+>>>     ]
+>>>     # If there were other unary metrics, the results would be in a list 
here.
+>>> ]
+
+Plotting
+--------
+
+Plotting can be fairly complicated business. Luckily we have `pretty good 
documentation 
<https://cwiki.apache.org/confluence/display/CLIMATE/Guide+to+Plotting+API>`_ 
on the project wiki that can help you out. There are also fairly simple 
examples in the project's example folder with the remainder of the code such as 
the following::
+
+>>> # Let's grab the values returned for bias.run(ref_dataset, target1)
+>>> results = bias_evaluation.results[0][0]
+>>>
+>>> # Here's the same lat/lons we used earlier when we were re-gridding
+>>> lats = new_lats
+>>> lons = new_lons
+>>> fname = 'My_Test_Plot'
+>>>  
+>>> plotter.draw_contour_map(results, lats, lons, fname)
+
+This would give you a contour map called `My_Test_Plot` for the requested bias 
metric run.

Added: 
websites/staging/climate/trunk/content/api/current/_sources/ocw/plotter.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/ocw/plotter.rst.txt 
(added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/ocw/plotter.rst.txt 
Wed May  2 18:42:25 2018
@@ -0,0 +1,5 @@
+Plotter Module
+**************
+
+.. automodule:: plotter
+    :members:

Added: 
websites/staging/climate/trunk/content/api/current/_sources/ocw/statistical_downscaling.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/ocw/statistical_downscaling.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/ocw/statistical_downscaling.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,8 @@
+Downscaling Module
+******************
+
+Downscaling
+===========
+.. autoclass:: statistical_downscaling.Downscaling
+    :members:
+

Added: 
websites/staging/climate/trunk/content/api/current/_sources/ocw/utils.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/ocw/utils.rst.txt 
(added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/ocw/utils.rst.txt 
Wed May  2 18:42:25 2018
@@ -0,0 +1,5 @@
+Utils Module
+************
+
+.. automodule:: utils
+   :members:

Added: 
websites/staging/climate/trunk/content/api/current/_sources/ui-backend/backend.rst.txt
==============================================================================
--- 
websites/staging/climate/trunk/content/api/current/_sources/ui-backend/backend.rst.txt
 (added)
+++ 
websites/staging/climate/trunk/content/api/current/_sources/ui-backend/backend.rst.txt
 Wed May  2 18:42:25 2018
@@ -0,0 +1,80 @@
+Evaluation UI Webservices
+*************************
+
+The OCW evaluation UI is a demonstration web application that is built upon the
+OCW toolkit. The web services for the application are written in Python on top
+of the Bottle Web Framework.
+
+Configuration and Dependencies
+==============================
+
+The Evaluation UI is built on top of the OCW toolkit and as such requires it to
+function properly. Please check the toolkit's documentation for relevant
+installation instructions. You will also need to ensure that you have Bottle
+installed. You can install it with:
+
+.. code::
+    
+    pip install bottle
+
+The backend serves the static files for the evaluation frontend as well. If you
+plan to use the frontend you need to ensure that the *app* directory is present
+in the main web service directory. The easiest way to do this is to create a
+symbolic link where the *run_webservices* module is located. Assuming you have
+the entire *ocw-ui* directory, you can do this with the following command.
+
+.. code::
+
+    cd ocw-ui/backend
+    ln -s ../frontend/app app
+
+Finally, to start the backend just run the following command.
+
+.. code::
+
+    python run_webservices.py
+    
+Web Service Explanation
+=======================
+
+The backend endpoints are broken up into a number of modules for ease of
+maintenance and understanding. The *run_webservices* module is the primary
+application module. It brings together all the various submodules into a
+useful system. It also defines a number of helpful endpoints for returning
+static files such as the index page, CSS files, JavaScript files, and more.
+
+Local File Metadata Extractors
+------------------------------
+
+The *local_file_metadata_extractors* module contains all the endpoints that are
+used to strip information out of various objects for display in the UI. At the
+moment, the main functionality is stripping out metadata from NetCDF files when
+a user wishes to *load* a local file into the evaluation.
+
+.. autobottle:: local_file_metadata_extractors:lfme_app
+
+Directory Helpers
+-----------------
+
+The *directory_helpers* module contains a number of endpoints for working
+directory manipulation. The frontend uses these endpoints to grab directory
+information (within a prefix path for security), return result directory
+information, and other things.
+
+.. autobottle:: directory_helpers:dir_app
+
+RCMED Helpers
+-------------
+
+The *rcmed_helpers* module contains endpoints for loading datasets from the
+Regional Climate Model Evaluation Database at NASA's Jet Propulsion Laboratory.
+
+.. autobottle:: rcmed_helpers:rcmed_app
+
+Processing Endpoints
+--------------------
+
+The *processing* module contains all the endpoints related to the running of
+evaluations.
+
+.. autobottle:: processing:processing_app

Modified: 
websites/staging/climate/trunk/content/api/current/_static/alabaster.css
==============================================================================
--- websites/staging/climate/trunk/content/api/current/_static/alabaster.css 
(original)
+++ websites/staging/climate/trunk/content/api/current/_static/alabaster.css 
Wed May  2 18:42:25 2018
@@ -272,15 +272,10 @@ div.admonition {
 }
 
 div.admonition tt.xref, div.admonition code.xref, div.admonition a tt {
-    background-color: ;
+    background-color: #FBFBFB;
     border-bottom: 1px solid #fafafa;
 }
 
-dd div.admonition {
-    margin-left: -60px;
-    padding-left: 60px;
-}
-
 div.admonition p.admonition-title {
     font-family: 'Garamond', 'Georgia', serif;
     font-weight: normal;
@@ -443,6 +438,16 @@ table.field-list p {
     margin-bottom: 0.8em;
 }
 
+/* Cloned from
+ * 
https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68
+ */
+.field-name {
+    -moz-hyphens: manual;
+    -ms-hyphens: manual;
+    -webkit-hyphens: manual;
+    hyphens: manual;
+}
+
 table.footnote td.label {
     width: .1px;
     padding: 0.3em 0 0.3em 0.5em;
@@ -488,11 +493,6 @@ dl pre, blockquote pre, li pre {
     padding-left: 30px;
 }
 
-dl dl pre {
-    margin-left: -90px;
-    padding-left: 90px;
-}
-
 tt, code {
     background-color: #ecf0f3;
     color: #222;

Modified: websites/staging/climate/trunk/content/api/current/_static/basic.css
==============================================================================
--- websites/staging/climate/trunk/content/api/current/_static/basic.css 
(original)
+++ websites/staging/climate/trunk/content/api/current/_static/basic.css Wed 
May  2 18:42:25 2018
@@ -4,7 +4,7 @@
  *
  * Sphinx stylesheet -- basic theme.
  *
- * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */
@@ -122,6 +122,8 @@ ul.keywordmatches li.goodmatch a {
 
 table.contentstable {
     width: 90%;
+    margin-left: auto;
+    margin-right: auto;
 }
 
 table.contentstable p.biglink {
@@ -149,9 +151,14 @@ table.indextable td {
     vertical-align: top;
 }
 
-table.indextable dl, table.indextable dd {
+table.indextable ul {
     margin-top: 0;
     margin-bottom: 0;
+    list-style-type: none;
+}
+
+table.indextable > tbody > tr > td > ul {
+    padding-left: 0em;
 }
 
 table.indextable tr.pcap {
@@ -183,6 +190,13 @@ div.genindex-jumpbox {
     padding: 0.4em;
 }
 
+/* -- domain module index --------------------------------------------------- 
*/
+
+table.modindextable td {
+    padding: 2px;
+    border-collapse: collapse;
+}
+
 /* -- general body styles --------------------------------------------------- 
*/
 
 div.body p, div.body dd, div.body li, div.body blockquote {
@@ -217,10 +231,6 @@ div.body td {
     text-align: left;
 }
 
-.field-list ul {
-    padding-left: 1em;
-}
-
 .first {
     margin-top: 0 !important;
 }
@@ -337,10 +347,6 @@ table.docutils td, table.docutils th {
     border-bottom: 1px solid #aaa;
 }
 
-table.field-list td, table.field-list th {
-    border: 0 !important;
-}
-
 table.footnote td, table.footnote th {
     border: 0 !important;
 }
@@ -377,6 +383,27 @@ div.figure p.caption span.caption-number
 div.figure p.caption span.caption-text {
 }
 
+/* -- field list styles ----------------------------------------------------- 
*/
+
+table.field-list td, table.field-list th {
+    border: 0 !important;
+}
+
+.field-list ul {
+    margin: 0;
+    padding-left: 1em;
+}
+
+.field-list p {
+    margin: 0;
+}
+
+.field-name {
+    -moz-hyphens: manual;
+    -ms-hyphens: manual;
+    -webkit-hyphens: manual;
+    hyphens: manual;
+}
 
 /* -- other body styles ----------------------------------------------------- 
*/
 
@@ -427,15 +454,6 @@ dl.glossary dt {
     font-size: 1.1em;
 }
 
-.field-list ul {
-    margin: 0;
-    padding-left: 1em;
-}
-
-.field-list p {
-    margin: 0;
-}
-
 .optional {
     font-size: 1.3em;
 }
@@ -592,6 +610,16 @@ span.eqno {
     float: right;
 }
 
+span.eqno a.headerlink {
+    position: relative;
+    left: 0px;
+    z-index: 1;
+}
+
+div.math:hover a.headerlink {
+    visibility: visible;
+}
+
 /* -- printout stylesheet --------------------------------------------------- 
*/
 
 @media print {

Modified: 
websites/staging/climate/trunk/content/api/current/_static/comment-bright.png
==============================================================================
Binary files - no diff available.

Modified: 
websites/staging/climate/trunk/content/api/current/_static/comment-close.png
==============================================================================
Binary files - no diff available.

Modified: websites/staging/climate/trunk/content/api/current/_static/comment.png
==============================================================================
Binary files - no diff available.

Modified: websites/staging/climate/trunk/content/api/current/_static/doctools.js
==============================================================================
--- websites/staging/climate/trunk/content/api/current/_static/doctools.js 
(original)
+++ websites/staging/climate/trunk/content/api/current/_static/doctools.js Wed 
May  2 18:42:25 2018
@@ -4,7 +4,7 @@
  *
  * Sphinx JavaScript utilities for all documentation.
  *
- * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */

Modified: 
websites/staging/climate/trunk/content/api/current/_static/down-pressed.png
==============================================================================
Binary files - no diff available.

Modified: websites/staging/climate/trunk/content/api/current/_static/down.png
==============================================================================
Binary files - no diff available.

Modified: websites/staging/climate/trunk/content/api/current/_static/file.png
==============================================================================
Binary files - no diff available.


Reply via email to