I often solve fixed point problems where I apply a contraction mapping to 
an array and iterate until convergence. There is nothing in the algorithm 
that requires the new value at one array index to depend on neighboring 
indices — only on values obtained at the same index on the previous 
iteration.

It should be quite easy to parallelize each iteration of this algorithm. My 
question is what the optimal strategy would be for parallelizing the 
algorithm using the built-in Julia features. If I were using MPI I would 
simply assign each process a chunk of indices and let them update that 
portion of the array on each iteration. Would a similar approach be optimal 
in Julia also? If so, how would I do that? If not, what would be better?

To give you an idea of the type of algorithm I am talking about, I have 
included a working example below (it requires Grid.jl and Optim.jl). I 
realize that there is a lot of code below, so if it is too long to expect 
people on this forum to read, please let me know and I will try to condense 
my code. Thanks!

using Grid
using Optim
#=
    Computes and returns T^k v, where T is an operator, v is an initial
    condition and k is the number of iterates. Provided that T is a
    contraction mapping or similar, T^k v will be an approximation to
    the fixed point.

    Arguments:
    - T : the operator, a function taking and returning a value like `v`
    - v : the initial condition

    Keyword arguments:
    - err_tol    : stop once the sup-norm change between successive
                   iterates is <= err_tol
    - max_iter   : hard cap on the number of iterations
    - verbose    : print progress / convergence messages
    - print_skip : report progress every `print_skip` iterations

    Returns the last iterate of `v`.
=#
function compute_fixed_point(T::Function, v; err_tol=1e-3, max_iter=50,
                             verbose=true, print_skip=10)
    # `iter` rather than `iterate`: the latter shadows Base.iterate on
    # Julia >= 1.0.
    iter = 0
    err = err_tol + 1  # guarantees the loop body runs at least once
    while iter < max_iter && err > err_tol
        new_v = T(v)
        iter += 1
        # maximum(abs, ...) replaces the deprecated Base.maxabs and
        # avoids no extra semantics: it is the sup-norm of the update.
        err = maximum(abs, new_v - v)
        if verbose && iter % print_skip == 0
            println("Compute iterate $iter with error $err")
        end
        v = new_v
    end

    if iter < max_iter && verbose
        println("Converged in $iter steps")
    elseif iter == max_iter
        # NOTE(review): `warn` is the pre-1.0 API; on Julia >= 1.0 this
        # would be `@warn` from Logging — kept for consistency with the
        # rest of this (old-dialect) file.
        warn("max_iter exceeded in compute_fixed_point")
    end

    return v
end

#=
    Return a range of `n_x` evenly spaced points from `x_min` to `x_max`
    (inclusive), built from an explicit step — a `linspace`-like helper.
=#
function linspace_range(x_min, x_max, n_x)
    step = (x_max - x_min) / (n_x - 1)
    return x_min:step:x_max
end

#=
    Container for the primitives of the optimal growth model.
    Field order matters: it defines the positional constructor used by
    the outer constructor below.
=#
type GrowthModel
    f::Function       # production function, capital k -> output
    bet::Real         # discount factor (presumably 0 < bet < 1 — not enforced)
    u::Function       # utility function of consumption
    grid_max::Int     # upper bound of the capital grid
    grid_size::Int    # number of grid points
    grid::FloatRange  # the capital grid (built by linspace_range)
end

# Default production function: Cobb-Douglas, f(k) = k^0.65.
function default_f(k)
    return k^0.65
end

# Default utility function: logarithmic, u(c) = log(c).
function default_u(c)
    return log(c)
end

#=
    Outer constructor with default primitives: Cobb-Douglas production,
    log utility, discount factor 0.95, and a 150-point capital grid on
    [1e-6, grid_max] (the small lower bound keeps log(c) finite).
=#
function GrowthModel(f=default_f, bet=0.95, u=default_u,
                     grid_max=2, grid_size=150)
    k_grid = linspace_range(1e-6, grid_max, grid_size)
    return GrowthModel(f, bet, u, grid_max, grid_size, k_grid)
end

#=
    The approximate Bellman operator, which computes and returns the
    updated value function Tw on the grid points. Could also return the
    policy function instead if asked.

    Arguments:
    - g   : the GrowthModel holding primitives and the capital grid
    - w   : current value function evaluated on g.grid
    - out : preallocated vector written in place (and returned)

    Keyword arguments:
    - ret_policy : if true, store the optimal consumption c* in `out`
                   instead of the updated value.

    NOTE: this is the function I would like to parallelize
=#
function bellman_operator!(g::GrowthModel, w::Vector, out::Vector;
                          ret_policy::Bool=false)
    # Apply linear interpolation to w (NaN boundary condition off-grid)
    Aw = CoordInterpGrid(g.grid, w, BCnan, InterpLinear)

    for (i, k) in enumerate(g.grid)
        # Hoist the loop-invariant income level: the original recomputed
        # g.f(k) on every objective evaluation inside the optimizer.
        y = g.f(k)
        # Negated, since `optimize` minimizes: min_c -u(c) - bet*w(y - c)
        objective(c) = - g.u(c) - g.bet * Aw[y - c]
        res = optimize(objective, 1e-6, y)
        # NOTE(review): in this (old) Optim API `res.minimum` appears to
        # hold the minimizer for univariate problems; newer Optim calls
        # this `res.minimizer` — confirm against the installed version.
        c_star = res.minimum

        if ret_policy
            # set the policy equal to the optimal c
            out[i] = c_star
        else
            # set Tw[i] equal to max_c { u(c) + beta w(f(k_i) - c)}
            out[i] = - objective(c_star)
        end
    end

    return out
end

#=
    Non-mutating wrapper around `bellman_operator!`: allocates a fresh
    output vector of the same shape as `w` and returns the result of the
    in-place operator applied to it.
=#
function bellman_operator(g::GrowthModel, w::Vector;
                          ret_policy::Bool=false)
    Tw = similar(w)
    return bellman_operator!(g, w, Tw, ret_policy=ret_policy)
end

# Build a model with the default primitives.
gm = GrowthModel()
# Initial guess for the value function on the grid: an affine transform
# of utility evaluated at each grid point (any reasonable starting point
# works for a contraction).
v_init = 5 .* gm.u(gm.grid) .- 25

# Iterate the Bellman operator to an approximate fixed point v*.
v_star = compute_fixed_point(x->bellman_operator(gm, x),  v_init, 
max_iter=1000, err_tol=1e-7)
