Consider the problem of taking a linear combination of m (n x n)-matrices 
stored in a (n x n x m)-array A. The weights are stored in a length-m vector 
w. 
In Matlab, we can accomplish this by

n = 100;

m = 10000;


A = rand(n,n,m);

y = randn(1,m);


times = zeros(1,10);


for cntr = 1:10

   tic;

   Z = sum(repmat(reshape(y,[1 1 m]),[n n 1]).*A,3);

   times(cntr) = toc;

end


sprintf('time = %f',mean(times))

This gives time = 0.3259 on my machine using R2014b. (Note: I am aware of 
the ridiculous amount of memory allocated by this solution, however, it 
turns out to be 25% faster than using bsxfun, for instance)

A Julia-implementation that accomplishes the same thing is

function matrixLinComb(A::Array{Float64,3},y::Array{Float64,2})
  Z = reshape(sum(reshape(y,1,1,m).*A,3),n,n)
end

n = 100
m = 10000

A = rand(n,n,m)
y = randn(1,m)

times = zeros(10)

for cntr = 1:10
  times[cntr] = @elapsed matrixLinComb(A,y)
end
println("time = $(mean(times))")

This gives time = 0.5130 using v0.4.0 . I've tried out different other 
implementations (broadcast, reshaping A and *multiply etc.), though this 
turned out the be the fastest method. Does anyone has an idea why Matlab's 
repmat function is superior to the Julia-implementation (or, as 
my colleagues call it - Julia doesn't work)? Thanks!

Reply via email to