Sure!
Thank you for investing your time ;-).

Regards,
Tobias

Am 22.01.2014 14:23, schrieb Julius Ziegler:
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA1

Could you attach your updated code, please?

Best,
Julius

On 01/22/2014 01:45 PM, Tobias Schmidt wrote:

_______________________________________________ NLopt-discuss

_______________________________________________ NLopt-discuss


- --
Dipl.-Inform. Julius Ziegler <[email protected]>

Institut für Mess- und Regelungstechnik
Karlsruher Institut für Technologie

Department of Measurement and Control
Karlsruhe Institute of Technology

Engler-Bunte-Ring 21
76131 Karlsruhe

Tel. +49 721 608 47146

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)
Comment: Using GnuPG with Thunderbird - http://www.enigmail.net/

iQEcBAEBAgAGBQJS38Y7AAoJEMXuNLCiya8Lg1MH/3duI8UCbeg0f92eY+H7jYQM
aMwxHvpRxezJR9XaDu5Lixe1rm8ImFpb74Cd91ju5dm8GbZzqNxCS02o9sr1LqsK
TGomGkF5EHYgNUocixxDPE/ND2fSnUz0gWMIKRItYtchRL3ozgYR9GYIwEgZcoOu
hJcD18c4Ue4E5nIsvdAAnOpVXc9QhYodIt7WLEXgJyvc4w/tncIFrLX2SYdQw4q9
Xy9xB3eOyC3M5osvoixn6Ns09fQLevcChFC8ky98EayNiezOpcHoXWWdGoTTIiLI
Sp2FDjmO1HwkEJQ8c+lOjYmGXwdcbzRiQeJ3gMJ0zPIjMQj2jmfXFwTOfSEfxYs=
=CFmS
-----END PGP SIGNATURE-----

_______________________________________________
NLopt-discuss mailing list
[email protected]
http://ab-initio.mit.edu/cgi-bin/mailman/listinfo/nlopt-discuss


/*
 * npg.cpp
 *
 * The function npg solves an n-person finite non-co-operative game to
 * compute one sample Nash Equilibrium. It uses an optimization formulation
 * of n-person non-co-operative games as described in the adjoining paper
 * "An Optimization Formulation to Compute Nash Equilibrium in finite
 * Games" presented by the author.
 *
 * The inputs to the function are:
 * a) M : The row vector containing number of strategies of each player.
 * b) U : The matrix containing the pay-offs to the players at various
 *        pure strategy combinations.
 *
 * The outputs are:
 * a) A : The matrix whose columns are mixed strategies to players at Nash
 *        equilibrium.
 * b) payoff : The row vector containing the pay-offs to the players at Nash
 *        equilibrium.
 * c) iterations : Number of iterations performed by the optimization
 *        method.
 * d) err : The absolute error in the objective function of the minimization
 *        problem.
 *
 * For theory of the method the adjoining paper may be consulted. Here an
 * example is given for explanation. Consider a 3-person game where each
 * player has 2 strategies each. So M = [2 2 2]. Suppose the pay-offs at the
 * pure strategy combinations (1-1-1), (1-1-2) and so on, as described by
 * the ranking in the theory, are given as the matrix U =
 * [2,7.5,0;3,.2,.3;6,3.6,1.5;2,3.5,5;0,3.2,9;2,3.2,5;0,2,3.2;2.1,0,1]. Then
 * after supplying M and U call [A,payoff,iterations,err] = npg(M,U).
 *
 * The method is capable of giving one sample Nash equilibrium out of
 * probably many present in a given game. The screenshot showing GUI has
 * been developed on the code using it as dll and VB.Net. The GUI software
 * may be made available on request.
 *
 * Any error in the code may be reported to [email protected]. Any
 * suggestion/comment is greatly appreciated.
 *
 *      Author: tschmidt
 *         See: n-person game for Optimization Toolbox on Matlab
 */

#include <math.h>

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <new>
#include <stdexcept>
#include <string>
#include <vector>

#include <nlopt.hpp>
#include <fpstatus.h>

#include "Eigen/Core"

using namespace Eigen;

// User data handed to the objective function myfun() through NLopt's f_data.
typedef struct {
        const int s, p;            // s: total number of pure strategies; p: number of pure-strategy combinations
        const MatrixXi& I;         // p x n matrix of 1-based indices into x, one row per combination
        const VectorXd& Us;        // row sums of the payoff matrix U (one entry per combination)
        unsigned int myfun_calls;  // objective-evaluation counter, reported as "iterations"
        double* pre_x;             // previous iterate, cached for the secant gradient estimate
        double* pre_f;             // previous objective value seen per coordinate (secant cache)
} myfun_data_t;

// User data for the vector-valued nonlinear inequality constraints confun_m().
typedef struct {
        const int p;          // number of pure-strategy combinations (rows of U and I)
        const VectorXi& pay;  // per strategy: 1-based index of the owning player's payoff variable in x
        const MatrixXi& I;    // p x n strategy-combination index matrix (1-based)
        const MatrixXd& U;    // p x n payoff matrix
        double* pre_x;        // previous iterate (secant gradient cache)
        double* pre_f;        // previous constraint values, one row per constraint (secant cache)
        double tol;           // tolerance subtracted from each constraint value
} confun_m_data_t;

// User data for the linear equality constraints linconfun_m(): Aeq * x = beq.
typedef struct {
        const MatrixXi& Aeq;  // each row selects one player's strategy probabilities
        const VectorXi& beq;  // right-hand side (all ones: each player's probabilities sum to 1)
        double* pre_x;        // previous iterate (secant gradient cache)
        double* pre_f;        // previous constraint values (secant cache)
        double tol;           // tolerance added to each constraint value
} linconfun_m_data_t;

// User data for zeroconfun(): n is the count of trailing variables of x
// (the per-player payoff variables) whose sum is constrained.
typedef struct {
        const int n;
} zeroconfun_data_t;

/*
 * Prints a double-valued Eigen matrix to stdout: one line per row,
 * entries formatted with "%f" and separated by tabs.
 */
void printMatD(const MatrixXd &mat) {
        const int rows = mat.rows();
        const int cols = mat.cols();
        for (int r = 0; r < rows; ++r) {
                for (int c = 0; c < cols; ++c)
                        std::printf("%f\t", mat(r, c));
                std::printf("\n");
        }
}

/*
 * Prints an integer-valued Eigen matrix to stdout: one line per row,
 * entries formatted with "%d" and separated by tabs.
 */
void printMatI(const MatrixXi &mat) {
        const int rows = mat.rows();
        const int cols = mat.cols();
        for (int r = 0; r < rows; ++r) {
                for (int c = 0; c < cols; ++c)
                        std::printf("%d\t", mat(r, c));
                std::printf("\n");
        }
}

/*
 * NLopt objective: f(x) = sum of the payoff variables x[s .. n-1] minus
 * sum_i Us(i) * prod_j x[I(i,j)-1].  At a Nash equilibrium of the
 * optimization formulation this objective reaches its minimum of 0.
 *
 * n      : total number of optimization variables (s probabilities + payoffs).
 * x      : current iterate.
 * grad   : if non-NULL, filled with a secant approximation of the gradient.
 * f_data : myfun_data_t carrying dimensions, I, Us and the secant caches.
 */
double myfun(unsigned int n, const double* x, double* grad, void* f_data) {
        myfun_data_t* data = (myfun_data_t*) f_data;
        double res = 0, prod = 1;
        int i;
        unsigned int j, k;
        // Count and trace every evaluation (the counter doubles as the
        // "iterations" output of npg()).
        ++data->myfun_calls;
        std::cout << data->myfun_calls << ": myfun(";
        for (int l = 0; l < n; ++l)
                std::printf("%.6f%s", x[l], l + 1 < n ? ", " : ") = ");

        // Sum of the n - s trailing payoff variables.
        for (k = 0; k < n - data->s; ++k) {
                res += x[data->s + k];
        }

        // Subtract, for each pure-strategy combination i, the row sum Us(i)
        // times the product of the involved strategy probabilities
        // (I holds 1-based indices into x).
        for (i = 0; i < data->p; ++i) {
                for (j = 0; j < n - data->s; ++j)
                        prod *= x[data->I(i, j) - 1];
                res -= data->Us(i) * prod;
                prod = 1;
        }
        std::cout << res << std::endl;

        if (grad) {
                // Per-coordinate secant estimate: slope between the current
                // value and the previously cached (x, f) pair; 0 when the
                // coordinate did not change.  NOTE(review): this compares
                // values from different iterates, so it is only a rough
                // approximation of the true partial derivatives.
                for (j = 0; j < n; ++j) {
                        grad[j] = (x[j] != data->pre_x[j]) ? (res - 
data->pre_f[j]) / (x[j]
                                        - data->pre_x[j]) : 0;
                        data->pre_f[j] = res;
                        data->pre_x[j] = x[j];
                }
        }
        return res;
}

/*
 * Vector-valued nonlinear inequality constraints (one per pure strategy):
 * for strategy i, the expected payoff of playing strategy i against the
 * others' mixed strategies must not exceed the owning player's payoff
 * variable x[pay(i)-1].
 *
 * m      : number of constraints (= number of pure strategies s).
 * result : output array of the m constraint values (NLopt wants h(x) <= 0).
 * n      : total number of optimization variables.
 * x      : current iterate.
 * grad   : if non-NULL, m x n row-major Jacobian, secant-approximated.
 * f_data : confun_m_data_t with pay, I, U and the secant caches.
 */
void confun_m(unsigned int m, double* result, unsigned int n, const double* x,
                double* grad, void* f_data) {
        confun_m_data_t* data = (confun_m_data_t*) (f_data);
        int i, j;
        unsigned int t, k;
        double add, prd;

        for (i = 0; i < m; ++i) {
                // Start from minus the owning player's payoff variable.
                result[i] = -x[data->pay(i) - 1];
                for (t = 0; t < n - m; ++t) {
                        add = 0;
                        // Sum over all combinations j that contain strategy
                        // i+1: the payoff entry U(j,k) of the owning player
                        // times the other players' strategy probabilities.
                        for (j = 0; j < data->p; ++j) {
                                prd = 1;
                                for (k = 0; k < n - m; ++k)
                                        if (i + 1 == data->I(j, k))
                                                prd *= data->U(j, k);
                                        else
                                                prd *= x[data->I(j, k) - 1];
                                // Only combinations where player t actually
                                // plays strategy i+1 contribute.
                                if (data->I(j, t) != i + 1)
                                        prd = 0;
                                add += prd;
                        }
                        result[i] += add;
                }
                // convert Constraint h(x) <= 0 to -h(x) >= 0
                result[i] = -result[i] - data->tol;
        }

        if (grad) {
                // Secant approximation of the m x n Jacobian, using the
                // cached previous iterate and constraint values; an entry is
                // 0 when the corresponding coordinate did not change.
                for (k = 0; k < m; ++k) {
                        for (t = 0; t < n; ++t) {
                                grad[k * n + t] = (x[t] != data->pre_x[t]) ? 
(data->pre_f[k * n
                                                + t] - result[k]) / 
(data->pre_x[t] - x[t]) : 0;
                                data->pre_f[k * n + t] = result[k];
                                data->pre_x[t] = x[t];
                        }
                }
        }
}

/*
 * Vector-valued linear equality constraints Aeq * x - beq = 0 (each row
 * forces one player's strategy probabilities to sum to 1).
 *
 * m      : number of constraints (rows of Aeq, = number of players).
 * result : output array of the m constraint values.
 * n      : total number of optimization variables.
 * x      : current iterate.
 * grad   : if non-NULL, m x n row-major Jacobian, secant-approximated.
 * f_data : linconfun_m_data_t with Aeq, beq, tol and the secant caches.
 */
void linconfun_m(unsigned int m, double* result, unsigned int n,
                const double* x, double* grad, void* f_data) {
        linconfun_m_data_t* data = (linconfun_m_data_t*) (f_data);
        unsigned int i, j;

        for (i = 0; i < m; ++i) {
                // result[i] = Aeq.row(i) * x - beq(i) + tol
                result[i] = -data->beq(i);
                for (j = 0; j < n; ++j)
                        result[i] += data->Aeq(i, j) * x[j];
                result[i] += data->tol;
        }

        if (grad) {
                // Secant approximation of the Jacobian from the cached
                // previous iterate; 0 where the coordinate did not change.
                // NOTE(review): the constraint is linear, so the exact
                // Jacobian would simply be Aeq.
                for (i = 0; i < m; ++i) {
                        for (j = 0; j < n; ++j) {
                                grad[i * n + j] = (x[j] != data->pre_x[j]) ? 
(data->pre_f[i * n
                                                + j] - result[i]) / 
(data->pre_x[j] - x[j]) : 0;
                                data->pre_f[i * n + j] = result[i];
                                data->pre_x[j] = x[j];
                        }
                }
        }
}

/*
 * Scalar equality constraint: the sum of the last data->n variables of x
 * (the per-player payoff variables) must be zero.  NOTE(review): this is
 * the zero-sum condition matching the demo game in main(); confirm it is
 * intended for general (non-zero-sum) games.
 *
 * n      : total number of optimization variables.
 * x      : current iterate.
 * grad   : if non-NULL, filled with the (exact) gradient of the constraint.
 * f_data : zeroconfun_data_t carrying the number of payoff variables.
 */
double zeroconfun(unsigned int n, const double* x, double* grad, void* f_data) {
        zeroconfun_data_t* data = (zeroconfun_data_t*) f_data;
        const unsigned int first = n - (unsigned int) data->n;
        unsigned int i;
        double ret = 0;

        // Bug fix: NLopt requires gradient callbacks to fill `grad` whenever
        // it is non-NULL, which is always the case for the gradient-based
        // LD_SLSQP algorithm used in npg().  The original left it
        // uninitialized (undefined behavior).  The constraint is linear, so
        // the gradient is exact: 1 for the summed coordinates, 0 otherwise.
        if (grad) {
                for (i = 0; i < n; ++i)
                        grad[i] = (i >= first) ? 1.0 : 0.0;
        }

        for (i = first; i < n; ++i)
                ret += x[i];
        return ret;
}

/*
 * Solves an n-person finite non-co-operative game for one sample Nash
 * equilibrium by minimizing the optimization formulation with NLopt SLSQP.
 *
 * iM          : row vector with the number of pure strategies per player.
 * iU          : payoff matrix with prod(iM) rows (one per pure-strategy
 *               combination) and one column per player.
 * oA          : (out) column i holds player i's equilibrium mixed strategy.
 * oPayoff     : (out) equilibrium payoff of each player.
 * oIterations : (out) number of objective-function evaluations performed.
 * oErr        : (out) absolute value of the final objective (0 at an exact
 *               equilibrium).
 *
 * Throws a std::string on dimension mismatch or when the optimizer aborts.
 */
int npg(RowVectorXi& iM, MatrixXd& iU, MatrixXd& oA, RowVectorXd& oPayoff,
                int* oIterations, double* oErr) throw (const std::string) {
#if 1 // editor folding of npg matrices initialization code
        int i = 0, j = 0, k = 0, p = 1;
        double V = 1;
        int n = iM.cols(); // number of players
        int s = iM.sum();  // total number of pure strategies
        oA = MatrixXd::Zero(iM.maxCoeff(), n);

        oPayoff = RowVectorXd::Zero(n);

        // p = number of pure-strategy combinations = prod(iM)
        for (i = 0; i < n; ++i)
                p = p * iM(i);

        if (p != iU.rows() || n != iU.cols())
                throw std::string("Error: Dimension mismatch!"); // ERR_DIM_MISMATCH

        // P(i): period with which player i's strategy index cycles in the
        // ranking of combinations; N(i): number of index values emitted.
        RowVectorXd P = RowVectorXd::Zero(n);
        RowVectorXd N = RowVectorXd::Zero(n);

        P(n - 1) = 1;

        for (i = n - 2; i >= 0; --i)
                P(i) = P(i + 1) * iM(i + 1);

        for (i = 0; i < n; ++i)
                N(i) = p / P(i);

        // Starting point: uniform mixed strategy for every player.
        VectorXd x0 = VectorXd::Zero(s + n);

        for (i = 0; i < n; ++i)
                for (j = 0; j < iM(i); ++j, ++k)
                        x0(k) = 1.0 / iM(i);

        VectorXd Us = iU.rowwise().sum();

        // V = prod_i (1/M_i)^M_i scales the initial payoff-variable guesses.
        for (i = 0; i < n; ++i)
                V = V * std::pow((1.0 / iM(i)), (double) iM(i));

        x0.tail(n) << V * (iU.colwise().sum().transpose());

        // Aeq row i selects player i's probabilities: Aeq * x = 1 per player.
        MatrixXi Aeq = MatrixXi::Zero(n, s + n);
        int cnt = 0;

        for (i = 0; i < n; ++i) {
                if (i != 0)
                        cnt += iM(i);
                for (j = 0; j < s; ++j)
                        if ((j + 1) <= iM.head(i + 1).sum() && (j + 1) > cnt)
                                Aeq(i, j) = 1;
        }

        VectorXi beq = VectorXi::Ones(n);
        // I(row, player) = 1-based index into x of the strategy that player
        // plays in pure-strategy combination 'row'.
        MatrixXi I = MatrixXi::Ones(p, n);

        int counter = 0, count = 0;

        for (i = 0; i < n; ++i)
                for (j = 0; j < N(i); ++j) {
                        ++counter;
                        if (i != 0 && counter > iM.head(i + 1).sum())
                                counter -= iM(i);
                        for (k = 0; k < P(i); ++k)
                                I(count++) = counter;
                }

        // Bounds: probabilities in [0,1]; payoff variables unbounded.
        VectorXd lb = VectorXd::Zero(s + n);
        VectorXd ub = VectorXd::Ones(s + n);
        // pay(strategy) = 1-based index of the owning player's payoff variable.
        VectorXi pay = VectorXi::Zero(s);
        counter = 0;

        for (i = 0; i < n; ++i)
                for (j = 0; j < iM(i); ++j)
                        pay(counter++) = i + 1 + s;

        lb.tail(n).setConstant(-HUGE_VAL);
        ub.tail(n).setConstant(HUGE_VAL);
#endif

#if 1 // nlopt optimization problem
        // Construct optimization problem
        nlopt::opt opt(nlopt::LD_SLSQP, s + n);
        //      nlopt::opt opt(nlopt::LN_COBYLA, s+n);

        // Secant-gradient scratch buffers.  std::vector replaces the
        // original std::calloc buffers, which were never freed and thus
        // leaked on every path — including the re-throwing catch handlers
        // below.  RAII releases them automatically.
        std::vector<double> fun_pre_x(s + n, 0.0);
        std::vector<double> fun_pre_f(s + n, 0.0);
        myfun_data_t fun_data = { s, p, I, Us, 0, fun_pre_x.data(),
                        fun_pre_f.data() };
        opt.set_min_objective(&myfun, &fun_data);

        std::vector<double> confun_pre_x(s + n, 0.0);
        std::vector<double> confun_pre_f(s * (s + n), 0.0);
        confun_m_data_t confun_m_data = { p, pay, I, iU, confun_pre_x.data(),
                        confun_pre_f.data(), 1e-10 };
        opt.add_inequality_mconstraint(&confun_m, (void*) &confun_m_data,
                        std::vector<double>(s, 1e-10));

        std::vector<double> linconfun_pre_x(s + n, 0.0);
        std::vector<double> linconfun_pre_f(n * (s + n), 0.0);
        linconfun_m_data_t linconfun_m_data = { Aeq, beq,
                        linconfun_pre_x.data(), linconfun_pre_f.data(), 1e-10 };
        opt.add_equality_mconstraint(&linconfun_m, (void*) &linconfun_m_data,
                        std::vector<double>(n, 1e-10));

        // Scalar equality constraint: the n payoff variables sum to zero.
        zeroconfun_data_t zeroconfun_data = { n };
        opt.add_equality_constraint(&zeroconfun, &zeroconfun_data, 1e-10);

        std::vector<double> lbv(lb.data(), lb.data() + lb.rows());
        std::vector<double> ubv(ub.data(), ub.data() + ub.rows());

        opt.set_lower_bounds(lbv);
        opt.set_upper_bounds(ubv);

        opt.set_xtol_rel(1e-6);
        opt.set_xtol_abs(1e-6);
        opt.set_ftol_rel(1e-6);
        opt.set_ftol_abs(1e-6);

        opt.set_maxeval(1e4);

        // Run the optimizer.  NLopt/stdlib exceptions are caught by const
        // reference (the original caught by value, copying and slicing) and
        // translated into the std::string error contract of this function.
        std::vector<double> x(x0.data(), x0.data() + x0.rows());
        try {
                opt.optimize(x);
        } catch (const nlopt::roundoff_limited& err) {
                throw std::string("Halted because roundoff errors limited progress! ")
                                + err.what();
        } catch (const nlopt::forced_stop& err) {
                throw std::string("Halted because of a forced termination! ")
                                + err.what();
        } catch (const std::invalid_argument& err) {
                throw std::string("Invalid arguments. ") + err.what();
        } catch (const std::bad_alloc& err) {
                throw std::string("Ran out of memory. ") + err.what();
        } catch (const std::runtime_error& err) {
                throw std::string("Generic failure. ") + err.what();
        }
#endif

        // Map the solution back: strategy probabilities into oA's columns,
        // payoff variables into oPayoff.
        count = 0;
        for (i = 0; i < n; ++i) {
                for (j = 0; j < iM(i); ++j)
                        oA(j, i) = std::fabs(x[count++]);
                oPayoff(i) = x[s + i];
        }
        *oErr = std::fabs(opt.last_optimum_value());
        *oIterations = fun_data.myfun_calls;

        return EXIT_SUCCESS;
}

/*
 * Demo driver: solves a two-player game with three pure strategies each
 * (the two payoff columns are negatives of each other, i.e. zero-sum,
 * matching the zero-sum constraint registered in npg) and prints the
 * resulting mixed strategies, payoffs, iteration count and error.
 */
int main(int argc, char *argv[]) {
        RowVectorXi m(2);
        m << 3, 3;
        // One row per pure-strategy pair (3 * 3 = 9), one column per player.
        MatrixXd u(9, 2);
        u << 0.1324, -0.1324, 0.1238, -0.1238, 0.1155, -0.1155,
             0.1470, -0.1470, 0.1384, -0.1384, 0.1301, -0.1301,
             0.1610, -0.1610, 0.1524, -0.1524, 0.1442, -0.1442;
        MatrixXd a;
        RowVectorXd payoff;
        int iter = 0;
        double err = 0.0;

        try {
                npg(m, u, a, payoff, &iter, &err);
        } catch (const std::string& message) {
                // Catch by const reference (was by value, which forced an
                // unnecessary copy of the thrown string).
                std::cout << message << std::endl;
                return EXIT_FAILURE;
        }

        std::cout << "Iterations: " << iter << "\nAbsolute error: " << err
                        << std::endl;
        std::cout << "A:" << std::endl;
        printMatD(a);
        std::cout << "Payoff:" << std::endl;
        printMatD(payoff);

        return EXIT_SUCCESS;
}
_______________________________________________
NLopt-discuss mailing list
[email protected]
http://ab-initio.mit.edu/cgi-bin/mailman/listinfo/nlopt-discuss

Reply via email to