author    KatolaZ <katolaz@yahoo.it>  2015-10-19 16:23:00 +0100
committer KatolaZ <katolaz@yahoo.it>  2015-10-19 16:23:00 +0100
commit    df8386f75b0538075d72d52693836bb8878f505b (patch)
tree      704c2a0836f8b9fd9f470c12b6ae05637c431468 /models
parent    363274e79eade464247089c105260bc34940da07 (diff)
First commit of MAMMULT code
Diffstat (limited to 'models')
-rw-r--r--  models/correlations/Makefile                 |  15
-rw-r--r--  models/correlations/fit_utils.c              | 382
-rw-r--r--  models/correlations/fit_utils.h              |  25
-rw-r--r--  models/correlations/rank_utils.c             | 217
-rw-r--r--  models/correlations/rank_utils.h             |  44
-rw-r--r--  models/correlations/tune_qnn_adaptive.c      | 208
-rw-r--r--  models/correlations/tune_rho.c               | 142
-rw-r--r--  models/growth/Makefile                       |  26
-rw-r--r--  models/growth/nibilab_linear_delay.c         | 414
-rw-r--r--  models/growth/nibilab_linear_delay_mix.c     | 457
-rw-r--r--  models/growth/nibilab_linear_delta.c         | 403
-rw-r--r--  models/growth/nibilab_linear_random_times.c  | 417
-rw-r--r--  models/growth/nibilab_nonlinear.c            | 493
-rw-r--r--  models/growth/node_deg_over_time.py          |  82
-rw-r--r--  models/nullmodels/model_MDM.py               |  66
-rw-r--r--  models/nullmodels/model_MSM.py               |  65
-rw-r--r--  models/nullmodels/model_hypergeometric.py    |  56
-rw-r--r--  models/nullmodels/model_layer_growth.py      | 121
18 files changed, 3633 insertions(+), 0 deletions(-)
diff --git a/models/correlations/Makefile b/models/correlations/Makefile
new file mode 100644
index 0000000..b3828aa
--- /dev/null
+++ b/models/correlations/Makefile
@@ -0,0 +1,15 @@
+CFLAGS=-O3
+CC=gcc
+GSL_FLAGS=-lgsl -lgslcblas -lm
+MFLAG=-lm
+
+all: tune_qnn_adaptive tune_rho
+
+tune_qnn_adaptive: tune_qnn_adaptive.c rank_utils.c fit_utils.c
+ $(CC) $(CFLAGS) -o tune_qnn_adaptive tune_qnn_adaptive.c rank_utils.c fit_utils.c $(GSL_FLAGS)
+
+tune_rho: tune_rho.c rank_utils.c
+ $(CC) $(CFLAGS) -o tune_rho tune_rho.c rank_utils.c $(MFLAG)
+
+clean:
+	rm -f tune_rho tune_qnn_adaptive
diff --git a/models/correlations/fit_utils.c b/models/correlations/fit_utils.c
new file mode 100644
index 0000000..e9e3954
--- /dev/null
+++ b/models/correlations/fit_utils.c
@@ -0,0 +1,382 @@
+/**
+ *
+ * Fit a given sequence with a power-law function
+ *
+ * a*x^{b}
+ *
+ * The fit is actually performed as a linear fit on the
+ * exponential-binned log-log distribution
+ *
+ *
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#include <gsl/gsl_fit.h>
+
+/**
+ *
+ * Load a sequence from a file, which contains one element on each line
+ *
+ */
+
+void load_sequence(char *fname, double **v, int *N){
+
+ int size;
+ char buff[256];
+
+ FILE *f;
+
+ f = fopen(fname, "r");
+ if (!f){
+ fprintf(stderr, "Error opening file %s!!!! Exiting...\n", fname);
+ exit(1);
+ }
+
+ *N =0;
+ size = 10;
+ if (*v == NULL)
+ *v = malloc(size * sizeof(double));
+ else{
+ *v = realloc(*v, size * sizeof(double));
+ }
+
+ while (fgets(buff, 255, f)){
+ (*v)[*N] = atof(buff);
+ *N += 1;
+ if (*N == size){
+ size += 10;
+ *v = realloc(*v, size * sizeof(double));
+ }
+ }
+ *v = realloc(*v, (*N) * sizeof(double));
+ fclose(f);
+}
+
+
+/**
+ *
+ * Load a sequence, getting the "col"-th column of the input file
+ *
+ */
+
+void load_sequence_col(char *fname, double **v, int *N, int col){
+
+ int size, n;
+ char buff[256];
+ char *ptr;
+
+ FILE *f;
+
+ f = fopen(fname, "r");
+ if (!f){
+ fprintf(stderr, "Error opening file %s!!!! Exiting...\n", fname);
+ exit(1);
+ }
+
+ *N =0;
+ size = 10;
+ if (*v == NULL)
+ *v = malloc(size * sizeof(double));
+ else{
+ *v = realloc(*v, size * sizeof(double));
+ }
+
+ while (fgets(buff, 255, f)){
+ ptr = strtok(buff, " ");
+ if (col > 0){
+ n = 0;
+ while (n<col){
+ ptr = strtok(NULL, " ");
+ n += 1;
+ }
+ }
+ (*v)[*N] = atof(ptr);
+ *N += 1;
+
+ if (*N == size){
+ size += 10;
+ *v = realloc(*v, size * sizeof(double));
+ }
+ }
+ *v = realloc(*v, (*N) * sizeof(double));
+ fclose(f);
+}
+
+
+/**
+ *
+ * Make the exponential binning of a distribution, with a given
+ * exponent alpha. The value of y[i] is the number of elements of v
+ * whose value lies between x[i-1] and x[i]...
+ *
+ */
+
+void exp_bin_cnt(double *v, int N, double alpha, double **x, double **y, int *num){
+
+ double min_v, max_v, val, last_val;
+ int i, j, size, num_x;
+ double last_size, new_size;
+
+ min_v = max_v = v[0];
+
+ for (i=1; i<N; i ++){
+ if (v[i] > max_v)
+ max_v = v[i];
+ else if (v[i] > 0 && v[i] < min_v)
+ min_v = v[i];
+ }
+
+ size = 10;
+ if (*x == NULL){
+ *x = malloc(size * sizeof(double));
+ }
+ else{
+ *x = realloc(*x, size * sizeof(double));
+ }
+
+ val = min_v;
+ last_size = min_v;
+ (*x)[0] = min_v;
+ num_x = 1;
+
+ while(val < max_v){
+ new_size = last_size * alpha;
+ val = last_size + new_size;
+ last_size = new_size;
+ last_val = val;
+ (*x)[num_x] = val;
+ num_x += 1;
+ if (num_x == size){
+ size += 10;
+ *x = realloc(*x, size * sizeof(double));
+ }
+ }
+
+ if (*y == NULL){
+ *y = malloc(num_x * sizeof(double));
+ }
+ else{
+ *y = realloc(*y, num_x * sizeof(double));
+ }
+ for (i=0; i < num_x; i++){
+ (*y)[i] = 0;
+ }
+
+
+
+  for(i=0; i <N; i ++){
+    j = 0;
+    while(j < num_x && v[i] > (*x)[j]){
+      j ++;
+    }
+    if (j == num_x){ /* v[i] fell past the last bin edge */
+      printf("Error!!!!! Trying to assign a non-existing bin!!! -- fit_utils.c:exp_bin_cnt!!!\n");
+      exit(37);
+    }
+    (*y)[j] += 1;
+  }
+ *num = num_x;
+}
+
+/**
+ *
+ * Make the exponential binning of a distribution, with a given
+ * exponent alpha. The value of y[i] is the average of the values in
+ * the vector "w" whose corresponding v lies between x[i-1] and x[i]...
+ *
+ */
+
+
+void exp_bin_avg(double *v, double *w, int N, double alpha, double **x, double **y, int *num){
+
+ double min_v, max_v, val, last_val;
+ int i, j, size, num_x;
+ double last_size, new_size;
+ int *cnt;
+
+
+ min_v = max_v = v[0];
+
+ for (i=1; i<N; i ++){
+ if (v[i] > max_v)
+ max_v = v[i];
+ else if (v[i] > 0 && v[i] < min_v)
+ min_v = v[i];
+ }
+
+ size = 10;
+ if (*x == NULL){
+ *x = malloc(size * sizeof(double));
+ }
+ else{
+ *x = realloc(*x, size * sizeof(double));
+ }
+
+ val = min_v;
+ last_size = min_v;
+ (*x)[0] = min_v;
+ num_x = 1;
+
+ while(val < max_v){
+ new_size = last_size * alpha;
+ val = last_size + new_size;
+ last_size = new_size;
+ last_val = val;
+ (*x)[num_x] = val;
+ num_x += 1;
+ if (num_x == size){
+ size += 10;
+ *x = realloc(*x, size * sizeof(double));
+ }
+ }
+
+
+ cnt = malloc(num_x * sizeof(int));
+
+ if (*y == NULL){
+ *y = malloc(num_x * sizeof(double));
+ }
+ else{
+ *y = realloc(*y, num_x * sizeof(double));
+ }
+ for (i=0; i < num_x; i++){
+ (*y)[i] = 0;
+ cnt[i] = 0;
+ }
+
+ for(i=0; i <N; i ++){
+ j = 0;
+ while(j < num_x && v[i] > (*x)[j]){
+ j ++;
+ }
+ if(j == num_x){
+      printf("Error!!!!! Trying to assign a non-existing bin!!! -- fit_utils.c:exp_bin_avg!!!\n");
+ exit(37);
+ }
+ (*y)[j] += w[i];
+ cnt[j] += 1;
+ }
+ *num = num_x;
+
+ for(i=0; i< num_x; i++){
+ if (cnt[i] > 0){
+ (*y)[i] = (*y)[i] / cnt[i];
+ }
+ }
+ free(cnt);
+}
+
+
+/**
+ *
+ * Print a distribution on stdout
+ *
+ */
+
+void dump_distr(double *x, double *y, int N){
+ int i;
+
+ for(i=0; i<N; i ++){
+ printf("%g %g\n", x[i], y[i]);
+ }
+
+}
+
+
+/**
+ * Compact a distribution, leaving only the pairs (x_i, y_i) for which
+ * y_i > 0
+ *
+ */
+
+void compact_distr(double *x, double *y, int *num){
+
+ int i, j;
+
+ i = j = 0;
+ while(j < *num){
+ while(j < *num && y[j] == 0){
+ j ++;
+ }
+ if (j==*num){
+ break;
+ }
+ x[i] = x[j];
+ y[i] = y[j];
+ i ++;
+ j ++;
+ }
+ *num = i;
+}
+
+
+/**
+ *
+ * Apply the function "f" on all the elements of a vector, in-place
+ *
+ */
+
+void map_vec(double *v, int N, double (*f)(double)){
+ int i;
+
+ for (i=0; i<N; i ++){
+ v[i] = f(v[i]);
+ }
+}
+
+
+/**
+ *
+ * Normalize a distribution, dividing each y[i] by the width of the
+ * corresponding bin (i.e., x[i] - x[i-1])
+ *
+ */
+void normalize_distr(double *x, double *y, int num){
+
+ int i;
+
+ for(i=1; i<num; i ++){
+ y[i] /= (x[i] - x[i-1]);
+ }
+}
+
+/**
+ *
+ * Take two vectors (k and q) and a pairing, and compute the best
+ * power-law fit "a*k^{mu}" of qnn(k)
+ *
+ */
+
+void fit_current_trend(double *k, double *q, int N, int *pairing, double *mu, double *a,
+ double *corr){
+
+  static double *q_pair, *x, *y;
+
+ int i;
+ int fit_num;
+ double c0, c1, cov00, cov01, cov11, sqsum;
+
+ if (q_pair == NULL){
+ q_pair = malloc(N * sizeof(double));
+ }
+
+ for(i=0; i<N; i++){
+ q_pair[i] = q[pairing[i]];
+ }
+
+ exp_bin_avg(k, q_pair, N, 1.3, &x, &y, &fit_num);
+ //normalize_distr(x,y, fit_num);
+ compact_distr(x, y, &fit_num);
+
+ map_vec(x, fit_num, log);
+ map_vec(y, fit_num, log);
+ gsl_fit_linear(x, 1, y, 1, fit_num, &c0, &c1, &cov00, &cov01, &cov11, &sqsum);
+
+ //fprintf(stderr,"cov00: %g cov01: %g cov11: %g\n", cov00, cov01, cov11);
+ //fprintf(stderr, "corr: %g ", cov01 / sqrt(cov00 * cov11));
+  *a = c0; /* intercept of the log-log fit, i.e. log(a) */
+ *mu = c1;
+ *corr = cov01/sqrt(cov00 * cov11);
+}
+
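A minimal driver showing how the utilities above are meant to chain together (a sketch, not part of the commit; the command-line interface and the assumption of one value per input line are hypothetical):

#include <stdio.h>
#include <stdlib.h>

#include "fit_utils.h"

int main(int argc, char *argv[]){

  double *v = NULL, *x = NULL, *y = NULL;
  int N, num;

  if (argc < 2){
    fprintf(stderr, "Usage: %s <sequence_file>\n", argv[0]);
    exit(1);
  }
  load_sequence(argv[1], &v, &N);        /* one value per line */
  exp_bin_cnt(v, N, 1.3, &x, &y, &num);  /* exponential bins, alpha = 1.3 */
  normalize_distr(x, y, num);            /* divide counts by bin widths */
  compact_distr(x, y, &num);             /* keep only non-empty bins */
  dump_distr(x, y, num);                 /* print the binned distribution */
  return 0;
}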
diff --git a/models/correlations/fit_utils.h b/models/correlations/fit_utils.h
new file mode 100644
index 0000000..183f47c
--- /dev/null
+++ b/models/correlations/fit_utils.h
@@ -0,0 +1,25 @@
+#ifndef __FIT_UTILS_H__
+#define __FIT_UTILS_H__
+
+
+void load_sequence(char *fname, double **v, int *N);
+
+void load_sequence_col(char *fname, double **v, int *N, int col);
+
+void exp_bin_cnt(double *v, int N, double alpha, double **x, double **y, int *num);
+
+void exp_bin_avg(double *v, double *w, int N, double alpha, double **x, double **y, int *num);
+
+void dump_distr(double *x, double *y, int N);
+
+void compact_distr(double *x, double *y, int *num);
+
+void map_vec(double *v, int N, double (*f)(double));
+
+void normalize_distr(double *x, double *y, int num);
+
+void fit_current_trend(double *k, double *q, int N, int *pairing,
+ double *mu, double *a, double *corr);
+
+
+#endif /* __FIT_UTILS_H__ */
diff --git a/models/correlations/rank_utils.c b/models/correlations/rank_utils.c
new file mode 100644
index 0000000..4b8ef41
--- /dev/null
+++ b/models/correlations/rank_utils.c
@@ -0,0 +1,217 @@
+/**
+ *
+ * Some utility functions to be used with tune_rho, compute_rho and
+ * the like
+ *
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <string.h>
+#include <time.h>
+
+#include "rank_utils.h"
+
+void load_ranking(char *fname, int *N, double **R){
+
+ char buff[256];
+ int size = 10;
+ FILE *f;
+
+ if (*R == NULL){
+ *R = malloc(size * sizeof (double));
+ }
+ f = fopen(fname, "r");
+ if (!f){
+    printf("Unable to open file: %s!!! Exiting\n", fname);
+ exit(1);
+ }
+
+ *N = 0;
+
+ while (fgets(buff, 255, f)){
+ if (* N == size){
+ size += 10;
+ *R = realloc(*R, size * sizeof(double));
+ }
+ (*R)[*N] = atof(buff);
+ *N += 1;
+  }
+  fclose(f);
+}
+
+double avg_array(double *v, int N){
+
+ double sum = 0.0;
+ int i;
+
+ for (i = 0; i < N; i ++){
+ sum += v[i];
+ }
+ return sum/N;
+}
+
+double compute_C(double *R1, double *R2, int N){
+ double mu1, mu2, sum1, sum2;
+
+ mu1 = avg_array(R1, N);
+ mu2 = avg_array(R2, N);
+
+ sum1 = mu1 * N;
+ sum2 = mu2 * N;
+
+ return N * mu1 * mu2 - mu2 * sum1 - mu1 * sum2;
+}
+
+
+double compute_D(double *R1, double *R2, int N){
+
+ double mu1, mu2, s1, s2;
+ int i;
+
+ mu1 = avg_array(R1, N);
+  mu2 = avg_array(R2, N);
+
+ s1 = s2 = 0.0;
+
+ for (i=0 ; i < N; i ++){
+ s1 += pow((R1[i] - mu1), 2);
+ s2 += pow((R2[i] - mu2), 2);
+ }
+
+ return sqrt(s1 * s2);
+}
+
+
+double compute_rho(double *R1, double *R2, int N, int *pairing){
+
+ double rho = 0;
+ int i;
+
+ for (i=0; i < N; i ++){
+ rho += R1[i] * R2[ pairing[i] ];
+ }
+
+ rho = (rho + compute_C(R1, R2, N))/ compute_D(R1, R2, N);
+ return rho;
+}
+
+void dump_ranking(double *R, int N){
+ int i;
+
+ for (i=0; i < N; i ++){
+ printf("%d: %f\n", i, R[i] );
+ }
+}
+
+
+void init_pairing_natural(int *pairing, int N){
+ int i;
+
+ for (i = 0; i< N; i ++){
+ pairing[i] = i;
+ }
+}
+
+void init_pairing_inverse(int *pairing, int N){
+ int i;
+
+ for (i = 0; i< N; i ++){
+ pairing[i] = N-i-1;
+ }
+}
+
+void select_pairing(int *pairing, int N, int argc, char *argv[], int pos){
+
+ if (argc < pos + 1 || !strncasecmp("rnd", argv[pos], 3)){
+ init_pairing_random(pairing, N);
+ }
+ else if (!strncasecmp("nat", argv[pos], 3)){
+ init_pairing_natural(pairing, N);
+ }
+ else if (!strncasecmp("inv", argv[pos], 3)){
+ init_pairing_inverse(pairing, N);
+ }
+ else{
+ printf ("Pairing strategy \"%s\" unknown!!! Exiting...\n", argv[pos]);
+ exit(1);
+ }
+
+}
+
+
+void shuffle_sequence(int *s, int N){
+
+ int i, j, tmp;
+
+ for (i=N-1; i>=0; i--){
+ j = rand() % (i+1);
+ tmp = s[j];
+ s[j] = s[i];
+ s[i] = tmp;
+ }
+}
+
+
+
+void init_pairing_random(int *pairing, int N){
+
+ init_pairing_natural(pairing, N);
+ shuffle_sequence(pairing, N);
+
+}
+
+/* Loads a pairing from a file, in the format:
+ *
+ * rank1 rank2
+ * ...........
+ */
+void load_pairing(int **pairing, int N, char *fname){
+
+ FILE *f;
+ int i, j, num;
+ char buff[256];
+ char *ptr;
+
+ f = fopen(fname, "r");
+ if (!f){
+ printf("Error opening file \"%s\"!!!! Exiting....\n", fname);
+ exit(2);
+ }
+
+ if (*pairing == NULL){
+ *pairing = malloc(N * sizeof(int));
+ init_pairing_natural(*pairing, N);
+ }
+
+ num = 0;
+ while(num < N){
+    if (!fgets(buff, 255, f))
+      break;
+ if (buff[0] == '#')
+ continue;
+ ptr = strtok(buff, " "); /* read the first node */
+ i = atoi(ptr);
+ ptr = strtok(NULL, " "); /* read the second node */
+ j = atoi(ptr);
+ (*pairing)[i] = j;
+ num += 1;
+  }
+  fclose(f);
+}
+
+
+void dump_pairing(int *pairing, int N){
+ int i;
+
+ for(i=0; i< N; i ++){
+ printf("%d %d\n", i, pairing[i]);
+ }
+}
+
+void copy_pairing(int *p1, int *p2, int N){
+ int i;
+
+ for (i=0 ; i < N; i ++){
+ p2[i] = p1[i];
+ }
+}
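For reference, compute_rho() together with the helper terms above evaluates the Pearson correlation of the paired rank vectors, which is Spearman's rho when R1 and R2 are ranks:

\rho = \frac{\sum_i R_1[i]\,R_2[p(i)] + C}{D}, \qquad
C = N\mu_1\mu_2 - \mu_2\sum_i R_1[i] - \mu_1\sum_i R_2[i] = -N\mu_1\mu_2, \qquad
D = \sqrt{\sum_i \left(R_1[i]-\mu_1\right)^2 \,\sum_i \left(R_2[i]-\mu_2\right)^2}

where p is the current pairing and \mu_1, \mu_2 are the means of the two rankings.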
diff --git a/models/correlations/rank_utils.h b/models/correlations/rank_utils.h
new file mode 100644
index 0000000..521582a
--- /dev/null
+++ b/models/correlations/rank_utils.h
@@ -0,0 +1,44 @@
+#ifndef __RANK_UTILS_H__
+#define __RANK_UTILS_H__
+
+/* Load a ranking */
+void load_ranking(char *fname, int *N, double **R);
+
+/* Compute the average of an array */
+double avg_array(double *v, int N);
+
+/* Compute the term "C" in the rho correlation coefficient */
+double compute_C(double *R1, double *R2, int N);
+
+/* Compute the term "D" in the rho correlation coefficient */
+double compute_D(double *R1, double *R2, int N);
+
+/* Compute the Spearman's rank correlation coefficient \rho */
+double compute_rho(double *R1, double *R2, int N, int *pairing);
+
+void dump_ranking(double *R, int N);
+
+void init_pairing_natural(int *pairing, int N);
+
+void init_pairing_inverse(int *pairing, int N);
+
+void init_pairing_random(int *pairing, int N);
+
+void select_pairing(int *pairing, int N, int argc, char *argv[], int pos);
+
+void shuffle_sequence(int *s, int N);
+
+void dump_pairing(int *pairing, int N);
+
+void copy_pairing(int *pairing1, int *pairing2, int N);
+
+
+#endif /* __RANK_UTILS_H__ */
diff --git a/models/correlations/tune_qnn_adaptive.c b/models/correlations/tune_qnn_adaptive.c
new file mode 100644
index 0000000..71643a1
--- /dev/null
+++ b/models/correlations/tune_qnn_adaptive.c
@@ -0,0 +1,208 @@
+/**
+ *
+ * Tune the inter-layer degree correlation function \bar{q}(k) of a
+ * two-layer multiplex, in order to make it as similar as possible to
+ * a power law with exponent $\mu$, where $\mu$ is given as input
+ *
+ * This version of the program is (or, better, "should be") able to
+ * tune also the value of the pre-factor "a"
+ *
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <strings.h>
+#include <time.h>
+
+#include "rank_utils.h"
+#include "fit_utils.h"
+
+
+
+
+static inline double compute_delta(double q, double k, double mu, double a){
+
+ return fabs(log(q) - mu * log(k) - log(a));
+ //return fabs (q - a*pow(k,mu));
+}
+
+
+
+
+
+
+void tune_qnn_adaptive(double *R1, double *R2, int N, int *pairing, double eps,
+ double beta, double mu_target){
+
+ double act_mu, test_mu, F, F_new, F_old;
+ double delta1_old, delta2_old, delta1_new, delta2_new;
+ double val, prob;
+ double mu, a, err, dummy_a;
+ int *new_pairing;
+ int p1, p2, tmp_val;
+ int tot;
+ char swap = 0;
+
+ new_pairing = malloc(N * sizeof(int));
+ copy_pairing(pairing, new_pairing, N);
+
+ a = 1.0;
+
+ F = 10000;
+
+ //fprintf("%f %f %f %f %f\n", eps, beta, mu_target, act_mu, F);
+
+ tot = 0;
+ while (F > eps){
+ /* sample two positions of "pairing" and shuffle them in "new_pairing" */
+
+ p1 = rand() % N;
+ p2 = rand() % N;
+ while (p2 == p1){
+ p2 = rand() % N;
+ }
+ tmp_val = new_pairing[p1];
+ new_pairing[p1] = new_pairing[p2];
+ new_pairing[p2] = tmp_val;
+
+ delta1_old = compute_delta(R2[pairing[p1]], R1[p1], mu_target, a);
+ delta2_old = compute_delta(R2[pairing[p2]], R1[p2], mu_target, a);
+
+ delta1_new = compute_delta(R2[new_pairing[p1]], R1[p1], mu_target, a);
+ delta2_new = compute_delta(R2[new_pairing[p2]], R1[p2], mu_target, a);
+
+ //F_new = log(delta1_new * delta2_new);
+ //F_old = log(delta1_old * delta2_old);
+
+ F_new = delta1_new + delta2_new;
+ F_old = delta1_old + delta2_old;
+
+
+ if (F_new <= F_old){/* Accept this swap with probability 1 */
+ tmp_val = pairing[p1];
+ pairing[p1] = pairing[p2];
+ pairing[p2] = tmp_val;
+ //act_mu = test_mu;
+ swap = 1;
+ }
+ else{/* Accept the swap with a certain probability */
+ val = 1.0 * rand() / RAND_MAX;
+
+ //prob = pow(fabs(F - F_new)/(F+F_new), beta);
+ prob = exp(-(F_new - F_old)/beta);
+ //fprintf(stderr, "-- %g %g %g -> %f \n", F_new, F_old, F_new - F_old, prob);
+ if (val < prob){ /* Accept the swap */
+ tmp_val = pairing[p1];
+ pairing[p1] = pairing[p2];
+ pairing[p2] = tmp_val;
+ //act_mu = test_mu;
+ swap = 1;
+ }
+ else{ /* Rollback the swap */
+ tmp_val = new_pairing[p1];
+ new_pairing[p1] = new_pairing[p2];
+ new_pairing[p2] = tmp_val;
+ swap = 0;
+ }
+
+ }
+
+
+ ///F = fabs(act_mu - mu_target);
+ ///if (tot % 200 == 0){
+ //fprintf(stderr, "%d %g\n", tot, act_mu);
+
+ //fprintf(stderr, "%d: %f %f ---- %f %f ---- %f %f ---- %d \n",
+ //tot, delta1_old, delta2_old, delta1_new, delta2_new, F_old, F_new, swap);
+
+ ///}
+ tot += 1;
+ if (tot %200 == 0){
+ fit_current_trend(R1, R2, N, pairing, &mu, &a, &err);
+ fprintf(stderr, "mu: %g a: %g corr: %g\n", mu, a, err);
+ //a = a - 0.01 *(a - exp(a));
+ a = exp(a);
+ }
+ fit_current_trend(R1, R2, N, pairing, &mu, &dummy_a, &err);
+ F = fabs(mu - mu_target);
+ }
+}
+
+void dump_qnn(double *R1, double *R2, int N, int *pairing){
+
+ int i;
+ int *qnn, *num;
+  double *qnn; /* holds real-valued sums of R2 degrees */
+  int *num;
+
+  qnn = malloc(N * sizeof(double));
+
+ for (i=0; i<N; i++){
+ qnn[i]=0;
+ num[i]=0;
+ }
+
+ for (i=0; i<N; i++){
+ qnn[(int)(R1[i])] += R2[pairing[i]];
+ num[(int)(R1[i])] += 1;
+ }
+
+ for(i=0; i<N; i++){
+ if (num[i] >0){
+      printf("%d %f\n", i, qnn[i]/num[i]);
+ }
+ }
+ free(num);
+ free(qnn);
+}
+
+
+void dump_degs(double *R1, double *R2, int N, int *pairing){
+
+ int i;
+
+ for(i=0; i<N; i++){
+ printf("%g %g\n", R1[i], R2[pairing[i]]);
+ }
+
+}
+
+int main (int argc, char *argv[]){
+
+ int N1, N2;
+ double *R1 = NULL;
+ double *R2 = NULL;
+ double eps, beta, mu_target;
+
+ int *pairing = NULL;
+
+ srand(time(NULL));
+
+ if (argc < 6){
+ printf("Usage: %s <degs1> <degs2> <mu> <eps> <beta> [RND|NAT|INV]\n", argv[0]);
+ exit(1);
+ }
+
+ mu_target = atof(argv[3]);
+ eps = atof(argv[4]);
+ beta = atof(argv[5]);
+
+ load_ranking(argv[1], &N1, &R1);
+ load_ranking(argv[2], &N2, &R2);
+
+ if (N1 != N2){
+ printf("Error!!!! The two files must have the same number of lines!!!! Exiting...\n");
+ exit(1);
+ }
+
+ pairing = malloc(N1 * sizeof(int));
+
+ select_pairing(pairing, N1, argc, argv, 6);
+
+ tune_qnn_adaptive(R1, R2, N1, pairing, eps, beta, mu_target);
+
+ dump_pairing(pairing, N1);
+ //dump_qnn(R1, R2, N1, pairing);
+ //dump_degs(R1, R2, N1, pairing);
+
+  return 0;
+}
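In formulas, the per-node objective implemented by compute_delta() is the vertical distance of the point (k, q) from the target line in log-log space, and swaps are accepted with a Metropolis-like rule:

\Delta(q, k) = \left|\log q - \mu\,\log k - \log a\right|, \qquad
P(\mathrm{accept}) = \min\left\{1,\; e^{-(F_{\mathrm{new}} - F_{\mathrm{old}})/\beta}\right\}

with F the sum of the two \Delta terms affected by a swap. Note that with this convention a larger \beta makes uphill swaps more likely, i.e. \beta plays the role of a temperature rather than an inverse temperature.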
diff --git a/models/correlations/tune_rho.c b/models/correlations/tune_rho.c
new file mode 100644
index 0000000..4495b0e
--- /dev/null
+++ b/models/correlations/tune_rho.c
@@ -0,0 +1,142 @@
+/**
+ *
+ * Tune the Spearman's \rho correlation coefficient between two rankings
+ * given as input, using a simulated-annealing procedure
+ *
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <strings.h>
+#include <time.h>
+
+#include "rank_utils.h"
+
+//void select_pairing(int *pairing, int N, int argc, char *argv[]){
+//
+// if (argc < 7 || !strncasecmp("rnd", argv[6], 3)){
+// init_pairing_random(pairing, N);
+// }
+// else if (!strncasecmp("nat", argv[6], 3)){
+// init_pairing_natural(pairing, N);
+// }
+// else if (!strncasecmp("inv", argv[6], 3)){
+// init_pairing_inverse(pairing, N);
+// }
+// else{
+// printf ("Pairing strategy \"%s\" unknown!!! Exiting...\n", argv[6]);
+// exit(1);
+// }
+//
+//}
+
+void tune_rho(double *R1, double *R2, int N, int *pairing, double eps,
+ double beta, double rho_target){
+
+ double act_rho, test_rho, F, F_new;
+ double val, prob;
+ int *new_pairing;
+ int p1, p2, tmp_val;
+ int tot;
+
+ new_pairing = malloc(N * sizeof(int));
+ copy_pairing(pairing, new_pairing, N);
+
+ act_rho = compute_rho(R1, R2, N, pairing);
+
+ F = fabs(act_rho - rho_target);
+
+ //fprintf("%f %f %f %f %f\n", eps, beta, rho_target, act_rho, F);
+
+ tot = 0;
+ while (F > eps){
+ /* sample two positions of "pairing" and shuffle them in "new_pairing" */
+ p1 = rand() % N;
+ p2 = rand() % N;
+ while (p2 == p1){
+ p2 = rand() % N;
+ }
+ tmp_val = new_pairing[p1];
+ new_pairing[p1] = new_pairing[p2];
+ new_pairing[p2] = tmp_val;
+ test_rho = compute_rho(R1, R2, N, new_pairing);
+ F_new = fabs(test_rho - rho_target);
+ if (F_new < F){/* Accept this swap with probability 1 */
+ tmp_val = pairing[p1];
+ pairing[p1] = pairing[p2];
+ pairing[p2] = tmp_val;
+ act_rho = test_rho;
+ }
+ else{/* Accept the swap with a certain probability */
+ val = 1.0 * rand() / RAND_MAX;
+
+ //prob = pow(fabs(F - F_new)/(F+F_new), beta);
+ prob = exp(-(F_new - F)/beta);
+ //fprintf(stderr, "-- %f\n", prob);
+ if (val < prob){ /* Accept the swap */
+ tmp_val = pairing[p1];
+ pairing[p1] = pairing[p2];
+ pairing[p2] = tmp_val;
+ act_rho = test_rho;
+ }
+ else{ /* Rollback the swap */
+ tmp_val = new_pairing[p1];
+ new_pairing[p1] = new_pairing[p2];
+ new_pairing[p2] = tmp_val;
+ }
+
+ }
+ F = fabs(act_rho - rho_target);
+ if (tot % 200 == 0){
+ fprintf(stderr, "%d %g\n", tot, act_rho);
+ }
+ tot += 1;
+ }
+}
+
+
+
+
+int main (int argc, char *argv[]){
+
+ int N1, N2;
+ double *R1 = NULL;
+ double *R2 = NULL;
+ double eps, beta, rho, rho_target;
+
+ int *pairing = NULL;
+
+ srand(time(NULL));
+
+ if (argc < 6){
+ printf("Usage: %s <rank1> <rank2> <rho> <eps> <beta> [RND|NAT|INV]\n", argv[0]);
+ exit(1);
+ }
+
+ rho_target = atof(argv[3]);
+ eps = atof(argv[4]);
+ beta = atof(argv[5]);
+
+ load_ranking(argv[1], &N1, &R1);
+ load_ranking(argv[2], &N2, &R2);
+
+ if (N1 != N2){
+ printf("Error!!!! The two files must have the same number of lines!!!! Exiting...\n");
+ exit(1);
+ }
+
+ pairing = malloc(N1 * sizeof(int));
+
+ select_pairing(pairing, N1, argc, argv, 6);
+
+ rho = compute_rho(R1, R2, N1, pairing);
+
+ fprintf(stderr, "%g\n", rho);
+
+ tune_rho(R1, R2, N1, pairing, eps, beta, rho_target);
+
+ dump_pairing(pairing, N1);
+
+  return 0;
+}
diff --git a/models/growth/Makefile b/models/growth/Makefile
new file mode 100644
index 0000000..d0b461d
--- /dev/null
+++ b/models/growth/Makefile
@@ -0,0 +1,26 @@
+CFLAGS=-O3
+CC=gcc
+MFLAG=-lm
+
+all: nibilab_linear_delta nibilab_linear_delay nibilab_linear_delay_mix \
+ nibilab_linear_random_times nibilab_nonlinear
+
+nibilab_linear_delta: nibilab_linear_delta.c
+ $(CC) $(CFLAGS) -o nibilab_linear_delta nibilab_linear_delta.c $(MFLAG)
+
+nibilab_linear_delay: nibilab_linear_delay.c
+ $(CC) $(CFLAGS) -o nibilab_linear_delay nibilab_linear_delay.c $(MFLAG)
+
+nibilab_linear_delay_mix: nibilab_linear_delay_mix.c
+ $(CC) $(CFLAGS) -o nibilab_linear_delay_mix nibilab_linear_delay_mix.c $(MFLAG)
+
+nibilab_linear_random_times: nibilab_linear_random_times.c
+ $(CC) $(CFLAGS) -o nibilab_linear_random_times nibilab_linear_random_times.c $(MFLAG)
+
+nibilab_nonlinear: nibilab_nonlinear.c
+ $(CC) $(CFLAGS) -o nibilab_nonlinear nibilab_nonlinear.c $(MFLAG)
+
+
+clean:
+	rm -f nibilab_linear_delta nibilab_linear_delay nibilab_linear_delay_mix \
+ nibilab_linear_random_times nibilab_nonlinear
diff --git a/models/growth/nibilab_linear_delay.c b/models/growth/nibilab_linear_delay.c
new file mode 100644
index 0000000..518b09d
--- /dev/null
+++ b/models/growth/nibilab_linear_delay.c
@@ -0,0 +1,414 @@
+/**
+ *
+ * NiBiLaB model (Nicosia, Bianconi, Latora, Barthelemy) of multiplex
+ * linear preferential attachment
+ *
+ * A new node arrives on layer 1 at each time step, but its replica on
+ * layer 2 arrives after a power-law distributed delay \tau
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <math.h>
+
+
+typedef struct{
+ int size;
+ int N;
+ int *val;
+} int_array_t;
+
+
+typedef struct{
+ int K;
+ int N;
+ int size;
+ int *i;
+ int *j;
+ int *degrees;
+ int *arrived;
+ double *cumul;
+ int_array_t *times;
+} ij_net_t;
+
+
+
+typedef struct{
+ double a;
+ double b;
+ double c;
+ double d;
+} coupling_t;
+
+
+typedef struct{
+ int N;
+ double x_min;
+ double *distr;
+ double gamma;
+} distr_t;
+
+
+int init_network(ij_net_t *G, int m0){
+
+ int n;
+
+ for(n=0; n<m0; n++){
+ G->i[n] = n;
+    G->j[n] = (n+1) % m0;
+ G->degrees[n] = 2;
+ G->arrived[n] = n;
+ }
+ G->K = m0;
+ G->N = m0;
+ return n;
+}
+
+
+
+void dump_network(ij_net_t *G){
+
+ int i;
+ for(i=0; i< G->K; i ++){
+ printf("%d %d\n",G->i[i], G->j[i]);
+ }
+}
+
+
+void dump_network_to_file(ij_net_t *G, char *fname){
+
+
+ FILE *f;
+ int i;
+
+
+ f = fopen(fname, "w+");
+
+ for(i=0; i< G->K; i ++){
+ fprintf(f, "%d %d\n",G->i[i], G->j[i]);
+ }
+ fclose(f);
+}
+
+
+
+double f1(double v1, double v2, coupling_t *matr){
+ return v1 * matr->a + v2 * matr->b;
+}
+
+double f2(double v1, double v2, coupling_t *matr){
+ return v1 * matr->c + v2 * matr->d;
+}
+
+
+void compute_cumul(ij_net_t *G1, ij_net_t *G2, coupling_t *matr){
+
+ int i;
+ double val;
+
+ G1->cumul[0] = f1(G1->degrees[ G1->arrived[0] ], G2->degrees[ G1->arrived[0] ], matr) ;
+ G2->cumul[0] = f2(G1->degrees[ G2->arrived[0] ], G2->degrees[ G2->arrived[0] ], matr) ;
+
+ for(i=1; i<G1->N; i ++){
+ val = f1(G1->degrees[ G1->arrived[i] ], G2->degrees[ G1->arrived[i] ],matr) ;
+ G1->cumul[i] = G1->cumul[i-1] + val;
+ }
+ for(i=1; i<G2->N; i ++){
+ val = f2(G1->degrees[ G2->arrived[i] ], G2->degrees[ G2->arrived[i] ], matr) ;
+ G2->cumul[i] = G2->cumul[i-1] + val;
+ }
+}
+
+void dump_cumul(ij_net_t *G){
+
+ int i;
+
+ for (i=0; i<G->N; i ++){
+    printf("%d %2.6f\n", G->arrived[i], G->cumul[i]);
+ }
+
+}
+
+
+
+int select_neigh_cumul(ij_net_t *G){
+
+ double r;
+ int j;
+
+ r = (rand() * 1.0 / RAND_MAX) * G->cumul[ G->N-1 ];
+ //printf(" r: %f ", r);
+
+ j = 0;
+ /* Change with a binary search ???!?!?!?! */
+ while (r > G->cumul[j]){
+ j ++;
+ }
+ return G->arrived[j];
+}
+
+int already_neighbour(ij_net_t *G, int j, int d, int offset){
+
+ int i;
+
+ for(i=G-> K + offset; i< G->K + offset + j; i ++){
+ if (G->j[i] == d)
+ return 1;
+ }
+ return 0;
+}
+
+
+void sample_edges(ij_net_t *G, int n, int m, int offset){
+
+ int j;
+ int d;
+
+ //printf("----");
+ for(j=0; j<m; j++){
+ G->i[G->K + offset + j] = n;
+ d = select_neigh_cumul(G);
+ while(already_neighbour(G, j, d, offset)){
+ d = select_neigh_cumul(G);
+ }
+ //printf(" link %d--%d\n", n, d);
+ G->j[G->K + offset + j] = d;
+ G->degrees[d] += 1;
+ }
+ //printf("\n");
+}
+
+void create_distr(double *v, double gamma, int x_min, int x_max){
+ int n, i;
+ double sum, new_sum;
+
+ n = x_max-x_min + 1;
+
+ sum = 0;
+ for(i=0; i<n; i++){
+ v[i] = pow((x_min + i), gamma);
+ sum += v[i];
+ }
+ new_sum = 0;
+ /* Now we normalize the array*/
+ for(i=0; i<n; i++){
+ v[i]/= sum;
+ new_sum += v[i];
+ }
+ //printf("New_sum: %lf\n", new_sum);
+ /* Now we compute the cumulative distribution*/
+ for(i=1; i<n; i++){
+ v[i] += v[i-1];
+ }
+}
+
+
+inline int find_degree(double *v, int dim, double xi){
+ int i;
+
+ i=0;
+ while(xi > v[i])
+ i++;
+ return i;
+
+}
+
+
+int sample_power_law(distr_t *d){
+
+ double xi, q, val;
+ int k;
+
+ while(1){
+ xi = rand()* 1.0 / RAND_MAX;
+ k = find_degree(d->distr, d->N, xi);
+ val = rand()*1.0/RAND_MAX;
+ q = d->x_min + xi * d->N;
+ q = q / (floor(q) + 1);
+ q = pow(q, d->gamma);
+ if (val <= q){
+ return k;
+ }
+ }
+}
+
+
+
+
+int grow_multi_net_delay(ij_net_t *G1, ij_net_t *G2, int N, int m, int m0, coupling_t *matr){
+
+ int i, j;
+ int d;
+ int i2;
+
+ i2 = m0;
+
+ for(i=m0; i<N; i++){
+ compute_cumul(G1, G2, matr);
+ //dump_cumul(G1);
+ //dump_cumul(G2);
+ //n = m0 + i; /* This is the id of the new node */
+ G1->arrived[i] = i;
+ sample_edges(G1, G1->arrived[i], m, 0);
+ G1->degrees[G1->arrived[i]] += m;
+
+ for (j=0; j < G2->times[i].N; j++){
+ G2->arrived[i2] = G2->times[i].val[j];
+ //printf("%d\n", G2->arrived[i2]);
+ sample_edges(G2, G2->arrived[i2], m, m *j);
+ G2->degrees[G2->arrived[i2]] += m;
+ i2 += 1;
+ }
+ G1->N += 1;
+ G1->K += m;
+ G2->N += G2->times[i].N;
+ G2->K += m * G2->times[i].N;
+ }
+ return 0;
+}
+
+void init_structure(ij_net_t *G, int N){
+
+ int i;
+
+ G->i = malloc(G->size * sizeof(int));
+ G->j = malloc(G->size * sizeof(int));
+ G->degrees = malloc(N * sizeof(int));
+ G->arrived = malloc(N * sizeof(int));
+ G->cumul = malloc(N * sizeof(double));
+ G->times = malloc(N * sizeof(int_array_t));
+ for (i=0; i<N; i++){
+ G->times[i].size = 5;
+ G->times[i].N = 0;
+ G->times[i].val = malloc(5 * sizeof(int));
+ }
+ memset(G->degrees, 0, N*sizeof(int));
+ G->N = 0;
+
+}
+
+void add_node_to_time(ij_net_t *G, int node, int t){
+
+ if (G->times[t].size == G->times[t].N){
+ G->times[t].size += 5;
+    G->times[t].val = realloc(G->times[t].val, G->times[t].size * sizeof(int));
+ }
+ G->times[t].val[G->times[t].N] = node;
+ G->times[t].N += 1;
+}
+
+
+void init_times_delta(ij_net_t *G, int N){
+
+ int i;
+
+ for (i=0; i<N; i ++){
+ add_node_to_time(G,i,i);
+ }
+
+}
+
+
+
+void init_times_delay(ij_net_t *G, distr_t *d, int m0, int N){
+ int i;
+ int t;
+
+ for (i=m0; i<N; i ++){
+ t = sample_power_law(d);/* So t is in the interval [1,N] */
+ //printf(" %d", t);
+ if (i+t < N){
+ add_node_to_time(G, i, t+i);
+ }
+ }
+ //printf("\n");
+}
+
+
+
+void dump_times(ij_net_t *G, int N){
+
+ int i, j;
+
+ for (i=0; i<N; i++){
+ for (j=0; j< G->times[i].N; j++){
+ printf("%d %d\n", i,G->times[i].val[j]);
+ }
+ //printf("\n");
+ }
+
+}
+
+
+int main(int argc, char *argv[]){
+
+ ij_net_t G1, G2;
+
+ int N, m, m0, k_max;
+ coupling_t matr;
+ double gamma;
+ distr_t delay_distr;
+
+ char str[256];
+
+ if (argc < 10){
+ printf("Usage: %s <N> <m> <m0> <outfile> <a> <b> <c> <d> <gamma>\n", argv[0]);
+ exit(1);
+ }
+
+ srand(time(NULL));
+
+ /* Diagonal coupling */
+ matr.a = atof(argv[5]);
+ matr.b = atof(argv[6]);
+ matr.c = atof(argv[7]);
+ matr.d = atof(argv[8]);
+ gamma = atof(argv[9]);
+
+ N = atoi(argv[1]);
+ m = atoi(argv[2]);
+ m0 = atoi(argv[3]);
+
+ G1.size = (N+m0) * m;
+ G2.size = (N+m0) * m;
+
+ init_structure(&G1, N);
+ init_structure(&G2, N);
+
+
+ G1.K = init_network(&G1, m0);
+ G2.K = init_network(&G2, m0);
+
+ delay_distr.N = N;
+ delay_distr.gamma = gamma;
+ delay_distr.x_min = 1;
+ delay_distr.distr = malloc(N * sizeof(double));
+
+ create_distr(delay_distr.distr, delay_distr.gamma, delay_distr.x_min, N);
+
+ init_times_delay(&G2, &delay_distr, m0, N);
+
+ dump_times(&G2, N);
+
+ fprintf(stderr, "Init finished!\n");
+
+ grow_multi_net_delay(&G1, &G2, N, m, m0, &matr);
+
+ //printf("### G1\n");
+ sprintf(str, "%s_layer1.txt", argv[4]);
+ dump_network_to_file(&G1, str);
+
+ //printf("### G2\n");
+ sprintf(str, "%s_layer2.txt", argv[4]);
+ dump_network_to_file(&G2, str);
+
+ /* dump_network_to_file(S, S_num, argv[4]); */
+ /* printf("Network dumped!\n"); */
+ /* k_max = degree_distr(S, S_num, &distr); */
+ /* printf("k_max is: %d\n", k_max); */
+ /* dump_distr_to_file(distr, k_max, argv[5]); */
+
+  return 0;
+}
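The "Change with a binary search" comment in select_neigh_cumul() applies to this and to the other growth models below; since cumul[] is non-decreasing, a possible drop-in replacement is (a sketch, not part of the commit; it assumes the same ij_net_t and includes as the file above):

int select_neigh_cumul(ij_net_t *G){

  double r;
  int lo = 0, hi = G->N - 1, mid;

  r = (rand() * 1.0 / RAND_MAX) * G->cumul[ G->N-1 ];
  while (lo < hi){              /* invariant: G->cumul[hi] >= r */
    mid = (lo + hi) / 2;
    if (r > G->cumul[mid])      /* the first index with cumul >= r lies right of mid */
      lo = mid + 1;
    else
      hi = mid;
  }
  return G->arrived[lo];
}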
diff --git a/models/growth/nibilab_linear_delay_mix.c b/models/growth/nibilab_linear_delay_mix.c
new file mode 100644
index 0000000..e48d16b
--- /dev/null
+++ b/models/growth/nibilab_linear_delay_mix.c
@@ -0,0 +1,457 @@
+/**
+ *
+ * NiBiLaB model (Nicosia, Bianconi, Latora, Barthelemy) of multiplex
+ * linear preferential attachment
+ *
+ * At each time step, a new node arrives on one of the two layers
+ * (chosen uniformly at random), but its replica on the other layer
+ * arrives after a power-law distributed delay \tau
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <math.h>
+
+
+typedef struct{
+ int size;
+ int N;
+ int *val;
+} int_array_t;
+
+
+typedef struct{
+ int K;
+ int N;
+ int size;
+ int *i;
+ int *j;
+ int *degrees;
+ int *arrived;
+ double *cumul;
+ int_array_t *times;
+} ij_net_t;
+
+
+
+typedef struct{
+ double a;
+ double b;
+ double c;
+ double d;
+} coupling_t;
+
+
+typedef struct{
+ int N;
+ double x_min;
+ double *distr;
+ double gamma;
+} distr_t;
+
+
+int init_network(ij_net_t *G, int m0){
+
+ int n;
+
+ for(n=0; n<m0; n++){
+ G->i[n] = n;
+ G->j[n] = (n+1) % m0;
+ G->degrees[n] = 2;
+ G->arrived[n] = n;
+ }
+ G->K = m0;
+ G->N = m0;
+ return n;
+}
+
+
+
+void dump_network(ij_net_t *G){
+
+ int i;
+ for(i=0; i< G->K; i ++){
+ printf("%d %d\n",G->i[i], G->j[i]);
+ }
+}
+
+
+void dump_network_to_file(ij_net_t *G, char *fname){
+
+
+ FILE *f;
+ int i;
+
+
+ f = fopen(fname, "w+");
+
+ for(i=0; i< G->K; i ++){
+ fprintf(f, "%d %d\n",G->i[i], G->j[i]);
+ }
+ fclose(f);
+}
+
+
+
+double f1(double v1, double v2, coupling_t *matr){
+ return v1 * matr->a + v2 * matr->b;
+}
+
+double f2(double v1, double v2, coupling_t *matr){
+ return v1 * matr->c + v2 * matr->d;
+}
+
+
+void compute_cumul(ij_net_t *G1, ij_net_t *G2, coupling_t *matr){
+
+ int i;
+ double val;
+
+ G1->cumul[0] = f1(G1->degrees[ G1->arrived[0] ], G2->degrees[ G1->arrived[0] ], matr) ;
+ G2->cumul[0] = f2(G1->degrees[ G2->arrived[0] ], G2->degrees[ G2->arrived[0] ], matr) ;
+
+ for(i=1; i<G1->N; i ++){
+ val = f1(G1->degrees[ G1->arrived[i] ], G2->degrees[ G1->arrived[i] ],matr) ;
+ G1->cumul[i] = G1->cumul[i-1] + val;
+ }
+ for(i=1; i<G2->N; i ++){
+ val = f2(G1->degrees[ G2->arrived[i] ], G2->degrees[ G2->arrived[i] ], matr) ;
+ G2->cumul[i] = G2->cumul[i-1] + val;
+ }
+}
+
+void dump_cumul(ij_net_t *G){
+
+ int i;
+
+ for (i=0; i<G->N; i ++){
+    printf("%d %2.6f\n", G->arrived[i], G->cumul[i]);
+ }
+
+}
+
+
+
+int select_neigh_cumul(ij_net_t *G){
+
+ double r;
+ int j;
+
+ r = (rand() * 1.0 / RAND_MAX) * G->cumul[ G->N-1 ];
+ //printf(" r: %f ", r);
+
+ j = 0;
+ /* Change with a binary search ???!?!?!?! */
+ while (r > G->cumul[j]){
+ j ++;
+ }
+ return G->arrived[j];
+}
+
+int already_neighbour(ij_net_t *G, int j, int d, int offset){
+
+ int i;
+
+ for(i=G-> K + offset; i< G->K + offset + j; i ++){
+ if (G->j[i] == d)
+ return 1;
+ }
+ return 0;
+}
+
+
+void sample_edges(ij_net_t *G, int n, int m, int offset){
+
+ int j;
+ int d;
+
+ //printf("----");
+ for(j=0; j<m; j++){
+ G->i[G->K + offset + j] = n;
+ d = select_neigh_cumul(G);
+ while(already_neighbour(G, j, d, offset)){
+ d = select_neigh_cumul(G);
+ }
+ //printf(" link %d--%d\n", n, d);
+ G->j[G->K + offset + j] = d;
+ G->degrees[d] += 1;
+ }
+ //printf("\n");
+}
+
+void create_distr(double *v, double gamma, int x_min, int x_max){
+ int n, i;
+ double sum, new_sum;
+
+ n = x_max-x_min + 1;
+
+ sum = 0;
+ for(i=0; i<n; i++){
+ v[i] = pow((x_min + i), gamma);
+ sum += v[i];
+ }
+ new_sum = 0;
+ /* Now we normalize the array*/
+ for(i=0; i<n; i++){
+ v[i]/= sum;
+ new_sum += v[i];
+ }
+ //printf("New_sum: %lf\n", new_sum);
+ /* Now we compute the cumulative distribution*/
+ for(i=1; i<n; i++){
+ v[i] += v[i-1];
+ }
+}
+
+
+inline int find_degree(double *v, int dim, double xi){
+ int i;
+
+ i=0;
+ while(xi > v[i])
+ i++;
+ return i;
+
+}
+
+
+int sample_power_law(distr_t *d){
+
+ double xi, q, val;
+ int k;
+
+ while(1){
+ xi = rand()* 1.0 / RAND_MAX;
+ k = find_degree(d->distr, d->N, xi);
+ val = rand()*1.0/RAND_MAX;
+ q = d->x_min + xi * d->N;
+ q = q / (floor(q) + 1);
+ q = pow(q, d->gamma);
+ if (val <= q){
+ return k;
+ }
+ }
+}
+
+
+
+
+int grow_multi_net_delay_mixed(ij_net_t *G1, ij_net_t *G2, int N, int m, int m0, coupling_t *matr){
+
+ int i, j;
+ int d;
+ int i1, i2;
+
+ i1 = m0;
+ i2 = m0;
+
+ for(i=m0; i<N; i++){
+ compute_cumul(G1, G2, matr);
+ //dump_cumul(G1);
+ //dump_cumul(G2);
+ //n = m0 + i; /* This is the id of the new node */
+ for (j=0; j < G1->times[i].N; j++){
+ G1->arrived[i1] = G1->times[i].val[j];
+ sample_edges(G1, G1->arrived[i1], m, m * j);
+ G1->degrees[G1->arrived[i1]] += m;
+ i1 += 1;
+ }
+ for (j=0; j < G2->times[i].N; j++){
+ G2->arrived[i2] = G2->times[i].val[j];
+ //printf("%d\n", G2->arrived[i2]);
+ sample_edges(G2, G2->arrived[i2], m, m *j);
+ G2->degrees[G2->arrived[i2]] += m;
+ i2 += 1;
+ }
+ G1->N += G1->times[i].N;
+ G1->K += m * G1->times[i].N;
+ G2->N += G2->times[i].N;
+ G2->K += m * G2->times[i].N;
+ }
+ return 0;
+}
+
+void init_structure(ij_net_t *G, int N){
+
+ int i;
+
+ G->i = malloc(G->size * sizeof(int));
+ G->j = malloc(G->size * sizeof(int));
+ G->degrees = malloc(N * sizeof(int));
+ G->arrived = malloc(N * sizeof(int));
+ G->cumul = malloc(N * sizeof(double));
+ G->times = malloc(N * sizeof(int_array_t));
+ for (i=0; i<N; i++){
+ G->times[i].size = 5;
+ G->times[i].N = 0;
+ G->times[i].val = malloc(5 * sizeof(int));
+ }
+ memset(G->degrees, 0, N*sizeof(int));
+ G->N = 0;
+}
+
+void add_node_to_time(ij_net_t *G, int node, int t){
+
+ if (G->times[t].size == G->times[t].N){
+ G->times[t].size += 5;
+    G->times[t].val = realloc(G->times[t].val, G->times[t].size * sizeof(int));
+ }
+ G->times[t].val[G->times[t].N] = node;
+ G->times[t].N += 1;
+}
+
+
+void init_times_delta(ij_net_t *G, int N){
+
+ int i;
+
+ for (i=0; i<N; i ++){
+ add_node_to_time(G,i,i);
+ }
+
+}
+
+
+
+void init_times_delay(ij_net_t *G, distr_t *d, int m0, int N){
+ int i;
+ int t;
+
+ for (i=m0; i<N; i ++){
+ t = sample_power_law(d);/* So t is in the interval [1,N] */
+ //printf(" %d", t);
+ if (i+t < N){
+ add_node_to_time(G, i, t+i);
+ }
+ }
+ //printf("\n");
+}
+
+/**
+ * Set the arrival time of nodes on each layer. A node $i$ selects its
+ * master layer uniformly at random, and then arrives in the other
+ * layer after a power-law distributed delay
+ *
+ */
+
+void init_times_delay_mixed(ij_net_t *G1, ij_net_t *G2, distr_t *d, int m0, int N){
+ int i;
+ int t;
+
+ double val;
+
+ for (i=m0; i<N; i ++){
+ t = sample_power_law(d);/* So t is in the interval [1,N] */
+ //printf(" %d", t);
+ val = rand() * 1.0 / RAND_MAX;
+ if (val <=0.5){ /* select layer 1 as the master layer*/
+ add_node_to_time(G1, i, i);
+ if (i+t < N){
+ add_node_to_time(G2, i, t+i);
+ }
+ }
+ else{/* select layer 2 as the master layer */
+ add_node_to_time(G2, i, i);
+ if (i+t < N){
+ add_node_to_time(G1, i, t+i);
+ }
+ }
+ }
+ //printf("\n");
+}
+
+
+
+
+void dump_times(ij_net_t *G, int N){
+
+ int i, j;
+
+ for (i=0; i<N; i++){
+ for (j=0; j< G->times[i].N; j++){
+ printf("%d %d\n", i,G->times[i].val[j]);
+ }
+ //printf("\n");
+ }
+
+}
+
+
+int main(int argc, char *argv[]){
+
+ ij_net_t G1, G2;
+
+ int N, m, m0, k_max;
+ coupling_t matr;
+ double gamma;
+ distr_t delay_distr;
+
+ char str[256];
+
+ if (argc < 10){
+ printf("Usage: %s <N> <m> <m0> <outfile> <a> <b> <c> <d> <gamma>\n", argv[0]);
+ exit(1);
+ }
+
+ srand(time(NULL));
+
+ /* Diagonal coupling */
+ matr.a = atof(argv[5]);
+ matr.b = atof(argv[6]);
+ matr.c = atof(argv[7]);
+ matr.d = atof(argv[8]);
+ gamma = atof(argv[9]);
+
+ N = atoi(argv[1]);
+ m = atoi(argv[2]);
+ m0 = atoi(argv[3]);
+
+ G1.size = (N+m0) * m;
+ G2.size = (N+m0) * m;
+
+ init_structure(&G1, N);
+ init_structure(&G2, N);
+
+
+ G1.K = init_network(&G1, m0);
+ G2.K = init_network(&G2, m0);
+
+ delay_distr.N = N;
+ delay_distr.gamma = gamma;
+ delay_distr.x_min = 1;
+ delay_distr.distr = malloc(N * sizeof(double));
+
+ create_distr(delay_distr.distr, delay_distr.gamma, delay_distr.x_min, N);
+
+ //init_times_delay(&G2, &delay_distr, m0, N);
+ init_times_delay_mixed(&G1, &G2, &delay_distr, m0, N);
+
+ printf("----- G1 times ----- \n");
+ dump_times(&G1, N);
+
+ printf("----- G2 times ----- \n");
+ dump_times(&G2, N);
+ //exit(2);
+
+ fprintf(stderr, "Init finished!\n");
+
+ grow_multi_net_delay_mixed(&G1, &G2, N, m, m0, &matr);
+
+ //printf("### G1\n");
+ sprintf(str, "%s_layer1.txt", argv[4]);
+ dump_network_to_file(&G1, str);
+
+ //printf("### G2\n");
+ sprintf(str, "%s_layer2.txt", argv[4]);
+ dump_network_to_file(&G2, str);
+
+ /* dump_network_to_file(S, S_num, argv[4]); */
+ /* printf("Network dumped!\n"); */
+ /* k_max = degree_distr(S, S_num, &distr); */
+ /* printf("k_max is: %d\n", k_max); */
+ /* dump_distr_to_file(distr, k_max, argv[5]); */
+
+  return 0;
+}
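For clarity, sample_power_law() is an inverse-transform lookup on the cumulative array built by create_distr(), followed by a rejection step; the bare lookup alone would read (a sketch with a hypothetical helper name, not part of the commit):

int sample_cumul_plain(distr_t *d){

  double xi = rand() * 1.0 / RAND_MAX;  /* uniform deviate in [0,1] */
  int i = 0;

  while (xi > d->distr[i])              /* d->distr holds the cumulative distribution */
    i ++;
  return (int)d->x_min + i;             /* map the bin index back to a delay value */
}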
diff --git a/models/growth/nibilab_linear_delta.c b/models/growth/nibilab_linear_delta.c
new file mode 100644
index 0000000..b1f7e90
--- /dev/null
+++ b/models/growth/nibilab_linear_delta.c
@@ -0,0 +1,403 @@
+/**
+ *
+ * NiBiLaB model (Nicosia, Bianconi, Latora, Barthelemy) of multiplex
+ * linear preferential attachment
+ *
+ * Nodes arrive at the same time on both layers.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <math.h>
+
+
+typedef struct{
+ int size;
+ int N;
+ int *val;
+} int_array_t;
+
+
+typedef struct{
+ int K;
+ int N;
+ int size;
+ int *i;
+ int *j;
+ int *degrees;
+ int *arrived;
+ double *cumul;
+ int_array_t *times;
+} ij_net_t;
+
+
+
+typedef struct{
+ double a;
+ double b;
+ double c;
+ double d;
+} coupling_t;
+
+
+typedef struct{
+ int N;
+ double x_min;
+ double *distr;
+ double gamma;
+} distr_t;
+
+
+int init_network(ij_net_t *G, int m0){
+
+ int n;
+
+ for(n=0; n<m0; n++){
+ G->i[n] = n;
+    G->j[n] = (n+1) % m0;
+ G->degrees[n] = 2;
+ G->arrived[n] = n;
+ }
+ G->K = m0;
+ G->N = m0;
+ return n;
+}
+
+
+
+void dump_network(ij_net_t *G){
+
+ int i;
+ for(i=0; i< G->K; i ++){
+ printf("%d %d\n",G->i[i], G->j[i]);
+ }
+}
+
+
+void dump_network_to_file(ij_net_t *G, char *fname){
+
+
+ FILE *f;
+ int i;
+
+
+ f = fopen(fname, "w+");
+
+ for(i=0; i< G->K; i ++){
+ fprintf(f, "%d %d\n",G->i[i], G->j[i]);
+ }
+ fclose(f);
+}
+
+
+
+double f1(double v1, double v2, coupling_t *matr){
+ return v1 * matr->a + v2 * matr->b;
+}
+
+double f2(double v1, double v2, coupling_t *matr){
+ return v1 * matr->c + v2 * matr->d;
+}
+
+
+void compute_cumul(ij_net_t *G1, ij_net_t *G2, coupling_t *matr){
+
+ int i;
+ double val;
+
+ G1->cumul[0] = f1(G1->degrees[ G1->arrived[0] ], G2->degrees[ G1->arrived[0] ], matr) ;
+ G2->cumul[0] = f2(G1->degrees[ G2->arrived[0] ], G2->degrees[ G2->arrived[0] ], matr) ;
+
+ for(i=1; i<G1->N; i ++){
+ val = f1(G1->degrees[ G1->arrived[i] ], G2->degrees[ G1->arrived[i] ],matr) ;
+ G1->cumul[i] = G1->cumul[i-1] + val;
+ }
+ for(i=1; i<G2->N; i ++){
+ val = f2(G1->degrees[ G2->arrived[i] ], G2->degrees[ G2->arrived[i] ], matr) ;
+ G2->cumul[i] = G2->cumul[i-1] + val;
+ }
+}
+
+void dump_cumul(ij_net_t *G){
+
+ int i;
+
+ for (i=0; i<G->N; i ++){
+    printf("%d %2.6f\n", G->arrived[i], G->cumul[i]);
+ }
+
+}
+
+
+
+int select_neigh_cumul(ij_net_t *G){
+
+ double r;
+ int j;
+
+ r = (rand() * 1.0 / RAND_MAX) * G->cumul[ G->N-1 ];
+ //printf(" r: %f ", r);
+
+ j = 0;
+ /* Change with a binary search ???!?!?!?! */
+ while (r > G->cumul[j]){
+ j ++;
+ }
+ return G->arrived[j];
+}
+
+int already_neighbour(ij_net_t *G, int j, int d, int offset){
+
+ int i;
+
+ for(i=G-> K + offset; i< G->K + offset + j; i ++){
+ if (G->j[i] == d)
+ return 1;
+ }
+ return 0;
+}
+
+
+void sample_edges(ij_net_t *G, int n, int m, int offset){
+
+ int j;
+ int d;
+
+ //printf("----");
+ for(j=0; j<m; j++){
+ G->i[G->K + offset + j] = n;
+ d = select_neigh_cumul(G);
+ while(already_neighbour(G, j, d, offset)){
+ d = select_neigh_cumul(G);
+ }
+ //printf(" link %d--%d\n", n, d);
+ G->j[G->K + offset + j] = d;
+ G->degrees[d] += 1;
+ }
+ //printf("\n");
+}
+
+void create_distr(double *v, double gamma, int x_min, int x_max){
+ int n, i;
+ double sum, new_sum;
+
+ n = x_max-x_min + 1;
+
+ sum = 0;
+ for(i=0; i<n; i++){
+ v[i] = pow((x_min + i), gamma);
+ sum += v[i];
+ }
+ new_sum = 0;
+ /* Now we normalize the array*/
+ for(i=0; i<n; i++){
+ v[i]/= sum;
+ new_sum += v[i];
+ }
+ //printf("New_sum: %lf\n", new_sum);
+ /* Now we compute the cumulative distribution*/
+ for(i=1; i<n; i++){
+ v[i] += v[i-1];
+ }
+}
+
+
+inline int find_degree(double *v, int dim, double xi){
+ int i;
+
+ i=0;
+ while(xi > v[i])
+ i++;
+ return i;
+
+}
+
+
+int sample_power_law(distr_t *d){
+
+ double xi, q, val;
+ int k;
+
+ while(1){
+ xi = rand()* 1.0 / RAND_MAX;
+ k = find_degree(d->distr, d->N, xi);
+ val = rand()*1.0/RAND_MAX;
+ q = d->x_min + xi * d->N;
+ q = q / (floor(q) + 1);
+ q = pow(q, d->gamma);
+ if (val <= q){
+ return k;
+ }
+ }
+}
+
+
+
+
+int grow_multi_net_delta(ij_net_t *G1, ij_net_t *G2, int N, int m, int m0, coupling_t *matr){
+
+ int i, j;
+ int d;
+ int i2;
+
+
+ for(i=m0; i<N; i++){
+ compute_cumul(G1, G2, matr);
+ //dump_cumul(G1);
+ //dump_cumul(G2);
+ //n = m0 + i; /* This is the id of the new node */
+ G1->arrived[i] = i;
+ sample_edges(G1, G1->arrived[i], m, 0);
+ G1->degrees[G1->arrived[i]] += m;
+
+ G2->arrived[i] = i;
+ //printf("%d\n", G2->arrived[i2]);
+ sample_edges(G2, G2->arrived[i], m, 0);
+ G2->degrees[G2->arrived[i]] += m;
+ G1->N += 1;
+ G1->K += m;
+ G2->N += 1;
+ G2->K += m;
+ }
+ return 0;
+}
+
+void init_structure(ij_net_t *G, int N){
+
+ int i;
+
+ G->i = malloc(G->size * sizeof(int));
+ G->j = malloc(G->size * sizeof(int));
+ G->degrees = malloc(N * sizeof(int));
+ G->arrived = malloc(N * sizeof(int));
+ G->cumul = malloc(N * sizeof(double));
+ G->times = malloc(N * sizeof(int_array_t));
+ for (i=0; i<N; i++){
+ G->times[i].size = 5;
+ G->times[i].N = 0;
+ G->times[i].val = malloc(5 * sizeof(int));
+ }
+ memset(G->degrees, 0, N*sizeof(int));
+ G->N = 0;
+
+}
+
+void add_node_to_time(ij_net_t *G, int node, int t){
+
+ if (G->times[t].size == G->times[t].N){
+ G->times[t].size += 5;
+    G->times[t].val = realloc(G->times[t].val, G->times[t].size * sizeof(int));
+ }
+ G->times[t].val[G->times[t].N] = node;
+ G->times[t].N += 1;
+}
+
+
+void init_times_delta(ij_net_t *G, int N){
+
+ int i;
+
+ for (i=0; i<N; i ++){
+ add_node_to_time(G,i,i);
+ }
+
+}
+
+
+
+void init_times_delay(ij_net_t *G, distr_t *d, int m0, int N){
+ int i;
+ int t;
+
+ for (i=m0; i<N; i ++){
+ t = sample_power_law(d);/* So t is in the interval [1,N] */
+ printf(" %d", t);
+ if (i+t < N){
+ add_node_to_time(G, i, t+i);
+ }
+ }
+ printf("\n");
+}
+
+
+
+void dump_times(ij_net_t *G, int N){
+
+ int i, j;
+
+ for (i=0; i<N; i++){
+ printf("%d: ", i);
+ for (j=0; j< G->times[i].N; j++){
+ printf(" %d", G->times[i].val[j]);
+ }
+ printf("\n");
+ }
+
+}
+
+
+int main(int argc, char *argv[]){
+
+ ij_net_t G1, G2;
+
+ int N, m, m0, k_max;
+ coupling_t matr;
+ double gamma;
+ distr_t delay_distr;
+
+ char str[256];
+
+ if (argc < 9){
+ printf("Usage: %s <N> <m> <m0> <outfile> <a> <b> <c> <d>\n", argv[0]);
+ exit(1);
+ }
+
+ srand(time(NULL));
+
+ /* Diagonal coupling */
+ matr.a = atof(argv[5]);
+ matr.b = atof(argv[6]);
+ matr.c = atof(argv[7]);
+ matr.d = atof(argv[8]);
+
+ N = atoi(argv[1]);
+ m = atoi(argv[2]);
+ m0 = atoi(argv[3]);
+
+ G1.size = (N+m0) * m;
+ G2.size = (N+m0) * m;
+
+ init_structure(&G1, N);
+ init_structure(&G2, N);
+
+
+ G1.K = init_network(&G1, m0);
+ G2.K = init_network(&G2, m0);
+
+
+ init_times_delta(&G2, N);
+
+ //dump_times(&G2, N);
+
+ fprintf(stderr, "Init finished!\n");
+
+ grow_multi_net_delta(&G1, &G2, N, m, m0, &matr);
+
+ //printf("### G1\n");
+ sprintf(str, "%s_layer1.txt", argv[4]);
+ dump_network_to_file(&G1, str);
+
+ //printf("### G2\n");
+ sprintf(str, "%s_layer2.txt", argv[4]);
+ dump_network_to_file(&G2, str);
+
+ /* dump_network_to_file(S, S_num, argv[4]); */
+ /* printf("Network dumped!\n"); */
+ /* k_max = degree_distr(S, S_num, &distr); */
+ /* printf("k_max is: %d\n", k_max); */
+ /* dump_distr_to_file(distr, k_max, argv[5]); */
+
+  return 0;
+}
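Across the linear nibilab_* models, f1(), f2() and compute_cumul() implement the coupled attachment kernel

\Pi^{[1]}_j \propto a\,k_j^{[1]} + b\,k_j^{[2]}, \qquad
\Pi^{[2]}_j \propto c\,k_j^{[1]} + d\,k_j^{[2]}

where k_j^{[l]} is the degree of node j on layer l and (a, b, c, d) are the coupling parameters read from the command line; the models differ only in when the replica of a node arrives on each layer.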
diff --git a/models/growth/nibilab_linear_random_times.c b/models/growth/nibilab_linear_random_times.c
new file mode 100644
index 0000000..48929aa
--- /dev/null
+++ b/models/growth/nibilab_linear_random_times.c
@@ -0,0 +1,417 @@
+/**
+ *
+ * NiBiLaB model (Nicosia, Bianconi, Latora, Barthelemy) of multiplex
+ * linear preferential attachment
+ *
+ * Nodes arrive sequentially on the first layer, but their replicas on
+ * the second layer arrive at uniformly distributed times.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <math.h>
+
+
+typedef struct{
+ int size;
+ int N;
+ int *val;
+} int_array_t;
+
+
+typedef struct{
+ int K;
+ int N;
+ int size;
+ int *i;
+ int *j;
+ int *degrees;
+ int *arrived;
+ double *cumul;
+ int_array_t *times;
+} ij_net_t;
+
+
+
+typedef struct{
+ double a;
+ double b;
+ double c;
+ double d;
+} coupling_t;
+
+
+typedef struct{
+ int N;
+ double x_min;
+ double *distr;
+ double gamma;
+} distr_t;
+
+
+int init_network(ij_net_t *G, int m0){
+
+ int n;
+
+ for(n=0; n<m0; n++){
+ G->i[n] = n;
+    G->j[n] = (n+1) % m0;
+ G->degrees[n] = 2;
+ G->arrived[n] = n;
+ }
+ G->K = m0;
+ G->N = m0;
+ return n;
+}
+
+
+
+void dump_network(ij_net_t *G){
+
+ int i;
+ for(i=0; i< G->K; i ++){
+ printf("%d %d\n",G->i[i], G->j[i]);
+ }
+}
+
+
+void dump_network_to_file(ij_net_t *G, char *fname){
+
+
+ FILE *f;
+ int i;
+
+
+ f = fopen(fname, "w+");
+
+ for(i=0; i< G->K; i ++){
+ fprintf(f, "%d %d\n",G->i[i], G->j[i]);
+ }
+ fclose(f);
+}
+
+
+
+double f1(double v1, double v2, coupling_t *matr){
+ return v1 * matr->a + v2 * matr->b;
+}
+
+double f2(double v1, double v2, coupling_t *matr){
+ return v1 * matr->c + v2 * matr->d;
+}
+
+
+void compute_cumul(ij_net_t *G1, ij_net_t *G2, coupling_t *matr){
+
+ int i;
+ double val;
+
+ G1->cumul[0] = f1(G1->degrees[ G1->arrived[0] ], G2->degrees[ G1->arrived[0] ], matr) ;
+ G2->cumul[0] = f2(G1->degrees[ G2->arrived[0] ], G2->degrees[ G2->arrived[0] ], matr) ;
+
+ for(i=1; i<G1->N; i ++){
+ val = f1(G1->degrees[ G1->arrived[i] ], G2->degrees[ G1->arrived[i] ],matr) ;
+ G1->cumul[i] = G1->cumul[i-1] + val;
+ }
+ for(i=1; i<G2->N; i ++){
+ val = f2(G1->degrees[ G2->arrived[i] ], G2->degrees[ G2->arrived[i] ], matr) ;
+ G2->cumul[i] = G2->cumul[i-1] + val;
+ }
+}
+
+void dump_cumul(ij_net_t *G){
+
+ int i;
+
+ for (i=0; i<G->N; i ++){
+    printf("%d %2.6f\n", G->arrived[i], G->cumul[i]);
+ }
+
+}
+
+
+
+int select_neigh_cumul(ij_net_t *G){
+
+ double r;
+ int j;
+
+ r = (rand() * 1.0 / RAND_MAX) * G->cumul[ G->N-1 ];
+ //printf(" r: %f ", r);
+
+ j = 0;
+ /* Change with a binary search ???!?!?!?! */
+ while (r > G->cumul[j]){
+ j ++;
+ }
+ return G->arrived[j];
+}
+
+int already_neighbour(ij_net_t *G, int j, int d, int offset){
+
+ int i;
+
+ for(i=G-> K + offset; i< G->K + offset + j; i ++){
+ if (G->j[i] == d)
+ return 1;
+ }
+ return 0;
+}
+
+
+void sample_edges(ij_net_t *G, int n, int m, int offset){
+
+ int j;
+ int d;
+
+ //printf("----");
+ for(j=0; j<m; j++){
+ G->i[G->K + offset + j] = n;
+ d = select_neigh_cumul(G);
+ while(already_neighbour(G, j, d, offset)){
+ d = select_neigh_cumul(G);
+ }
+ //printf(" link %d--%d\n", n, d);
+ G->j[G->K + offset + j] = d;
+ G->degrees[d] += 1;
+ }
+ //printf("\n");
+}
+
+void create_distr(double *v, double gamma, int x_min, int x_max){
+ int n, i;
+ double sum, new_sum;
+
+ n = x_max-x_min + 1;
+
+ sum = 0;
+ for(i=0; i<n; i++){
+ v[i] = pow((x_min + i), gamma);
+ sum += v[i];
+ }
+ new_sum = 0;
+ /* Now we normalize the array*/
+ for(i=0; i<n; i++){
+ v[i]/= sum;
+ new_sum += v[i];
+ }
+ //printf("New_sum: %lf\n", new_sum);
+ /* Now we compute the cumulative distribution*/
+ for(i=1; i<n; i++){
+ v[i] += v[i-1];
+ }
+}
+
+
+inline int find_degree(double *v, int dim, double xi){
+ int i;
+
+ i=0;
+ while(xi > v[i])
+ i++;
+ return i;
+
+}
+
+
+int sample_power_law(distr_t *d){
+
+ double xi, q, val;
+ int k;
+
+ while(1){
+ xi = rand()* 1.0 / RAND_MAX;
+ k = find_degree(d->distr, d->N, xi);
+ val = rand()*1.0/RAND_MAX;
+ q = d->x_min + xi * d->N;
+ q = q / (floor(q) + 1);
+ q = pow(q, d->gamma);
+ if (val <= q){
+ return k;
+ }
+ }
+}
+
+
+
+
+int grow_multi_net_delay(ij_net_t *G1, ij_net_t *G2, int N, int m, int m0, coupling_t *matr){
+
+ int i, j;
+ int d;
+ int i2;
+
+ i2 = m0;
+
+ for(i=m0; i<N; i++){
+ compute_cumul(G1, G2, matr);
+ //dump_cumul(G1);
+ //dump_cumul(G2);
+ //n = m0 + i; /* This is the id of the new node */
+ G1->arrived[i] = i;
+ sample_edges(G1, G1->arrived[i], m, 0);
+ G1->degrees[G1->arrived[i]] += m;
+
+ for (j=0; j < G2->times[i].N; j++){
+ G2->arrived[i2] = G2->times[i].val[j];
+ //printf("%d\n", G2->arrived[i2]);
+ sample_edges(G2, G2->arrived[i2], m, m *j);
+ G2->degrees[G2->arrived[i2]] += m;
+ i2 += 1;
+ }
+ G1->N += 1;
+ G1->K += m;
+ G2->N += G2->times[i].N;
+ G2->K += m * G2->times[i].N;
+ }
+ return 0;
+}
+
+void init_structure(ij_net_t *G, int N){
+
+ int i;
+
+ G->i = malloc(G->size * sizeof(int));
+ G->j = malloc(G->size * sizeof(int));
+ G->degrees = malloc(N * sizeof(int));
+ G->arrived = malloc(N * sizeof(int));
+ G->cumul = malloc(N * sizeof(double));
+ G->times = malloc(N * sizeof(int_array_t));
+ for (i=0; i<N; i++){
+ G->times[i].size = 5;
+ G->times[i].N = 0;
+ G->times[i].val = malloc(5 * sizeof(int));
+ }
+ memset(G->degrees, 0, N*sizeof(int));
+ G->N = 0;
+
+}
+
+void add_node_to_time(ij_net_t *G, int node, int t){
+
+ if (G->times[t].size == G->times[t].N){
+ G->times[t].size += 5;
+    G->times[t].val = realloc(G->times[t].val, G->times[t].size * sizeof(int));
+ }
+ G->times[t].val[G->times[t].N] = node;
+ G->times[t].N += 1;
+}
+
+
+void init_times_delta(ij_net_t *G, int N){
+
+ int i;
+
+ for (i=0; i<N; i ++){
+ add_node_to_time(G,i,i);
+ }
+
+}
+
+
+
+void init_times_delay(ij_net_t *G, distr_t *d, int m0, int N){
+ int i;
+ int t;
+
+ for (i=m0; i<N; i ++){
+ t = sample_power_law(d);/* So t is in the interval [1,N] */
+ printf(" %d", t);
+ if (i+t < N){
+ add_node_to_time(G, i, t+i);
+ }
+ }
+ printf("\n");
+}
+
+void init_times_random(ij_net_t *G, int N){
+
+ int i,j;
+
+ for (i=0; i<N; i ++){
+ j = rand() % N;
+ add_node_to_time(G, i, j);
+ }
+}
+
+
+void dump_times(ij_net_t *G, int N){
+
+ int i, j;
+
+ for (i=0; i<N; i++){
+ printf("%d: ", i);
+ for (j=0; j< G->times[i].N; j++){
+ printf(" %d", G->times[i].val[j]);
+ }
+ printf("\n");
+ }
+
+}
+
+
+int main(int argc, char *argv[]){
+
+ ij_net_t G1, G2;
+
+ int N, m, m0, k_max;
+ coupling_t matr;
+ double gamma;
+ distr_t delay_distr;
+
+ char str[256];
+
+ if (argc < 9){
+ printf("Usage: %s <N> <m> <m0> <outfile> <a> <b> <c> <d>\n", argv[0]);
+ exit(1);
+ }
+
+ srand(time(NULL));
+
+ /* Diagonal coupling */
+ matr.a = atof(argv[5]);
+ matr.b = atof(argv[6]);
+ matr.c = atof(argv[7]);
+ matr.d = atof(argv[8]);
+
+ N = atoi(argv[1]);
+ m = atoi(argv[2]);
+ m0 = atoi(argv[3]);
+
+ G1.size = (N+m0) * m;
+ G2.size = (N+m0) * m;
+
+ init_structure(&G1, N);
+ init_structure(&G2, N);
+
+
+ G1.K = init_network(&G1, m0);
+ G2.K = init_network(&G2, m0);
+
+
+ init_times_random(&G2, N);
+
+ //dump_times(&G2, N);
+
+ fprintf(stderr, "Init finished!\n");
+
+ grow_multi_net_delay(&G1, &G2, N, m, m0, &matr);
+
+ //printf("### G1\n");
+ sprintf(str, "%s_layer1.txt", argv[4]);
+ dump_network_to_file(&G1, str);
+
+ //printf("### G2\n");
+ sprintf(str, "%s_layer2.txt", argv[4]);
+ dump_network_to_file(&G2, str);
+
+ /* dump_network_to_file(S, S_num, argv[4]); */
+ /* printf("Network dumped!\n"); */
+ /* k_max = degree_distr(S, S_num, &distr); */
+ /* printf("k_max is: %d\n", k_max); */
+ /* dump_distr_to_file(distr, k_max, argv[5]); */
+
+  return 0;
+}
diff --git a/models/growth/nibilab_nonlinear.c b/models/growth/nibilab_nonlinear.c
new file mode 100644
index 0000000..3ad9ce8
--- /dev/null
+++ b/models/growth/nibilab_nonlinear.c
@@ -0,0 +1,493 @@
+/**
+ *
+ * NiBiLaB model (Nicosia, Bianconi, Latora, Barthelemy) of multiplex
+ * non-linear preferential attachment.
+ *
+ * The probability for a newly arrived node $i$ to create a link to an
+ * existing node $j$ is
+ *
+ * p_{i->j} \propto k\lay{1}^{\alpha}/k\lay{2}^{\beta}
+ *
+ * where alpha and beta are two parameters.
+ *
+ * Nodes arrive at the same time on both layers.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <math.h>
+
+
+typedef struct{
+ int size;
+ int N;
+ int *val;
+} int_array_t;
+
+
+typedef struct{
+ int K;
+ int N;
+ int size;
+ int *i;
+ int *j;
+ int *degrees;
+ int *arrived;
+ double *cumul;
+ int_array_t *times;
+ double *eta;
+} ij_net_t;
+
+
+
+typedef struct{
+ double a;
+ double b;
+ double c;
+ double d;
+} coupling_t;
+
+
+typedef struct{
+ int N;
+ double x_min;
+ double *distr;
+ double gamma;
+} distr_t;
+
+
+int init_network(ij_net_t *G, int m0){
+
+ int n;
+
+ for(n=0; n<m0; n++){
+ G->i[n] = n;
+    G->j[n] = (n+1) % m0;
+ G->degrees[n] = 2;
+ G->arrived[n] = n;
+ }
+ G->K = m0;
+ G->N = m0;
+ return n;
+}
+
+
+
+void dump_network(ij_net_t *G){
+
+ int i;
+ for(i=0; i< G->K; i ++){
+ printf("%d %d\n",G->i[i], G->j[i]);
+ }
+}
+
+
+void dump_network_to_file(ij_net_t *G, char *fname){
+
+
+ FILE *f;
+ int i;
+
+
+ f = fopen(fname, "w+");
+
+ for(i=0; i< G->K; i ++){
+ fprintf(f, "%d %d\n",G->i[i], G->j[i]);
+ }
+ fclose(f);
+}
+
+
+
+double f1(double v1, double v2, coupling_t *matr){
+ return v1 * matr->a + v2 * matr->b;
+}
+
+double f2(double v1, double v2, coupling_t *matr){
+ return v1 * matr->c + v2 * matr->d;
+}
+
+
+void compute_cumul_nlin(ij_net_t *G1, ij_net_t *G2, double alpha){
+
+ int i;
+ double val;
+
+  /* The bias at layer 1 is k1/k2^alpha */
+  if (G1->degrees[G1->arrived[0]] > 0 && G2->degrees[ G1->arrived[0] ] > 0)
+ G1->cumul[0] = (G1->degrees[G1->arrived[0]]) * 1.0 / pow(G2->degrees[ G1->arrived[0] ],alpha) ;
+ else
+ G1->cumul[0] = 0.0;
+
+ if (G2->degrees[G2->arrived[0]] > 0 && G1->degrees[ G2->arrived[0] ] > 0)
+ G2->cumul[0] = ( G2->degrees[G2->arrived[0]]) * 1.0/ pow(G1->degrees[ G2->arrived[0] ],alpha);
+ else
+ G2->cumul[0] = 0.0;
+
+ for(i=1; i<G1->N; i ++){
+ if (G1->degrees[G1->arrived[i]] > 0 && G2->degrees[ G1->arrived[i] ] > 0)
+ val = (G1->degrees[G1->arrived[i]]) * 1.0 / pow(G2->degrees[ G1->arrived[i] ],alpha) ;
+ else
+ val = 0;
+ G1->cumul[i] = G1->cumul[i-1] + val;
+ }
+ for(i=1; i<G2->N; i ++){
+    if (G2->degrees[G2->arrived[i]] > 0 && G1->degrees[ G2->arrived[i] ] > 0)
+ val = (G2->degrees[G2->arrived[i]]) * 1.0/ pow(G1->degrees[ G2->arrived[i] ], alpha);
+ else
+ val = 0.0;
+ G2->cumul[i] = G2->cumul[i-1] + val;
+ }
+}
+
+
+void compute_cumul_nlin_2(ij_net_t *G1, ij_net_t *G2, double alpha, double beta){
+
+ int i;
+ double val;
+
+  /* The bias at layer 1 is k1^alpha/k2^beta */
+  if (G1->degrees[G1->arrived[0]] > 0 && G2->degrees[ G1->arrived[0] ] > 0)
+ G1->cumul[0] = pow(G1->degrees[G1->arrived[0]], alpha) / pow(G2->degrees[ G1->arrived[0] ],beta) ;
+ else
+ G1->cumul[0] = 0.0;
+
+ if (G2->degrees[G2->arrived[0]] > 0 && G1->degrees[ G2->arrived[0] ] > 0)
+ G2->cumul[0] = pow(G2->degrees[G2->arrived[0]], alpha)/pow(G1->degrees[ G2->arrived[0] ], beta);
+ else
+ G2->cumul[0] = 0.0;
+
+ for(i=1; i<G1->N; i ++){
+ if (G1->degrees[G1->arrived[i]] > 0 && G2->degrees[ G1->arrived[i] ] > 0)
+ val = pow(G1->degrees[G1->arrived[i]], alpha)/pow(G2->degrees[ G1->arrived[i] ],beta) ;
+ else
+ val = 0;
+ G1->cumul[i] = G1->cumul[i-1] + val;
+ }
+ for(i=1; i<G2->N; i ++){
+    if (G2->degrees[G2->arrived[i]] > 0 && G1->degrees[ G2->arrived[i] ] > 0)
+ val = pow(G2->degrees[G2->arrived[i]],alpha)/pow(G1->degrees[ G2->arrived[i] ], beta);
+ else
+ val = 0.0;
+ G2->cumul[i] = G2->cumul[i-1] + val;
+ }
+}
+
+
+void dump_cumul(ij_net_t *G){
+
+ int i;
+
+ for (i=0; i<G->N; i ++){
+ printf("%d %2.6f\n", G->times[i], G->cumul[i]);
+ }
+
+}
+
+
+
+int select_neigh_cumul(ij_net_t *G){
+
+ double r;
+ int j;
+
+ r = (rand() * 1.0 / RAND_MAX) * G->cumul[ G->N-1 ];
+ //printf(" r: %f ", r);
+
+ j = 0;
+ /* Change with a binary search ???!?!?!?! */
+ while (r > G->cumul[j]){
+ j ++;
+ }
+ return G->arrived[j];
+}
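+
+/*
+ * A possible binary-search version of select_neigh_cumul(), as the
+ * comment above suggests: find the smallest j with r <= cumul[j] in
+ * O(log N) instead of O(N). Sketch only -- nothing in this file
+ * calls it.
+ */
+int select_neigh_cumul_bsearch(ij_net_t *G){
+
+  double r;
+  int low, high, mid;
+
+  r = (rand() * 1.0 / RAND_MAX) * G->cumul[ G->N-1 ];
+
+  low = 0;
+  high = G->N - 1;
+  while (low < high){
+    mid = (low + high) / 2;
+    if (r > G->cumul[mid])
+      low = mid + 1;
+    else
+      high = mid;
+  }
+  return G->arrived[low];
+}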
+
+int already_neighbour(ij_net_t *G, int j, int d, int offset){
+
+ int i;
+
+ for(i=G-> K + offset; i< G->K + offset + j; i ++){
+ if (G->j[i] == d)
+ return 1;
+ }
+ return 0;
+}
+
+
+void sample_edges(ij_net_t *G, int n, int m, int offset){
+
+ int j;
+ int d;
+
+ //printf("----");
+ for(j=0; j<m; j++){
+ G->i[G->K + offset + j] = n;
+ d = select_neigh_cumul(G);
+ while(already_neighbour(G, j, d, offset)){
+ d = select_neigh_cumul(G);
+ }
+ //printf(" link %d--%d\n", n, d);
+ G->j[G->K + offset + j] = d;
+ G->degrees[d] += 1;
+ }
+ //printf("\n");
+}
+
+void create_distr(double *v, double gamma, int x_min, int x_max){
+ int n, i;
+ double sum, new_sum;
+
+ n = x_max-x_min + 1;
+
+ sum = 0;
+ for(i=0; i<n; i++){
+ v[i] = pow((x_min + i), gamma);
+ sum += v[i];
+ }
+ new_sum = 0;
+ /* Now we normalize the array*/
+ for(i=0; i<n; i++){
+ v[i]/= sum;
+ new_sum += v[i];
+ }
+ //printf("New_sum: %lf\n", new_sum);
+ /* Now we compute the cumulative distribution*/
+ for(i=1; i<n; i++){
+ v[i] += v[i-1];
+ }
+}
+
+
+static inline int find_degree(double *v, int dim, double xi){
+ int i;
+
+ i=0;
+ while(xi > v[i])
+ i++;
+ return i;
+
+}
+
+
+int sample_power_law(distr_t *d){
+
+ double xi, q, val;
+ int k;
+
+ while(1){
+ xi = rand()* 1.0 / RAND_MAX;
+ k = find_degree(d->distr, d->N, xi);
+ val = rand()*1.0/RAND_MAX;
+ q = d->x_min + xi * d->N;
+ q = q / (floor(q) + 1);
+ q = pow(q, d->gamma);
+ if (val <= q){
+ return k;
+ }
+ }
+}
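+
+/*
+ * Usage sketch for create_distr() and sample_power_law() (illustrative
+ * only, not called by main; the parameter values are arbitrary): build
+ * the cumulative table for a power law with exponent -2.5 on [1,1000],
+ * then draw num samples from it.
+ */
+void example_power_law_samples(int num){
+
+  distr_t d;
+  int x_min = 1, x_max = 1000;
+  int i;
+
+  d.x_min = x_min;
+  d.gamma = -2.5;
+  d.N = x_max - x_min + 1;
+  d.distr = malloc(d.N * sizeof(double));
+  create_distr(d.distr, d.gamma, x_min, x_max);
+  for (i=0; i<num; i++){
+    printf("%d\n", sample_power_law(&d));
+  }
+  free(d.distr);
+}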
+
+
+
+
+int grow_multi_net_nlin_2(ij_net_t *G1, ij_net_t *G2, int N, int m,
+ int m0, double alpha, double beta){
+
+  int i;
+
+
+ for(i=m0; i<N; i++){
+ compute_cumul_nlin_2(G1, G2, alpha, beta);
+ //dump_cumul(G1);
+ //dump_cumul(G2);
+ //n = m0 + i; /* This is the id of the new node */
+ G1->arrived[i] = i;
+ sample_edges(G1, G1->arrived[i], m, 0);
+ G1->degrees[G1->arrived[i]] += m;
+
+ G2->arrived[i] = i;
+ sample_edges(G2, G2->arrived[i], m, 0);
+ G2->degrees[G2->arrived[i]] += m;
+ G1->N += 1;
+ G1->K += m;
+ G2->N += 1;
+ G2->K += m ;
+ }
+ return 0;
+}
+
+void init_structure(ij_net_t *G, int N){
+
+ int i;
+
+ G->i = malloc(G->size * sizeof(int));
+ G->j = malloc(G->size * sizeof(int));
+ G->degrees = malloc(N * sizeof(int));
+ G->arrived = malloc(N * sizeof(int));
+ G->cumul = malloc(N * sizeof(double));
+ G->eta = malloc(N * sizeof(double));
+ G->times = malloc(N * sizeof(int_array_t));
+ for (i=0; i<N; i++){
+ G->times[i].size = 5;
+ G->times[i].N = 0;
+ G->times[i].val = malloc(5 * sizeof(int));
+ }
+ memset(G->degrees, 0, N*sizeof(int));
+ G->N = 0;
+
+}
+
+void add_node_to_time(ij_net_t *G, int node, int t){
+
+ if (G->times[t].size == G->times[t].N){
+ G->times[t].size += 5;
+ G->times[t].val = realloc(G->times[t].val, G->times[t].size);
+ }
+ G->times[t].val[G->times[t].N] = node;
+ G->times[t].N += 1;
+}
+
+
+void init_times_delta(ij_net_t *G, int N){
+
+ int i;
+
+ for (i=0; i<N; i ++){
+ add_node_to_time(G,i,i);
+ }
+
+}
+
+
+
+void init_times_delay(ij_net_t *G, distr_t *d, int m0, int N){
+ int i;
+ int t;
+
+ for (i=m0; i<N; i ++){
+    t = sample_power_law(d); /* So t is in the interval [1,N] */
+ printf(" %d", t);
+ if (i+t < N){
+ add_node_to_time(G, i, t+i);
+ }
+ }
+ printf("\n");
+}
+
+
+
+void dump_times(ij_net_t *G, int N){
+
+ int i, j;
+
+ for (i=0; i<N; i++){
+ printf("%d: ", i);
+ for (j=0; j< G->times[i].N; j++){
+ printf(" %d", G->times[i].val[j]);
+ }
+ printf("\n");
+ }
+
+}
+
+
+void init_eta_random(ij_net_t *G, int N){
+
+ int i;
+ double val;
+
+ for (i=0; i<N; i ++){
+ val = rand() * 1.0/RAND_MAX;
+ G->eta[i] = val;
+ }
+}
+
+void init_eta_ass(ij_net_t *G2, ij_net_t *G1, int N){
+ int i;
+
+ for (i=0; i<N; i ++){
+ G2->eta[i] = G1->eta[i];
+ }
+
+}
+
+void init_eta_dis(ij_net_t *G2, ij_net_t *G1, int N){
+ int i;
+
+ for (i=0; i<N; i ++){
+ G2->eta[i] = 1 - G1->eta[i];
+ }
+
+}
+
+
+int main(int argc, char *argv[]){
+
+ ij_net_t G1, G2;
+
+  int N, m, m0;
+  double alpha, beta;
+
+ char str[256];
+
+  if (argc < 7){
+ printf("Usage: %s <N> <m> <m0> <outfile> <alpha> <beta>\n", argv[0]);
+ exit(1);
+ }
+
+ srand(time(NULL));
+
+ N = atoi(argv[1]);
+ m = atoi(argv[2]);
+ m0 = atoi(argv[3]);
+  alpha = atof(argv[5]);
+ beta = atof(argv[6]);
+
+ G1.size = (N+m0) * m;
+ G2.size = (N+m0) * m;
+
+ init_structure(&G1, N);
+ init_structure(&G2, N);
+
+
+ G1.K = init_network(&G1, m0);
+ G2.K = init_network(&G2, m0);
+
+ //init_eta_random(&G1, N);
+ //init_eta_random(&G2, N);
+
+
+
+ //init_times_delta(&G1, N);
+ //init_times_delta(&G2, N);
+
+ //dump_times(&G2, N);
+
+ fprintf(stderr, "Init finished!\n");
+
+ grow_multi_net_nlin_2(&G1, &G2, N, m, m0, alpha, beta);
+
+ //printf("### G1\n");
+ sprintf(str, "%s_layer1.txt", argv[4]);
+ dump_network_to_file(&G1, str);
+
+ //printf("### G2\n");
+ sprintf(str, "%s_layer2.txt", argv[4]);
+ dump_network_to_file(&G2, str);
+
+ /* dump_network_to_file(S, S_num, argv[4]); */
+ /* printf("Network dumped!\n"); */
+ /* k_max = degree_distr(S, S_num, &distr); */
+ /* printf("k_max is: %d\n", k_max); */
+ /* dump_distr_to_file(distr, k_max, argv[5]); */
+
+  return 0;
+}
diff --git a/models/growth/node_deg_over_time.py b/models/growth/node_deg_over_time.py
new file mode 100644
index 0000000..a71e86c
--- /dev/null
+++ b/models/growth/node_deg_over_time.py
@@ -0,0 +1,82 @@
+####
+##
+## Get an edgelist, a file of arrival times and a node ID, and return
+## the degree of that node as a function of time (we suppose that IDs
+## are sequential)
+##
+##
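+## Example (hypothetical file names):
+##
+##   python node_deg_over_time.py edges.txt arrival_times.txt 5 10
+##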
+
+import sys
+
+
+if len(sys.argv) < 4:
+ print "Usage: %s <netfile> <timefile> <nodeid1> [<nodeid2> <nodeid3>...]" % sys.argv[0]
+ sys.exit(1)
+
+node_id = int(sys.argv[3])
+
+lines = open(sys.argv[2], "r").readlines()
+
+arrival_time = {}
+
+#### WARNING!!!! THIS WORKS ONLY FOR M0=3
+arrival_time[0] = 0
+arrival_time[1] = 1
+arrival_time[2] = 2
+
+
+neigh_by_time = {}
+
+max_t = -1
+
+for l in lines:
+ if l[0] == "#":
+ continue
+ t,node = [int(x) for x in l.strip(" \n").split(" ")]
+ arrival_time[node] = t
+ if t > max_t :
+ max_t = t
+
+
+lines = open(sys.argv[1], "r").readlines()
+
+
+for l in lines:
+ if l[0] == "#":
+ continue
+ n1,n2 = [int(x) for x in l.strip(" \n").split(" ")]
+
+
+ if neigh_by_time.has_key(n1):
+ neigh_by_time[n1].append(arrival_time[n2])
+ else:
+ neigh_by_time[n1] = [arrival_time[n2]]
+
+ if neigh_by_time.has_key(n2):
+ neigh_by_time[n2].append(arrival_time[n1])
+ else:
+ neigh_by_time[n2] = [arrival_time[n1]]
+
+
+
+#print neigh_by_time[node_id]
+
+
+for node_id in sys.argv[3:]:
+ node_id = int(node_id)
+ neigh_by_time[node_id].sort()
+ last_time = neigh_by_time[node_id][0]
+    k = 1
+ print "#### ", node_id
+ for t in neigh_by_time[node_id][1:]:
+ if t != last_time:
+ if last_time < arrival_time[node_id]:
+ print arrival_time[node_id], k
+ else:
+ print last_time, k
+ last_time = t
+ k += 1
+    print max_t, k      # k now equals the node's final degree
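+    # two blank lines separate the output blocks of different nodes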
+ print
+ print
diff --git a/models/nullmodels/model_MDM.py b/models/nullmodels/model_MDM.py
new file mode 100644
index 0000000..5b0a969
--- /dev/null
+++ b/models/nullmodels/model_MDM.py
@@ -0,0 +1,66 @@
+####
+##
+##
+## This is the vertical participation model. For each node i, we use
+## exactly the same value of B_i as in the original network, but we
+## choose at random the layers in which node i will be active. This
+## breaks down intra-layer correlations.
+##
+## We get as input a file which reports, for each value of B_i, the
+## number of nodes in the original network which have that value, i the format:
+##
+## B_i N(B_i)
+##
+##
+##
+## The output is the obtained distribution of bit-strings.
+##
+##
+
+import sys
+import random
+
+
+def to_binary(l):
+ s = 0
+ e = 0
+ for v in l:
+ s += v * pow(2,e)
+        e += 1
+ return s
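+
+## Example: to_binary([1,0,1]) == 5, since the list is read with the
+## least-significant bit first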
+
+
+if len(sys.argv) < 3:
+ print "Usage: %s <Bi_file> <M>" % sys.argv[0]
+ sys.exit(1)
+
+M = int(sys.argv[2])
+
+layers = range(M)
+
+distr = {}
+
+with open(sys.argv[1], "r") as f:
+ for l in f:
+ if l[0] == "#":
+ continue
+ val, num = [int(x) for x in l.strip(" \n").split(" ")]
+ for j in range(num):
+ node_layers = random.sample(layers, val)
+ node_bitstring = [0 for x in range(M)]
+ #print node_bitstring, node_layers
+ for i in node_layers:
+ #print i,
+ node_bitstring[i] = 1
+ #print node_bitstring
+
+ bs = to_binary(node_bitstring)
+ if bs in distr:
+ distr[bs] += 1
+ else:
+ distr[bs] = 1
+
+for k in distr:
+    bitstring = bin(k)[2:]
+    bin_num = bitstring.count('1')
+    # pad the bitstring to M characters so that the column stays
+    # aligned whatever the number of layers
+    sys.stderr.write("%d %s %d\n" % (bin_num, bitstring.rjust(M), distr[k]))
diff --git a/models/nullmodels/model_MSM.py b/models/nullmodels/model_MSM.py
new file mode 100644
index 0000000..2c30df5
--- /dev/null
+++ b/models/nullmodels/model_MSM.py
@@ -0,0 +1,65 @@
+####
+##
+##
+## Create a synthetic multiplex network in which a node $i$ appears at
+## each layer $\alpha$ with a probability equal to $B_i$, which is the
+## fraction of layers in which node $i$ participates in the original
+## multiplex.
+##
+## Take a file of node binary participation indices, and sample a
+## multiplex compatible with that distribution
+##
+##
+## The input file is in the format:
+##
+## nodeID_i B_i
+##
+## The output file is a node-layer participation file, in the format
+##
+## node_id1 layer_id1
+## node_id2 layer_id2
+## .....
+##
+
+import sys
+
+import random
+
+if len(sys.argv) < 3:
+ print "Usage: %s <filein> <M>" % sys.argv[0]
+ sys.exit(1)
+
+M = int(sys.argv[2])
+
+bin_index = {}
+node_ids = []
+
+lines = open(sys.argv[1]).readlines()
+
+
+for l in lines:
+ if l[0] == "#":
+ continue
+ elems = [int(x) for x in l.strip(" \n").split(" ")]
+ bin_index[elems[0]] = 1.0 * elems[1]/M
+ node_ids.append(elems[0])
+
+N = len(node_ids)
+
+node_layers = {}
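+
+## Activate each node independently on every layer with probability
+## B_i: the expected number of active layers for node i is then
+## M * B_i, i.e. the number of layers it occupied in the original
+## multiplex.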
+
+for alpha in range(M):
+ for i in node_ids:
+ val = random.random()
+ if val < bin_index[i]:
+ if node_layers.has_key(i):
+ node_layers[i].append(alpha)
+ else:
+ node_layers[i] = [alpha]
+
+
+for i in node_ids:
+    if node_layers.has_key(i):
+        for alpha in node_layers[i]:
+            print i, alpha
+
diff --git a/models/nullmodels/model_hypergeometric.py b/models/nullmodels/model_hypergeometric.py
new file mode 100644
index 0000000..0a36237
--- /dev/null
+++ b/models/nullmodels/model_hypergeometric.py
@@ -0,0 +1,56 @@
+####
+##
+## This is the hypergeometric model. Each layer has the same number of
+## non-isolated nodes as the initial graph, but the nodes are
+## activated at random. The input is a file of number of non-isolated
+## nodes in each layer, and the total number of nodes in the multiplex.
+##
+## The output file is a node-layer participation file, in the format
+##
+## node_id1 layer_id1
+## node_id2 layer_id2
+## .....
+##
+
+import sys
+import random
+
+if len(sys.argv) < 3:
+ print "Usage: %s <layer_N_file> <N>" % sys.argv[0]
+ sys.exit(1)
+
+N = int(sys.argv[2])
+
+layer_degs = []
+node_layers = {}
+
+lines = open(sys.argv[1]).readlines()
+
+M = 0
+
+
+for l in lines:
+ if l[0] == "#":
+ continue
+ n = [int(x) for x in l.strip(" \n").split(" ")][0]
+ layer_degs.append(n)
+ M += 1
+
+for i in range(M):
+ num = layer_degs[i]
+    added = set()
+ n = 0
+ while n < num:
+ j = random.choice(range(N))
+ if j not in added:
+ n += 1
+            added.add(j)
+ if node_layers.has_key(j):
+ node_layers[j].append(i)
+ else:
+ node_layers[j] = [i]
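+
+## The rejection loop above draws num distinct node ids uniformly at
+## random; random.sample(range(N), num) would be an equivalent
+## one-liner.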
+
+for n in node_layers.keys():
+ for i in node_layers[n]:
+ print n,i
+
diff --git a/models/nullmodels/model_layer_growth.py b/models/nullmodels/model_layer_growth.py
new file mode 100644
index 0000000..46b4c0f
--- /dev/null
+++ b/models/nullmodels/model_layer_growth.py
@@ -0,0 +1,121 @@
+####
+##
+## layer-by-layer multiplex growth
+##
+## - We start from a multiplex with M_0 layers, with a certain number of
+## active nodes each
+##
+## - Each new layer arrives with a certain number N\lay{\alpha} of nodes
+## to be activated (this number is sampled from the observed distribution
+## of N\lay{\alpha}, like in the airlines multiplex)
+##
+## - Each node $i$ is activated with a probability proportional to the
+## number of existing layers in which it is already active, added to an
+## attractivity A :
+##
+## P_i(t) \propto A + B_i(t)
+##
+## - the hope is that A might tune the exponent of the resulting distribution
+## of B_i.....
+##
+##
+## This script takes as input a file which contains the degrees of the
+## layers, the total number of nodes in the multiplex, the initial
+## number M0 of layers in the multiplex and the attractiveness
+## parameter A. If "RND" is specified as a third parameter, then the
+## sequence of N\lay{\alpha} is shuffled
+##
+## Gives as output a list of node-layer participation
+##
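+## Example (hypothetical file name):
+##
+##   python model_layer_growth.py layer_degs.txt 500 3 1 RND
+##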
+
+import sys
+import random
+
+if len(sys.argv) < 5:
+ print "Usage: %s <layers_N> <N> <M0> <A> [RND]" % sys.argv[0]
+ sys.exit(1)
+
+N = int(sys.argv[2])
+M0 = int(sys.argv[3])
+A = int(sys.argv[4])
+
+layer_degs = []
+
+
+if len(sys.argv) > 5 and sys.argv[5] == "RND":
+ randomize = True
+else:
+ randomize = False
+
+lines = open(sys.argv[1]).readlines()
+
+M = 0
+
+
+for l in lines:
+ if l[0] == "#":
+ continue
+ n = [int(x) for x in l.strip(" \n").split(" ")][0]
+ layer_degs.append(n)
+ M += 1
+
+
+if randomize:
+ random.shuffle(layer_degs)
+
+## the list node_Bi contains, at each time, the attachment
+## probabilities, i.e. it is a list which contains each node $i$
+## exactly $A + B_i$ times
+
+node_Bi = []
+
+#
+# initialize the attachment weights, giving every node a baseline
+# weight equal to A (i.e. A copies of each node id in node_Bi)
+#
+
+for i in range(N):
+ for j in range(A):
+ node_Bi.append(i)
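+
+## Note that with A = 0 a node which is never activated in one of the
+## M0 seed layers has no copies in node_Bi, and can therefore never be
+## activated afterwards.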
+
+layers = []
+
+
+for i in range(M0):
+    N_alpha = layer_degs.pop()
+    node_list = []
+    num_nodes = 0
+    while num_nodes < N_alpha:
+        val = random.choice(range(N))
+        if val not in node_list:
+            node_list.append(val)
+            num_nodes += 1
+    for n in node_list:
+        node_Bi.append(n)
+    layers.append(node_list)
+
+# the remaining M - M0 layers grow by preferential activation
+i = M0
+
+
+
+while i < M:
+ N_alpha = layer_degs.pop()
+ node_list = []
+ num_nodes = 0
+ while num_nodes < N_alpha:
+ val = random.choice(node_Bi)
+ if val not in node_list:
+ node_list.append(val)
+ num_nodes += 1
+ for n in node_list:
+ node_Bi.append(n)
+ layers.append(node_list)
+ i += 1
+
+#print layers
+
+for i in range(M):
+ node_list = layers[i]
+ for n in node_list:
+ print n, i
+