Nonlinear regression: a JavaScript implementation of gradient descent optimized with ADAM.

Multivariate nonlinear regression. I am working on a predictive model for alkaline water electrolyzers (AWE). The literature is not particularly generous regarding dynamic models. The only model available and well referenced in the literature is Ulleberg's [1] (spelled "Ulberg" in the code below).
It is a six-parameter semi-empirical model consisting of a nonlinear function.

The Vcell is the potential difference to be applied to the electrolyser to obtain the current I.

The goal is to model the six parameters so that the function “fits” the electrolyzer experimental data.

Data used comes from a 15kW electrolyser.

In the following we will show the code used for the nonlinear regression of the six parameters.
I decided to share this tiny contribution because I would have liked to find a ready-to-use JavaScript nonlinear regression code.

		<script>
/**
 * Ulleberg semi-empirical cell-voltage model:
 *   Vcell = Vrev + ((r1 + r2*x1)/A)*x2 + s*log10(1 + ((t1 + t2/x1 + t3/x1^2)/A)*x2)
 *
 * @param initial_params - the six model parameters [r1, r2, s, t1, t2, t3]
 * @param x1 - first regressor vector (constant 55 in the dataset below — presumably temperature; confirm)
 * @param x2 - second regressor vector (presumably current; confirm)
 * @param y  - experimental outputs; only its length is used here (it sets the sample count)
 * @returns array of predicted cell voltages, one entry per sample
 */
function Ulberg(initial_params, x1, x2, y) {
	const result = [];
	// Warn when ANY pair of input vectors disagrees in length.
	// (The original used `||`, which only warned when BOTH comparisons failed.)
	if (!((x1.length == x2.length) && (x2.length == y.length))) {
		console.log("I vettori hanno grandezze diverse");
	}
	const [r1, r2, s, t1, t2, t3] = initial_params;
	// NOTE(review): Vrev = 12 is the value this fit uses; the physical reversible
	// voltage of a single cell is ~1.23 V, so this looks stack-level — confirm.
	const Vrev = 12;
	const A = 1; // electrode area, normalized to 1 here

	for (let i = 0; i < y.length; i++) {
		const value =
			Vrev +
			((r1 + r2 * x1[i]) / A) * x2[i] +
			s * Math.log10(1 + ((t1 + t2 / x1[i] + t3 / (x1[i] * x1[i])) / A) * x2[i]);
		result.push(value);
	}
	return result;
}

	
/**
 * Mean squared error between two equally-sized numeric vectors.
 *
 * @param actual - model predictions
 * @param experimental - measured values
 * @returns sum((actual[i] - experimental[i])^2) / actual.length
 *          (NaN for empty inputs, since 0/0 — callers pass non-empty data)
 */
function meanSquaredError(actual, experimental) {
	if (actual.length != experimental.length) {
		console.log("I vettori hanno grandezze diverse, impossibile determinare il mean squared error.");
	}
	// `error` and `i` were implicit globals in the original; both are now scoped.
	let error = 0;
	for (let i = 0; i < actual.length; i++) {
		error += Math.pow(actual[i] - experimental[i], 2);
	}
	return error / actual.length;
}


// Element-wise sum of two equal-length vectors: [a0+b0, a1+b1, ...].
function addVector(a, b) {
	return a.map((value, index) => value + b[index]);
}

// Element-wise difference of two equal-length vectors: [a0-b0, a1-b1, ...].
function subtractVector(a, b) {
	return a.map((value, index) => value - b[index]);
}

// Element-wise (Hadamard) product of two equal-length vectors.
function multiplyVector(a, b) {
	return a.map((value, index) => value * b[index]);
}

// Element-wise quotient of two equal-length vectors: [a0/b0, a1/b1, ...].
function divideVector(a, b) {
	return a.map((value, index) => value / b[index]);
}

// Raise every component of `a` to the given power.
function elevateVector(a, power) {
	return a.map((value) => Math.pow(value, power));
}

// Component-wise square root of a vector.
function sqrtVector(a) {
	return a.map((value) => Math.sqrt(value));
}

// Scale every component of `vector` by the scalar `a`.
function multiplyScalar(a, vector) {
	return vector.map((component) => component * a);
}

/* Your data. Please make sure that all vectors have the same dimension */
const x1 = [55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55];
const x2 = [0.001256,0.007716,0.01337,0.019026,0.027493,0.033133,0.050034,0.066534,0.086245,0.105553,0.125261,0.156229,0.189204,0.232631,0.265603,0.28812,0.32109,0.345216,0.377782,0.410752,0.433669,0.467039,0.488748];
const y = [15.5113,16.3797,17.1882,18.0416,18.8204,19.2249,19.8249,20.4399,20.9205,21.386,21.8068,22.3632,22.8748,23.4324,23.8543,24.1107,24.4727,24.7592,25.0613,25.4084,25.6349,25.9222,26.0588];

// Optimizer hyper-parameters.
const h = 0.01;       // finite-difference step for the numerical gradient
const lambda = 0.01;  // learning rate
const beta1 = 0.9;    // ADAM first-moment decay
const beta2 = 0.999;  // ADAM second-moment decay

const noi = 100000; /* Number of iterations */

// Starting guess for the six parameters [r1, r2, s, t1, t2, t3].
// (This declaration was commented out in the original, which made every
// later use of initial_params throw a ReferenceError.)
let initial_params = [1, 1, 1, 1, 1, 1];

// ADAM state: first/second moment estimates and their bias-corrected copies.
let m = [0, 0, 0, 0, 0, 0];
let v = [0, 0, 0, 0, 0, 0];
let m_hat = [0, 0, 0, 0, 0, 0];
let v_hat = [0, 0, 0, 0, 0, 0];
// One epsilon entry per parameter (the original listed 8 entries for 6 parameters).
const epsilon = [0.00000001, 0.00000001, 0.00000001, 0.00000001, 0.00000001, 0.00000001];

let explore_params = [];
let gradient = [];

for (let j = 0; j < noi; j++) {
	// Forward-difference estimate of each partial derivative of the MSE.
	// The base error is loop-invariant, so compute it once per iteration
	// instead of once per parameter.
	const baseError = meanSquaredError(Ulberg(initial_params, x1, x2, y), y);
	for (let t = 0; t < initial_params.length; t++) {
		explore_params = initial_params.slice();
		explore_params[t] = explore_params[t] + h;
		gradient.push((meanSquaredError(Ulberg(explore_params, x1, x2, y), y) - baseError) / h);
	}

	// ADAM moment updates.
	m = addVector(multiplyScalar(beta1, m), multiplyScalar(1 - beta1, gradient));
	v = addVector(multiplyScalar(beta2, v), multiplyScalar(1 - beta2, multiplyVector(gradient, gradient)));

	// Bias correction of the moment estimates.
	m_hat = multiplyScalar(1 / (1 - Math.pow(beta1, j + 1)), m);
	v_hat = multiplyScalar(1 / (1 - Math.pow(beta2, j + 1)), v);

	// Parameter step: p -= lambda * m_hat / (sqrt(v_hat) + epsilon).
	initial_params = subtractVector(initial_params, divideVector(multiplyScalar(lambda, m_hat), addVector(epsilon, elevateVector(v_hat, 0.5))));

	// Reset the gradient accumulator for the next iteration.
	gradient = [];
	console.log("Sto per stampare i parametri iniziali modificati:");
	console.log(initial_params);
	console.log("Errore:");
	console.log(meanSquaredError(Ulberg(initial_params, x1, x2, y), y));
}



</script>

Instructions for use

The code can be modified by renaming the occurrence “Ulberg” everywhere and inserting the function name of your choice. Of course, the function itself must also be changed.

If you want to increase or decrease the number of parameters simply start the code with an initial_params vector with a different number of “1”.

The vectors x1, x2,… xn, y can have the size you prefer depending on your dataset, as long as they are homogeneous with each other.

No code changes are needed other than adding or removing parameters in the "Ulberg" function.

This script is created in JavaScript since in the future it will be necessary to dynamically obtain data from a server starting from a PHP script.

You can opt to use libraries like TensorFlow, but I preferred to write the code from scratch to have more control over it in the future.

As the image shows, the six parameters have changed to align perfectly with the dataset.

For nonlinear analyses, we recommend using the ADAM algorithm: with plain gradient descent, this specific function (rescaled by the standard deviation) got stuck in a local minimum and failed to converge.

This work reminded me of my Ph.D. days.

[1] https://www.researchgate.net/publication/223231380_Modeling_of_advanced_alkaline_electrolyzers_A_system_simulation_approach

[2] https://www.geogebra.org/m/mn8fd2bh (this Geogebra script is a contribution by hawe, also reported in the comments below). We kindly thank him for his nice work.

Condividi questo articolo :)
Subscribe
Notificami
guest
2 Commenti
Oldest
Newest Most Voted
Inline Feedbacks
View all comments
hawe

Hallo,
thank you for the code.

There is a nonsens line bevor gradient.push

y) - meanSquaredError(Ulberg(initial_params, x1, x2, y), y))/h);

epsilon.length = 8 ?

I did some adaptions to run the code in ggb. most irritating is the ‘const’ scope of valore_funzione causing no updates of the function in ggb.
https://www.geogebra.org/m/mn8fd2bh

grazie mille
hawe

2
0
Commenta per partecipare alla discussionex