-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathrunmlp.c
306 lines (266 loc) · 10.1 KB
/
runmlp.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
/*******************************************************************************
License:
This software was developed at the National Institute of Standards and
Technology (NIST) by employees of the Federal Government in the course
of their official duties. Pursuant to title 17 Section 105 of the
United States Code, this software is not subject to copyright protection
and is in the public domain. NIST assumes no responsibility whatsoever for
its use by other parties, and makes no guarantees, expressed or implied,
about its quality, reliability, or any other characteristic.
Disclaimer:
This software was developed to promote biometric standards and biometric
technology testing for the Federal Government in accordance with the USA
PATRIOT Act and the Enhanced Border Security and Visa Entry Reform Act.
Specific hardware and software products identified in this software were used
in order to perform the software development. In no case does such
identification imply recommendation or endorsement by the National Institute
of Standards and Technology, nor does it imply that the products and equipment
identified are necessarily the best available for the purpose.
*******************************************************************************/
/***********************************************************************
LIBRARY: MLP - Multi-Layer Perceptron Neural Network
FILE: RUNMLP.C
AUTHORS: Charles Wilson
G. T. Candela
Michael D. Garris
DATE: 1992
UPDATED: 09/10/2004
UPDATED: 03/22/2005 by MDG
ROUTINES:
#cat: mlphypscons - classifies the given set of feature vectors using
#cat: the MLP code.
#cat: runmlp - runs the MLP classifier on a given feature vector.
#cat:
#cat: runmlp2 - runs the MLP classifier on a given feature vector.
#cat: This version of this routine returns on error and does not
#cat: directly exit.
***********************************************************************/
#include "mlp.h"
#include "mlpcla.h"
/*************************************************************/
void mlphypscons(int ninps, int nhids, int nouts, char acfunc_hids,
                 char acfunc_outs, float *weights, float *klts,
                 int nklts, int *hyps_i, float *cons)
{
   int vec;
   float *scratch_acts;

   /* Scratch buffer receiving the output activations of each run;
      only the winning class and confidence are kept per vector.
      NOTE(review): allocation failure is presumed handled inside
      malloc_flt (it takes an error-tag string) — confirm. */
   malloc_flt(&scratch_acts, nouts, "mlphypscons : outacts");

   /* klts holds nklts feature vectors back to back, ninps floats
      apiece; classify each one, recording its hypothesized class in
      hyps_i[vec] and its confidence in cons[vec]. */
   for(vec = 0; vec < nklts; vec++)
      runmlp(ninps, nhids, nouts, acfunc_hids, acfunc_outs, weights,
             &(klts[vec * ninps]), scratch_acts,
             &(hyps_i[vec]), &(cons[vec]));

   free(scratch_acts);
}
/*************************************************************/
/* runmlp: Runs the Multi-Layer Perceptron (MLP) on a feature vector.
Input args:
ninps, nhids, nouts: Numbers of input, hidden, and output nodes
of the MLP.
acfunc_hids_code: Code character specifying the type of activation
function to be used on the hidden nodes: must be LINEAR,
SIGMOID, or SINUSOID (defined in parms.h).
acfunc_outs_code: Code character specifying the type of activation
function to be used on the output nodes.
w: The MLP weights.
featvec: The feature vector that the MLP is to be run on; its first
ninps elts will be used.
Output args:
outacs: The output activations. This buffer must be provided by
caller, allocated to (at least) nouts floats.
hypclass: The hypothetical class, as an integer in the range
0 through nouts - 1.
confidence: A floating-point value in the range 0. through 1.
Defined to be outacs[hypclass], i.e. the highest
output-activation value.
*/
void runmlp(int ninps, int nhids, int nouts, char acfunc_hids_code,
            char acfunc_outs_code, float *w, float *featvec, float *outacs,
            int *hypclass, float *confidence)
{
   char str[100];
   /* Constant scalar arguments for the BLAS-style mlp_sgemv calls. */
   static char t = 't';
   static int i1 = 1;
   static float f1 = 1.;
   float *w1, *b1, *w2, *b2, hidacs[MAX_NHIDS], *p, *pe, *maxac_p,
         maxac, ac;
   void (*acfunc_hids)(float *), (*acfunc_outs)(float *);
   void ac_v_linear(float *), ac_v_sigmoid(float *), ac_v_sinusoid(float *);

   acfunc_hids = (void (*)(float *))NULL;
   acfunc_outs = (void (*)(float *))NULL;

   /* Hidden activations live in a fixed-size stack buffer; reject
      networks that would overflow it. */
   if(nhids > MAX_NHIDS) {
      /* snprintf (not sprintf) so the formatted message can never
         overrun str, whatever the integer widths expand to. */
      snprintf(str, sizeof(str),
               "nhids, %d, is > MAX_NHIDS, defined as %d in runmlp.c",
               nhids, MAX_NHIDS);
      fatalerr("runmlp", str, NULL);
   }

   /* Resolve the activation function codes to functions. */
   switch(acfunc_hids_code) {
   case LINEAR:
      acfunc_hids = ac_v_linear;
      break;
   case SIGMOID:
      acfunc_hids = ac_v_sigmoid;
      break;
   case SINUSOID:
      acfunc_hids = ac_v_sinusoid;
      break;
   default:
      snprintf(str, sizeof(str),
               "unsupported acfunc_hids_code %d.\n"
               "Supported codes are LINEAR (%d), SIGMOID (%d), and SINUSOID (%d).",
               (int)acfunc_hids_code, (int)LINEAR, (int)SIGMOID,
               (int)SINUSOID);
      fatalerr("runmlp", str, NULL);
      break;
   }
   switch(acfunc_outs_code) {
   case LINEAR:
      acfunc_outs = ac_v_linear;
      break;
   case SIGMOID:
      acfunc_outs = ac_v_sigmoid;
      break;
   case SINUSOID:
      acfunc_outs = ac_v_sinusoid;
      break;
   default:
      snprintf(str, sizeof(str),
               "unsupported acfunc_outs_code %d.\n"
               "Supported codes are LINEAR (%d), SIGMOID (%d), and SINUSOID (%d).",
               (int)acfunc_outs_code, (int)LINEAR, (int)SIGMOID,
               (int)SINUSOID);
      fatalerr("runmlp", str, NULL);
      break;
   }

   /* Where the weights and biases of the two layers begin in w:
      w is laid out as [hidden weights | hidden biases |
                        output weights | output biases]. */
   b2 = (w2 = (b1 = (w1 = w) + nhids * ninps) + nhids) + nouts * nhids;

   /* Start hidden activations out as first-layer biases. */
   memcpy((char *)hidacs, (char *)b1, nhids * sizeof(float));
   /* Add product of first-layer weights with feature vector. */
   mlp_sgemv(t, ninps, nhids, f1, w1, ninps, featvec, i1, f1, hidacs, i1);
   /* Finish each hidden activation by applying activation function. */
   for(pe = (p = (float *)hidacs) + nhids; p < pe; p++)
      acfunc_hids(p);

   /* Same steps again for second layer. */
   memcpy((char *)outacs, (char *)b2, nouts * sizeof(float));
   mlp_sgemv(t, nhids, nouts, f1, w2, nhids, hidacs, i1, f1, outacs, i1);
   for(pe = (p = outacs) + nouts; p < pe; p++)
      acfunc_outs(p);

   /* Find the hypothetical class -- the class whose output node
      activated most strongly -- and the confidence -- that activation
      value. */
   for(pe = (maxac_p = p = outacs) + nouts, maxac = *p, p++; p < pe;
     p++)
      if((ac = *p) > maxac) {
         maxac = ac;
         maxac_p = p;
      }
   *hypclass = maxac_p - outacs;
   *confidence = maxac;
}
/*************************************************************/
/* runmlp2: Runs the Multi-Layer Perceptron (MLP) on a feature vector.
Input args:
ninps, nhids, nouts:
Numbers of input, hidden, and output nodes
of the MLP.
acfunc_hids_code:
Code character specifying the type of activation
function to be used on the hidden nodes: must be LINEAR,
SIGMOID, or SINUSOID (defined in parms.h).
acfunc_outs_code:
Code character specifying the type of activation
function to be used on the output nodes.
w:
The MLP weights.
featvec:
The feature vector that the MLP is to be run on; its first
ninps elts will be used.
Output args:
outacs:
The output activations. This buffer must be provided by
caller, allocated to (at least) nouts floats.
hypclass:
The hypothetical class, as an integer in the range
0 through nouts - 1.
confidence:
A floating-point value in the range 0. through 1.
Defined to be outacs[hypclass], i.e. the highest
output-activation value.
*/
int runmlp2(const int ninps, const int nhids, const int nouts,
            const char acfunc_hids_code, const char acfunc_outs_code,
            float *w, float *featvec,
            float *outacs, int *hypclass, float *confidence)
{
   /* Constant scalar arguments for the BLAS-style mlp_sgemv calls. */
   static char trans = 't';
   static int inc_one = 1;
   static float one_f = 1.0;
   float hidacts[MAX_NHIDS];
   float *wts_in, *bias_in, *wts_out, *bias_out;
   float best_ac;
   int j, best_i;
   void (*actfn_hid)(float *), (*actfn_out)(float *);
   void ac_v_linear(float *), ac_v_sigmoid(float *), ac_v_sinusoid(float *);

   /* The hidden activations use a fixed-size stack buffer; refuse
      networks that would not fit. */
   if(nhids > MAX_NHIDS) {
      fprintf(stderr, "ERROR : runmlp2 : nhids : %d > %d\n",
              nhids, MAX_NHIDS);
      return(-2);
   }

   /* Map each activation-function code onto its implementation,
      rejecting anything unrecognized. */
   if(acfunc_hids_code == LINEAR)
      actfn_hid = ac_v_linear;
   else if(acfunc_hids_code == SIGMOID)
      actfn_hid = ac_v_sigmoid;
   else if(acfunc_hids_code == SINUSOID)
      actfn_hid = ac_v_sinusoid;
   else {
      fprintf(stderr, "ERROR : runmlp2 : acfunc_hids_code : %d unsupported\n",
              acfunc_hids_code);
      return(-3);
   }

   if(acfunc_outs_code == LINEAR)
      actfn_out = ac_v_linear;
   else if(acfunc_outs_code == SIGMOID)
      actfn_out = ac_v_sigmoid;
   else if(acfunc_outs_code == SINUSOID)
      actfn_out = ac_v_sinusoid;
   else {
      fprintf(stderr, "ERROR : runmlp2 : acfunc_outs_code : %d unsupported\n",
              acfunc_outs_code);
      return(-4);
   }

   /* w is laid out as [hidden weights | hidden biases |
                        output weights | output biases]. */
   wts_in   = w;
   bias_in  = wts_in + nhids * ninps;
   wts_out  = bias_in + nhids;
   bias_out = wts_out + nouts * nhids;

   /* Hidden layer: seed with biases, accumulate weights x features,
      then apply the activation function elementwise. */
   memcpy((char *)hidacts, (char *)bias_in, nhids * sizeof(float));
   mlp_sgemv(trans, ninps, nhids, one_f, wts_in, ninps, featvec, inc_one,
             one_f, hidacts, inc_one);
   for(j = 0; j < nhids; j++)
      actfn_hid(&(hidacts[j]));

   /* Output layer: same three steps. */
   memcpy((char *)outacs, (char *)bias_out, nouts * sizeof(float));
   mlp_sgemv(trans, nhids, nouts, one_f, wts_out, nhids, hidacts, inc_one,
             one_f, outacs, inc_one);
   for(j = 0; j < nouts; j++)
      actfn_out(&(outacs[j]));

   /* The hypothesized class is the index of the strongest output
      activation; the confidence is that activation value. */
   best_i = 0;
   best_ac = outacs[0];
   for(j = 1; j < nouts; j++){
      if(outacs[j] > best_ac) {
         best_ac = outacs[j];
         best_i = j;
      }
   }
   *hypclass = best_i;
   *confidence = best_ac;

   return(0);
}