-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathSigmoidLikelihood.py
146 lines (117 loc) · 3.21 KB
/
SigmoidLikelihood.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
#!/usr/bin/env python
from sys import argv
from svm import *
from math import log, exp
from string import atof
from random import randrange
#--[Basic Function]---------------------------------------------------------------------
#input decision_values, real_labels{1,-1}, #positive_instances, #negative_instances
#output [A,B] that minimize sigmoid likilihood
#refer to Platt's Probablistic Output for Support Vector Machines
import sys
def main():
    """Debug helper: echo the matrix file named as argv[1] to stdout.

    argv[2] (the result file) is still required on the command line for
    interface compatibility, but it is never read -- the original code
    opened it without ever using its contents.

    Fixes over the original top-level script:
      * `.close` was written without parentheses (attribute access, a
        no-op), so no file was ever actually closed.
      * the matrix file was opened twice (`myfile` was never used).
      * the second `for line in matrixf` loop iterated an already
        exhausted file object and silently printed nothing.
      * the wrong handle (`resultf`) was "closed" after reading `matrixf`.
    """
    matrixfn = sys.argv[1]
    resultfn = sys.argv[2]  # accepted but unused; kept for CLI compatibility
    matrixf = open(matrixfn, 'r')
    try:
        # Lines keep their own trailing newline, so write them verbatim
        # (equivalent to the py2 `print line,` the original intended).
        for line in matrixf:
            sys.stdout.write(line)
    finally:
        matrixf.close()

if __name__ == '__main__':
    main()
def SigmoidTrain(deci, label, prior1=None, prior0=None):
    """Fit Platt's sigmoid to SVM decision values.

    Finds [A, B] minimizing the negative log-likelihood of the model
    P(y=1 | f) = 1 / (1 + exp(A*f + B)) using Newton's method with a
    backtracking (Armijo) line search, following Platt's "Probabilistic
    Outputs for Support Vector Machines" as made numerically robust by
    Lin, Lin and Weng (2007).

    Parameters:
        deci   -- list of decision values f_i
        label  -- list of labels; any value > 0 counts as positive,
                  anything else as negative (nominally {1, -1})
        prior1 -- number of positive examples; counted from label if None
        prior0 -- number of negative examples; counted from label if None

    Returns the fitted pair [A, B].
    """
    # Count prior0 and prior1 from the labels when not supplied.
    if prior1 is None or prior0 is None:
        prior1, prior0 = 0, 0
        for y in label:
            if y > 0:
                prior1 += 1
            else:
                prior0 += 1

    # Parameter settings
    maxiter = 100    # maximum number of Newton iterations
    minstep = 1e-10  # minimum step taken in line search
    sigma = 1e-12    # keeps the Hessian numerically strictly PD
    eps = 1e-5       # gradient tolerance for convergence

    # Regularized targets: (N+ + 1)/(N+ + 2) and 1/(N- + 2) instead of
    # exactly 1 and 0, which avoids infinite log-likelihood terms.
    hiTarget = (prior1 + 1.0) / (prior1 + 2.0)
    loTarget = 1 / (prior0 + 2.0)
    length = prior1 + prior0
    t = [hiTarget if label[i] > 0 else loTarget for i in range(length)]

    # Initial point and initial objective value.
    A, B = 0.0, log((prior0 + 1.0) / (prior1 + 1.0))
    fval = 0.0
    for i in range(length):
        fApB = deci[i] * A + B
        # Branch on the sign of fApB so log(1 + exp(.)) never overflows.
        if fApB >= 0:
            fval += t[i] * fApB + log(1 + exp(-fApB))
        else:
            fval += (t[i] - 1) * fApB + log(1 + exp(fApB))

    for it in range(maxiter):
        # Update gradient and Hessian (use H' = H + sigma*I).
        h11 = h22 = sigma  # numerically ensures strict PD
        h21 = g1 = g2 = 0.0
        for i in range(length):
            fApB = deci[i] * A + B
            if fApB >= 0:
                p = exp(-fApB) / (1.0 + exp(-fApB))
                q = 1.0 / (1.0 + exp(-fApB))
            else:
                p = 1.0 / (1.0 + exp(fApB))
                q = exp(fApB) / (1.0 + exp(fApB))
            d2 = p * q
            h11 += deci[i] * deci[i] * d2
            h22 += d2
            h21 += deci[i] * d2
            d1 = t[i] - p
            g1 += deci[i] * d1
            g2 += d1
        # Stopping criterion: gradient sufficiently close to zero.
        if abs(g1) < eps and abs(g2) < eps:
            break
        # Newton direction -inv(H')*g, 2x2 system solved in closed form.
        det = h11 * h22 - h21 * h21
        dA = -(h22 * g1 - h21 * g2) / det
        dB = -(-h21 * g1 + h11 * g2) / det
        gd = g1 * dA + g2 * dB
        # Backtracking line search with sufficient-decrease condition.
        stepsize = 1
        while stepsize >= minstep:
            newA = A + stepsize * dA
            newB = B + stepsize * dB
            # New function value at the trial point.
            newf = 0.0
            for i in range(length):
                fApB = deci[i] * newA + newB
                if fApB >= 0:
                    newf += t[i] * fApB + log(1 + exp(-fApB))
                else:
                    newf += (t[i] - 1) * fApB + log(1 + exp(fApB))
            # Check sufficient decrease (Armijo condition).
            if newf < fval + 0.0001 * stepsize * gd:
                A, B, fval = newA, newB, newf
                break
            stepsize = stepsize / 2.0
        if stepsize < minstep:
            # Diagnostics go to stderr so stdout results stay clean.
            sys.stderr.write("line search fails %s %s %s %s %s %s %s\n"
                             % (A, B, g1, g2, dA, dB, gd))
            return [A, B]
    if it >= maxiter - 1:
        sys.stderr.write("reaching maximal iterations %s %s\n" % (g1, g2))
    return [A, B]
#reads decision_value and Platt parameter [A,B]
#outputs predicted probability
def SigmoidPredict(deci, AB):
    """Return P(y=1 | deci) = 1 / (1 + exp(A*deci + B)).

    deci -- a single decision value
    AB   -- the [A, B] pair produced by SigmoidTrain

    The original ended with an unreachable `return prob`; `prob` was
    never defined, so that line would have raised NameError had it ever
    executed. It has been removed.
    """
    A, B = AB
    fApB = deci * A + B
    # Evaluate the algebraically-equivalent form that keeps the exp()
    # argument non-positive, so it can never overflow.
    if fApB >= 0:
        return exp(-fApB) / (1.0 + exp(-fApB))
    else:
        return 1.0 / (1 + exp(fApB))