#!/usr/bin/env python

from __future__ import print_function
import sys

# scipy is optional: the pure-Python code paths below are used when it is missing.
try:
    import scipy
    from scipy import sparse
except ImportError:
    scipy = None
    sparse = None

__all__ = ['svm_read_problem', 'evaluations', 'csr_find_scale_param', 'csr_scale']
def svm_read_problem(data_file_name, return_scipy=False):
    """
    svm_read_problem(data_file_name, return_scipy=False) -> [y, x], y: list, x: list of dictionary
    svm_read_problem(data_file_name, return_scipy=True) -> [y, x], y: ndarray, x: csr_matrix

    Read LIBSVM-format data from data_file_name and return labels y
    and data instances x.
    """
    prob_y = []
    prob_x = []
    row_ptr = [0]
    col_idx = []
    for line in open(data_file_name):
        line = line.split(None, 1)
        # In case of an instance with all-zero features
        if len(line) == 1:
            line += ['']
        label, features = line
        prob_y += [float(label)]
        if scipy is not None and return_scipy:
            nz = 0
            for e in features.split():
                ind, val = e.split(":")
                val = float(val)
                # Skip zero-valued features so the CSR matrix stays sparse;
                # indices shift from 1-based (LIBSVM) to 0-based (scipy).
                if val != 0:
                    col_idx += [int(ind)-1]
                    prob_x += [val]
                    nz += 1
            row_ptr += [row_ptr[-1]+nz]
        else:
            xi = {}
            for e in features.split():
                ind, val = e.split(":")
                xi[int(ind)] = float(val)
            prob_x += [xi]
    if scipy is not None and return_scipy:
        prob_y = scipy.array(prob_y)
        prob_x = scipy.array(prob_x)
        col_idx = scipy.array(col_idx)
        row_ptr = scipy.array(row_ptr)
        prob_x = sparse.csr_matrix((prob_x, col_idx, row_ptr))
    return (prob_y, prob_x)
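# Usage sketch (informal; 'heart_scale' is the sample data file shipped with
# LIBSVM and is assumed to be in the working directory):
#     y, x = svm_read_problem('heart_scale')                     # list / list of dict
#     y, x = svm_read_problem('heart_scale', return_scipy=True)  # ndarray / csr_matrix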
def evaluations_scipy(ty, pv):
    """
    evaluations_scipy(ty, pv) -> (ACC, MSE, SCC)
    ty, pv: ndarray

    Calculate accuracy, mean squared error and squared correlation coefficient
    using the true values (ty) and predicted values (pv).
    """
    if not (scipy is not None and isinstance(ty, scipy.ndarray) and isinstance(pv, scipy.ndarray)):
        raise TypeError("type of ty and pv must be ndarray")
    if len(ty) != len(pv):
        raise ValueError("len(ty) must be equal to len(pv)")
    ACC = 100.0*(ty == pv).mean()
    MSE = ((ty - pv)**2).mean()
    l = len(ty)
    sumv = pv.sum()
    sumy = ty.sum()
    sumvy = (pv*ty).sum()
    sumvv = (pv*pv).sum()
    sumyy = (ty*ty).sum()
    with scipy.errstate(all='raise'):
        try:
            SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
        except FloatingPointError:
            SCC = float('nan')
    return (float(ACC), float(MSE), float(SCC))
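# Note: SCC is the squared Pearson correlation coefficient, computed with the
# usual one-pass formula
#     SCC = (l*sumvy - sumv*sumy)**2 / ((l*sumvv - sumv**2) * (l*sumyy - sumy**2))
# If ty or pv is constant the denominator is zero; the errstate guard above
# turns the resulting floating-point error into SCC = nan.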
def evaluations(ty, pv, useScipy=True):
    """
    evaluations(ty, pv, useScipy=True) -> (ACC, MSE, SCC)
    ty, pv: list, tuple or ndarray
    useScipy: convert ty, pv to ndarray and use scipy functions for the evaluation

    Calculate accuracy, mean squared error and squared correlation coefficient
    using the true values (ty) and predicted values (pv).
    """
    if scipy is not None and useScipy:
        return evaluations_scipy(scipy.asarray(ty), scipy.asarray(pv))
    if len(ty) != len(pv):
        raise ValueError("len(ty) must be equal to len(pv)")
    total_correct = total_error = 0
    sumv = sumy = sumvv = sumyy = sumvy = 0
    for v, y in zip(pv, ty):
        if y == v:
            total_correct += 1
        total_error += (v-y)*(v-y)
        sumv += v
        sumy += y
        sumvv += v*v
        sumyy += y*y
        sumvy += v*y
    l = len(ty)
    ACC = 100.0*total_correct/l
    MSE = total_error/l
    try:
        SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
    except ZeroDivisionError:
        SCC = float('nan')
    return (float(ACC), float(MSE), float(SCC))
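# Usage sketch (informal): identical true and predicted values give a perfect
# score, as long as the values are not all the same constant.
#     ACC, MSE, SCC = evaluations([1, -1, 1, 1], [1, -1, 1, 1])
#     # ACC == 100.0, MSE == 0.0, SCC == 1.0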
def csr_find_scale_param(x, lower=-1, upper=1):
    """
    csr_find_scale_param(x, lower=-1, upper=1) -> {'coef': ndarray, 'offset': ndarray}

    Find per-feature scaling parameters that map each feature of the
    csr_matrix x into the range [lower, upper].
    """
    assert isinstance(x, sparse.csr_matrix)
    assert lower < upper
    l, n = x.shape
    feat_min = x.min(axis=0).toarray().flatten()
    feat_max = x.max(axis=0).toarray().flatten()
    coef = (feat_max - feat_min) / (upper - lower)
    coef[coef != 0] = 1.0 / coef[coef != 0]

    # (x - ones(l,1) * feat_min') * diag(coef) + lower
    # = x * diag(coef) - ones(l, 1) * (feat_min' * diag(coef)) + lower
    # = x * diag(coef) + ones(l, 1) * (-feat_min' * diag(coef) + lower)
    # = x * diag(coef) + ones(l, 1) * offset'
    offset = -feat_min * coef + lower
    offset[coef == 0] = 0

    # A nonzero offset turns every entry of its column into a nonzero, so the
    # scaled matrix gains about sum(offset != 0) * l nonzeros.
    if sum(offset != 0) * l > 3 * x.getnnz():
        print(
            "WARNING: The #nonzeros of the scaled data is at least 2 times larger than the original one.\n"
            "If feature values are non-negative and sparse, set lower=0 rather than the default lower=-1.",
            file=sys.stderr)

    return {'coef': coef, 'offset': offset}
def csr_scale(x, scale_param):
    """
    csr_scale(x, scale_param) -> csr_matrix

    Scale the csr_matrix x with the parameters returned by
    csr_find_scale_param.
    """
    assert isinstance(x, sparse.csr_matrix)
    offset = scale_param['offset']
    coef = scale_param['coef']
    assert len(coef) == len(offset)
    l, n = x.shape
    if not n == len(coef):
        print("WARNING: The dimension of scaling parameters and feature number do not match.", file=sys.stderr)
        # Pad or truncate the parameters to match the feature count of x.
        coef = scipy.resize(coef, n)
        offset = scipy.resize(offset, n)

    # scaled_x = x * diag(coef) + ones(l, 1) * offset'
    offset = sparse.csr_matrix(offset.reshape(1, n))
    offset = sparse.vstack([offset] * l, format='csr', dtype=x.dtype)
    scaled_x = x.dot(sparse.diags(coef, 0, shape=(n, n))) + offset

    if scaled_x.getnnz() > x.getnnz():
        print(
            "WARNING: original #nonzeros %d\n" % x.getnnz() +
            "       > new      #nonzeros %d\n" % scaled_x.getnnz() +
            "If feature values are non-negative and sparse, get scale_param by setting lower=0 rather than the default lower=-1.",
            file=sys.stderr)

    return scaled_x
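# Usage sketch (informal; 'heart_scale' as above): compute scaling parameters
# on the training data, then reuse the same parameters for the test data.
#     y, x = svm_read_problem('heart_scale', return_scipy=True)
#     param = csr_find_scale_param(x, lower=0)
#     x = csr_scale(x, param)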