Skip to content

Commit f121cba

Browse files
committed
eol
1 parent 52dad3c commit f121cba

2 files changed

Lines changed: 141 additions & 141 deletions

File tree

Lines changed: 48 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -1,48 +1,48 @@
1-
# -*- coding: utf-8 -*-
2-
"""
3-
@brief test log(time=12s)
4-
"""
5-
import unittest
6-
import pandas
7-
import numpy
8-
from sklearn import datasets
9-
from sklearn.linear_model import LinearRegression
10-
from sklearn.metrics import r2_score
11-
from pyquickhelper.pycode import ExtTestCase
12-
from mlinsights.metrics import r2_score_comparable
13-
14-
15-
class TestScoringMetrics(ExtTestCase):
    # Unit tests for r2_score_comparable (imported from
    # mlinsights.metrics at the top of the file).

    def test_r2_score_comparable(self):
        """
        Checks that *r2_score_comparable* with ``tr='log'`` matches
        :func:`r2_score` computed on the log-transformed target, and
        compares scores obtained with the ``tr`` / ``inv_tr`` options.
        """
        iris = datasets.load_iris()
        X = iris.data[:, :4]
        # shift targets so that log(y) is defined (iris targets start at 0)
        y = iris.target + 1
        # NOTE(review): df is built but never used afterwards —
        # presumably leftover code; consider removing.
        df = pandas.DataFrame(X)
        df.columns = ["X1", "X2", "X3", "X4"]
        model1 = LinearRegression().fit(X, y)
        model2 = LinearRegression().fit(X, numpy.log(y))
        r2a = r2_score(y, model1.predict(X))
        r2b = r2_score(numpy.log(y), model2.predict(X))
        r2c = r2_score_comparable(y, model2.predict(X), tr='log')
        r2d = r2_score_comparable(y, model2.predict(X), inv_tr='exp')
        # tr='log' transforms the target: r2c must equal r2b exactly
        self.assertEqual(r2b, r2c)
        self.assertGreater(r2c, r2a)
        self.assertLesser(r2a, r2d)
        # applying 'exp' to both target and predictions is expected
        # to produce a poor (negative) score
        r2e = r2_score_comparable(y, model2.predict(X), inv_tr='exp', tr='exp')
        self.assertLesser(r2e, 0)

    def test_r2_score_comparable_exception(self):
        """Checks exceptions raised for invalid ``tr`` / ``inv_tr`` values."""
        iris = datasets.load_iris()
        y = iris.target + 1
        # no transformation at all is rejected
        self.assertRaise(lambda: r2_score_comparable(y, y), ValueError)
        # unknown transformation names are rejected as non-callable
        self.assertRaise(
            lambda: r2_score_comparable(y, y, tr="log2"),
            TypeError)
        self.assertRaise(
            lambda: r2_score_comparable(y, y, inv_tr="log2"),
            TypeError)
47-
if __name__ == "__main__":
    # Run the tests when the module is executed directly.
    unittest.main()
1+
# -*- coding: utf-8 -*-
2+
"""
3+
@brief test log(time=12s)
4+
"""
5+
import unittest
6+
import pandas
7+
import numpy
8+
from sklearn import datasets
9+
from sklearn.linear_model import LinearRegression
10+
from sklearn.metrics import r2_score
11+
from pyquickhelper.pycode import ExtTestCase
12+
from mlinsights.metrics import r2_score_comparable
13+
14+
15+
class TestScoringMetrics(ExtTestCase):
    # Unit tests for r2_score_comparable (imported from
    # mlinsights.metrics at the top of the file).

    def test_r2_score_comparable(self):
        """
        Checks that *r2_score_comparable* with ``tr='log'`` matches
        :func:`r2_score` computed on the log-transformed target, and
        compares scores obtained with the ``tr`` / ``inv_tr`` options.
        """
        iris = datasets.load_iris()
        X = iris.data[:, :4]
        # shift targets so that log(y) is defined (iris targets start at 0)
        y = iris.target + 1
        # removed an unused DataFrame built from X: it was never read
        model1 = LinearRegression().fit(X, y)
        model2 = LinearRegression().fit(X, numpy.log(y))
        r2a = r2_score(y, model1.predict(X))
        r2b = r2_score(numpy.log(y), model2.predict(X))
        r2c = r2_score_comparable(y, model2.predict(X), tr='log')
        r2d = r2_score_comparable(y, model2.predict(X), inv_tr='exp')
        # tr='log' transforms the target: r2c must equal r2b exactly
        self.assertEqual(r2b, r2c)
        self.assertGreater(r2c, r2a)
        self.assertLesser(r2a, r2d)
        # applying 'exp' to both target and predictions is expected
        # to produce a poor (negative) score
        r2e = r2_score_comparable(y, model2.predict(X), inv_tr='exp', tr='exp')
        self.assertLesser(r2e, 0)

    def test_r2_score_comparable_exception(self):
        """Checks exceptions raised for invalid ``tr`` / ``inv_tr`` values."""
        iris = datasets.load_iris()
        y = iris.target + 1
        # no transformation at all is rejected
        self.assertRaise(lambda: r2_score_comparable(y, y), ValueError)
        # unknown transformation names are rejected as non-callable
        self.assertRaise(
            lambda: r2_score_comparable(y, y, tr="log2"),
            TypeError)
        self.assertRaise(
            lambda: r2_score_comparable(y, y, inv_tr="log2"),
            TypeError)
47+
if __name__ == "__main__":
    # Run the tests when the module is executed directly.
    unittest.main()
Lines changed: 93 additions & 93 deletions
Original file line numberDiff line numberDiff line change
@@ -1,93 +1,93 @@
1-
"""
2-
@file
3-
@brief Metrics to compare machine learning.
4-
"""
5-
import numpy
6-
from sklearn.metrics import r2_score
7-
8-
# Shortcuts so callers can pass a string name instead of a callable.
_known_functions = {
    'exp': numpy.exp,
    'log': numpy.log
}


def comparable_metric(metric_function, y_true, y_pred,
                      tr="log", inv_tr='exp', **kwargs):
    """
    Applies a transformation on the true target or/and the predictions
    before computing a metric.

    :param metric_function: metric to compute, called as
        ``metric_function(y_true, y_pred, **kwargs)``
    :param y_true: expected targets
    :param y_pred: predictions
    :param tr: transformation applied on the target: a callable,
        a known name (``'exp'``, ``'log'``) or None (default ``'log'``)
    :param inv_tr: transformation applied on the predictions: a callable,
        a known name (``'exp'``, ``'log'``) or None (default ``'exp'``)
    :param kwargs: additional parameters forwarded to *metric_function*
    :return: metric value
    :raises TypeError: if *tr* or *inv_tr* is neither None, a callable,
        nor a known function name
    :raises ValueError: if both *tr* and *inv_tr* are None
    """
    # Resolve string shortcuts; anything not in the mapping is kept
    # as-is and validated below.
    tr = _known_functions.get(tr, tr)
    inv_tr = _known_functions.get(inv_tr, inv_tr)
    if tr is not None and not callable(tr):
        raise TypeError("Argument tr must be callable.")
    if inv_tr is not None and not callable(inv_tr):
        raise TypeError("Argument inv_tr must be callable.")
    if tr is None and inv_tr is None:
        raise ValueError(
            "tr and inv_tr cannot be both None at the same time.")
    if tr is None:
        return metric_function(y_true, inv_tr(y_pred), **kwargs)
    if inv_tr is None:
        return metric_function(tr(y_true), y_pred, **kwargs)
    return metric_function(tr(y_true), inv_tr(y_pred), **kwargs)
43-
44-
45-
def r2_score_comparable(y_true, y_pred, *, sample_weight=None,
                        multioutput='uniform_average',
                        tr=None, inv_tr=None):
    """
    Computes :epkg:`sklearn:metrics:r2_score` after applying a
    transformation on the true target or/and the predictions.

    :param y_true: expected targets
    :param y_pred: predictions
    :param sample_weight: weights
    :param multioutput: see :epkg:`sklearn:metrics:r2_score`
    :param tr: transformation applied on the target
    :param inv_tr: transformation applied on the predictions
    :return: results

    Example:

    .. runpython::
        :showcode:

        import numpy
        from sklearn import datasets
        from sklearn.model_selection import train_test_split
        from sklearn.linear_model import LinearRegression
        from sklearn.metrics import r2_score
        from mlinsights.metrics import r2_score_comparable

        iris = datasets.load_iris()
        X = iris.data[:, :4]
        y = iris.target + 1

        X_train, X_test, y_train, y_test = train_test_split(X, y)

        model1 = LinearRegression().fit(X_train, y_train)
        print('r2', r2_score(y_test, model1.predict(X_test)))
        print('r2 log', r2_score(numpy.log(y_test), numpy.log(model1.predict(X_test))))
        print('r2 log comparable', r2_score_comparable(
            y_test, model1.predict(X_test), tr="log", inv_tr="log"))

        model2 = LinearRegression().fit(X_train, numpy.log(y_train))
        print('r2', r2_score(numpy.log(y_test), model2.predict(X_test)))
        print('r2 log', r2_score(y_test, numpy.exp(model2.predict(X_test))))
        print('r2 log comparable', r2_score_comparable(
            y_test, model2.predict(X_test), inv_tr="exp"))
    """
    # Delegate to the generic helper, forwarding r2_score's own
    # keyword arguments unchanged.
    score_kwargs = dict(sample_weight=sample_weight,
                        multioutput=multioutput)
    return comparable_metric(r2_score, y_true, y_pred,
                             tr=tr, inv_tr=inv_tr, **score_kwargs)
1+
"""
2+
@file
3+
@brief Metrics to compare machine learning.
4+
"""
5+
import numpy
6+
from sklearn.metrics import r2_score
7+
8+
# Shortcuts so callers can pass a string name instead of a callable.
_known_functions = {
    'exp': numpy.exp,
    'log': numpy.log
}


def comparable_metric(metric_function, y_true, y_pred,
                      tr="log", inv_tr='exp', **kwargs):
    """
    Applies a transformation on the true target or/and the predictions
    before computing a metric.

    :param metric_function: metric to compute, called as
        ``metric_function(y_true, y_pred, **kwargs)``
    :param y_true: expected targets
    :param y_pred: predictions
    :param tr: transformation applied on the target: a callable,
        a known name (``'exp'``, ``'log'``) or None (default ``'log'``)
    :param inv_tr: transformation applied on the predictions: a callable,
        a known name (``'exp'``, ``'log'``) or None (default ``'exp'``)
    :param kwargs: additional parameters forwarded to *metric_function*
    :return: metric value
    :raises TypeError: if *tr* or *inv_tr* is neither None, a callable,
        nor a known function name
    :raises ValueError: if both *tr* and *inv_tr* are None
    """
    # Resolve string shortcuts; anything not in the mapping is kept
    # as-is and validated below.
    tr = _known_functions.get(tr, tr)
    inv_tr = _known_functions.get(inv_tr, inv_tr)
    if tr is not None and not callable(tr):
        raise TypeError("Argument tr must be callable.")
    if inv_tr is not None and not callable(inv_tr):
        raise TypeError("Argument inv_tr must be callable.")
    if tr is None and inv_tr is None:
        raise ValueError(
            "tr and inv_tr cannot be both None at the same time.")
    if tr is None:
        return metric_function(y_true, inv_tr(y_pred), **kwargs)
    if inv_tr is None:
        return metric_function(tr(y_true), y_pred, **kwargs)
    return metric_function(tr(y_true), inv_tr(y_pred), **kwargs)
43+
44+
45+
def r2_score_comparable(y_true, y_pred, *, sample_weight=None,
                        multioutput='uniform_average',
                        tr=None, inv_tr=None):
    """
    Computes :epkg:`sklearn:metrics:r2_score` after applying a
    transformation on the true target or/and the predictions.

    :param y_true: expected targets
    :param y_pred: predictions
    :param sample_weight: weights
    :param multioutput: see :epkg:`sklearn:metrics:r2_score`
    :param tr: transformation applied on the target
    :param inv_tr: transformation applied on the predictions
    :return: results

    Example:

    .. runpython::
        :showcode:

        import numpy
        from sklearn import datasets
        from sklearn.model_selection import train_test_split
        from sklearn.linear_model import LinearRegression
        from sklearn.metrics import r2_score
        from mlinsights.metrics import r2_score_comparable

        iris = datasets.load_iris()
        X = iris.data[:, :4]
        y = iris.target + 1

        X_train, X_test, y_train, y_test = train_test_split(X, y)

        model1 = LinearRegression().fit(X_train, y_train)
        print('r2', r2_score(y_test, model1.predict(X_test)))
        print('r2 log', r2_score(numpy.log(y_test), numpy.log(model1.predict(X_test))))
        print('r2 log comparable', r2_score_comparable(
            y_test, model1.predict(X_test), tr="log", inv_tr="log"))

        model2 = LinearRegression().fit(X_train, numpy.log(y_train))
        print('r2', r2_score(numpy.log(y_test), model2.predict(X_test)))
        print('r2 log', r2_score(y_test, numpy.exp(model2.predict(X_test))))
        print('r2 log comparable', r2_score_comparable(
            y_test, model2.predict(X_test), inv_tr="exp"))
    """
    # Delegate to the generic helper, forwarding r2_score's own
    # keyword arguments unchanged.
    score_kwargs = dict(sample_weight=sample_weight,
                        multioutput=multioutput)
    return comparable_metric(r2_score, y_true, y_pred,
                             tr=tr, inv_tr=inv_tr, **score_kwargs)

0 commit comments

Comments (0)