geomag-algorithms · Commit 57c3abce
Authored 9 years ago by Eddie McWhirter

Add spaces after all commas and around all arithmetic operators in SQDistAlgorithm.
Parent: 31eabbb6
No related branches, tags, or merge requests found.
Changes: 1 changed file
geomagio/algorithm/SQDistAlgorithm.py: 59 additions, 55 deletions
@@ -46,10 +46,11 @@ def RMSE(params, *args):
        raise Exception
    # calculate root-mean-squared-error of predictions
    rmse = np.sqrt(np.nanmean([(m - n) ** 2 for m, n in zip(yobs, yhat)]))
    return rmse


def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
             yhat0=None, s0=None, l0=None, b0=None, sigma0=None,
             zthresh=6, fc=0, hstep=0):
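The RMSE helper shown above scores candidate smoothing parameters by comparing predictions against observations, letting nanmean drop NaN pairs. A minimal standalone restatement of that calculation (the function name here is illustrative, not from the module):

    import numpy as np

    def rmse_ignoring_nans(yobs, yhat):
        # root-mean-squared error; nanmean drops any pair containing a NaN
        return np.sqrt(np.nanmean([(m - n) ** 2 for m, n in zip(yobs, yhat)]))

    # a NaN in either series simply removes that pair from the average
    print(rmse_ignoring_nans([1.0, 2.0, np.nan], [1.5, 1.5, 3.0]))  # 0.5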
@@ -77,7 +78,7 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
    l0 : initial level (i.e., l(t-hstep))
         (if None, default is mean(yobs[0:m]))
    b0 : initial slope (i.e., b(t-hstep))
         (if None, default is (mean(yobs[m:2*m]) - mean(yobs[0:m]))/m)
    sigma0 : initial standard-deviation estimate (len(sigma0) == hstep+1)
         (if None, default is [sqrt(var(yobs))] * (hstep+1) )
    zthresh : z-score threshold to determine whether yhat is updated by
@@ -95,7 +96,7 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
    -------
    yhat : series of smoothed/forecast values (aligned with yobs(t))
    shat : series of seasonal adjustments (aligned with yobs(t))
    sigmahat:
        series of time-varying standard deviations (aligned with yobs(t))
    yhat0next : use as yhat0 when function called again with new observations
    s0next : use as s0 when function called again with new observations
    l0next : use as l0 when function called again with new observations
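The docstring above spells out the fallback initializations for l0, b0, and sigma0. As a quick check of what those default expressions evaluate to on a toy series (a standalone restatement, not code from the module):

    import numpy as np

    m = 4                                  # samples per seasonal cycle
    hstep = 1
    yobs = np.arange(12, dtype=float)      # toy observations covering three cycles

    l0 = np.mean(yobs[0:m])                                   # initial level -> 1.5
    b0 = (np.mean(yobs[m:2 * m]) - np.mean(yobs[0:m])) / m    # initial slope -> 1.0
    sigma0 = [np.sqrt(np.var(yobs))] * (hstep + 1)            # hstep+1 copies, ~3.46 each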
@@ -145,23 +146,23 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
    else:
        yhat = list(yhat0)
        if len(yhat) != hstep:
            raise Exception, "yhat0 must have length %d" % hstep

    if s0 is None:
        s = [yobs[i] - l for i in range(m)]
        s = [i if ~np.isnan(i) else 0 for i in s]  # replace NaNs with 0s
    else:
        s = list(s0)
        if len(s) != m:
            raise Exception, "s0 must have length %d" % m

    if sigma0 is None:
        # NOTE: maybe default should be vector of zeros???
        sigma = [np.sqrt(np.nanvar(yobs))] * (hstep + 1)
    else:
        sigma = list(sigma0)
        if len(sigma) != (hstep + 1):
            raise Exception, "sigma0 must have length %d" % (hstep + 1)

    #
    # Optimal parameter estimation if requested
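When s0 is not supplied, the block above seeds the seasonal terms from the first cycle of observations and zeroes out any NaNs. A compact standalone illustration of that default (made-up data; the level l here is just the first-cycle mean):

    import numpy as np

    m = 4
    yobs = [2.0, np.nan, 6.0, 8.0, 3.0, 5.0, 7.0, 9.0]
    l = np.nanmean(yobs[0:m])                    # first-cycle level, ~5.33

    s = [yobs[i] - l for i in range(m)]          # deviations of the first cycle from l
    s = [i if ~np.isnan(i) else 0 for i in s]    # replace NaNs with 0s, as in the diff
    print(s)                                     # [-3.33..., 0, 0.67..., 2.67...]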
@@ -179,34 +180,34 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
        if alpha != None:
            # allows us to fix alpha
            boundaries = [(alpha, alpha)]
            initial_values = [alpha]
        else:
            boundaries = [(0, 1)]
            initial_values = [0.3]  # FIXME: should add alpha0 option

        if beta != None:
            # allows us to fix beta
            boundaries.append((beta, beta))
            initial_values.append(beta)
        else:
            boundaries.append((0, 1))
            initial_values.append(0.1)  # FIXME: should add beta0 option

        if gamma != None:
            # allows us to fix gamma
            boundaries.append((gamma, gamma))
            initial_values.append(gamma)
        else:
            boundaries.append((0, 1))
            initial_values.append(0.1)  # FIXME: should add gamma0 option

        if phi != None:
            # allows us to fix phi
            boundaries.append((phi, phi))
            initial_values.append(phi)
        else:
            boundaries.append((0, 1))
            initial_values.append(0.9)  # FIXME: should add phi0 option

        initial_values = np.array(initial_values)
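The boundaries and initial_values built above describe a box-constrained search over (alpha, beta, gamma, phi), with any fixed parameter pinned by a degenerate (x, x) bound. This excerpt does not show which optimizer consumes the lists, but any bounded minimizer can be driven the same way; a sketch using SciPy's L-BFGS-B with a stand-in objective (everything below is illustrative, not the module's code):

    import numpy as np
    from scipy.optimize import minimize

    def objective(params):
        # stand-in for RMSE(params, *args): distance from an arbitrary target point
        return float(np.sum((params - np.array([0.5, 0.1, 0.2, 0.9])) ** 2))

    alpha = 0.5
    boundaries = [(alpha, alpha)]            # alpha fixed via a degenerate bound
    boundaries += [(0, 1), (0, 1), (0, 1)]   # beta, gamma, phi free on [0, 1]
    initial_values = np.array([alpha, 0.1, 0.1, 0.9])

    res = minimize(objective, initial_values, method='L-BFGS-B', bounds=boundaries)
    print(res.x)   # alpha stays at 0.5; the free parameters move within their bounds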
@@ -220,7 +221,6 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
    # endif (alpha == None or beta == None or gamma == None)
    #
    # Now begin the actual Holt-Winters algorithm
    #
@@ -229,35 +229,31 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
    # r equal to mean(s)
    r = [np.nanmean(s)]

    # determine h-step vector of phis for damped trends
    # NOTE: Did away with phiVec altogether, and just use phiHminus1 now;
    #
    #phiVec = np.array([phi**i for i in range(1, hstep)])

    # determine sum(c^2) and phi_(j-1) for hstep "prediction interval" outside of
    # loop; initialize variables for jstep (beyond hstep) prediction intervals
    sumc2_H = 1
    phiHminus1 = 0
    for h in range(1, hstep):
        phiHminus1 = phiHminus1 + phi ** (h - 1)
        sumc2_H = sumc2_H + (alpha * (1 + phiHminus1 * beta) +
                             gamma * (1 if (h % m == 0) else 0)) ** 2
    phiJminus1 = phiHminus1
    sumc2 = sumc2_H
    jstep = hstep

    # convert to, and pre-allocate numpy arrays
    # FIXME: this should just be done when checking inputs above
    yobs = np.array(yobs)
    sigma = np.concatenate((sigma, np.zeros(yobs.size + fc)))
    yhat = np.concatenate((yhat, np.zeros(yobs.size + fc)))
    r = np.concatenate((r, np.zeros(yobs.size + fc)))
    s = np.concatenate((s, np.zeros(yobs.size + fc)))

    # smooth/simulate/forecast yobs
    for i in range(len(yobs) + fc):
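The loop above accumulates sumc2_H, the sum of squared forecast-error coefficients that later scales the h-step prediction interval (sigma * sqrt(sumc2)), together with the damped-trend factor phiHminus1. A standalone restatement, handy for sanity-checking values outside the algorithm (the helper name is hypothetical):

    def prediction_interval_factors(alpha, beta, gamma, phi, m, hstep):
        # same accumulation as the loop in the diff above
        sumc2_H = 1
        phiHminus1 = 0
        for h in range(1, hstep):
            phiHminus1 = phiHminus1 + phi ** (h - 1)
            sumc2_H = sumc2_H + (alpha * (1 + phiHminus1 * beta) +
                                 gamma * (1 if (h % m == 0) else 0)) ** 2
        return sumc2_H, phiHminus1

    # e.g. minute data with a daily cycle (m = 1440) and a 10-step horizon
    print(prediction_interval_factors(0.3, 0.1, 0.1, phi=1, m=1440, hstep=10))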
@@ -267,20 +263,17 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
        # NOTE: this will be over-written if valid observations exist at step i
        if jstep == hstep:
            sigma2 = sigma[i] * sigma[i]
        sigma[i + hstep + 1] = np.sqrt(sigma2 * sumc2)

        # predict h steps ahead
        yhat[i + hstep] = l + phiHminus1 * b + s[i + hstep % m]

        ## NOTE: this was a misguided attempt to smooth s that led to oscillatory
        ## behavior; this makes perfect sense in hindsight, but I'm leaving
        ## comments here as a reminder to NOT try this again. -EJR 6/2015
        # yhat[i+hstep] = l + (phiVec*b).sum() + np.nanmean(s[i+ssIdx])

        # determine discrepancy between observation and prediction at step i
        # FIXME: this if-block becomes unneccessary if we remove the fc option,
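The prediction line above is the damped-trend additive Holt-Winters forecast: level, plus the accumulated damping factor times the slope, plus the matching seasonal term. Note that Python's % binds tighter than +, so the seasonal index is i + (hstep % m). A standalone restatement with toy numbers (illustrative only):

    def forecast_h_steps(l, b, s, i, hstep, m, phiHminus1):
        # mirrors: yhat[i + hstep] = l + phiHminus1 * b + s[i + hstep % m]
        return l + phiHminus1 * b + s[i + hstep % m]

    s = [0.5, -0.5, 1.0, -1.0]     # toy seasonal terms, m = 4
    print(forecast_h_steps(l=10.0, b=0.2, s=s, i=1, hstep=2, m=4, phiHminus1=1.0))
    # 10.0 + 1.0 * 0.2 + s[1 + (2 % 4)] = 10.2 + s[3] = 9.2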
@@ -291,7 +284,6 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
        else:
            et = np.nan

        # this if/else block is not strictly necessary, but it makes the logic
        # somewhat easier to follow (for me at least -EJR 5/2015)
        if (np.isnan(et) or np.abs(et) > zthresh * sigma[i]):
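The condition above is the gate that decides whether an observation participates in the update: a NaN (gap) or a residual beyond zthresh standard deviations (disturbance) is forecast through rather than assimilated. A tiny illustration of that test (the helper name is hypothetical):

    import numpy as np

    def is_gap_or_disturbance(et, sigma_i, zthresh=6):
        # NaN residual, or residual beyond zthresh*sigma, is not treated as solar-quiet
        return np.isnan(et) or np.abs(et) > zthresh * sigma_i

    print(is_gap_or_disturbance(2.0, sigma_i=1.0))      # False: assimilate normally
    print(is_gap_or_disturbance(10.0, sigma_i=1.0))     # True: spike, hold the state
    print(is_gap_or_disturbance(np.nan, sigma_i=1.0))   # True: gap, forecast through it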
@@ -300,9 +292,8 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
            #
            # no change in seasonal adjustments
            r[i + 1] = 0
            s[i + m] = s[i]

            # update l before b
            l = l + phi * b
@@ -313,15 +304,15 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
                # interval;
                # sumc2 and jstep will be reset with the next valid observation
                phiJminus1 = phiJminus1 + phi ** jstep
                sumc2 = sumc2 + (alpha * (1 + phiJminus1 * beta) +
                                 gamma * (1 if (jstep % m == 0) else 0)) ** 2
                jstep = jstep + 1

            else:
                # still update sigma using et when et > zthresh*sigma
                # (and is not NaN)
                # NOTE: Bodenham-et-Adams-2013 may have a more robust method
                sigma[i + 1] = alpha * np.abs(et) + (1 - alpha) * sigma[i]

        else:
            #
@@ -331,18 +322,18 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
            # renormalization could occur inside loop, but we choose to integrate
            # r, and adjust a and s outside the loop to improve performance.
            r[i + 1] = gamma * (1 - alpha) * et / m
            # update and append to s using equation-error formulation
            s[i + m] = s[i] + gamma * (1 - alpha) * et
            # update l and b using equation-error formulation
            l = l + phi * b + alpha * et
            b = phi * b + alpha * beta * et

            # update sigma with et, then reset prediction interval variables
            # NOTE: Bodenham-et-Adams-2013 may have a more robust method
            sigma[i + 1] = alpha * np.abs(et) + (1 - alpha) * sigma[i]
            sumc2 = sumc2_H
            phiJminus1 = phiHminus1
            jstep = hstep
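The block above is the equation-error form of the additive Holt-Winters update: the residual et adjusts the seasonal term m steps ahead, the level, the (damped) slope, and an exponentially weighted sigma, while r accumulates the renormalization applied after the loop. A single update step, restated as a standalone function with toy values (illustrative names):

    import numpy as np

    def update_state(l, b, s_i, sigma_i, et, alpha, beta, gamma, phi, m):
        # one equation-error update, mirroring the assignments in the diff above
        r_next = gamma * (1 - alpha) * et / m           # renormalization increment
        s_next = s_i + gamma * (1 - alpha) * et         # seasonal term m steps ahead
        l_new = l + phi * b + alpha * et                # level
        b_new = phi * b + alpha * beta * et             # damped slope
        sigma_next = alpha * np.abs(et) + (1 - alpha) * sigma_i   # EWMA of |residual|
        return l_new, b_new, s_next, r_next, sigma_next

    print(update_state(l=10.0, b=0.1, s_i=0.5, sigma_i=1.0, et=0.4,
                       alpha=0.3, beta=0.1, gamma=0.2, phi=1.0, m=4))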
@@ -351,7 +342,6 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
    # endfor i in range(len(yobs) + fc - hstep)

    """
    NOTE: Seasonal adjustments s[i+1:i+m] should be normalized so their mean
    is zero, at least until the next observation, or else the notion of a
@@ -367,17 +357,31 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
    """
    r = np.cumsum(r)
    l = l + r[-1]
    s = list(np.array(s) - np.hstack((r, np.tile(r[-1], m - 1))))

    # return different outputs depending on retParams
    if retParams:
        return (yhat[:len(yobs) + fc],
                s[:len(yobs) + fc],
                sigma[1:len(yobs) + fc + 1],
                yhat[len(yobs) + fc:],
                s[len(yobs) + fc:],
                l,
                b,
                sigma[len(yobs) + fc:],
                alpha, beta, gamma, rmse)
    else:
        return (yhat[:len(yobs) + fc],
                s[:len(yobs) + fc],
                sigma[1:len(yobs) + fc + 1],
                yhat[len(yobs) + fc:],
                s[len(yobs) + fc:],
                l,
                b,
                sigma[len(yobs) + fc:])


if __name__ == '__main__':
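Given the signature and the non-retParams return tuple shown in this diff, a call on synthetic data might look like the sketch below. The import path follows the changed file's location, but the exact unpacking and the chained second call are assumptions based on the docstring excerpts above, not verified against the full module:

    import numpy as np
    from geomagio.algorithm.SQDistAlgorithm import additive  # path of the changed file

    m = 1440                                    # one day of minute data per cycle
    t = np.arange(3 * m)
    yobs = 10.0 * np.sin(2 * np.pi * t / m) + 0.01 * t   # synthetic daily cycle + drift

    # fixed smoothing parameters, so no parameter optimization is triggered
    yhat, shat, sigmahat, yhat0, s0, l0, b0, sigma0 = additive(
        yobs[:2 * m], m, alpha=0.3, beta=0.0, gamma=0.1, phi=1)

    # feed the "...next" outputs back in when new observations arrive
    yhat2, shat2, sigmahat2, _, _, _, _, _ = additive(
        yobs[2 * m:], m, alpha=0.3, beta=0.0, gamma=0.1, phi=1,
        yhat0=yhat0, s0=s0, l0=l0, b0=b0, sigma0=sigma0)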