diff --git a/geomagio/algorithm/SQDistAlgorithm.py b/geomagio/algorithm/SQDistAlgorithm.py
index 8bd7722b8c4e65e5689693f83315f7d898334b52..ac556a325965c67560183225c9327fe5ed61a442 100644
--- a/geomagio/algorithm/SQDistAlgorithm.py
+++ b/geomagio/algorithm/SQDistAlgorithm.py
@@ -136,7 +136,7 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
 
     if b0 is None:
         b = (np.nanmean(yobs[m:2 * m]) - np.nanmean(yobs[0:m])) / m
-        b = 0 if np.isnan(b) else b # replace NaN with 0
+        b = 0 if np.isnan(b) else b  # replace NaN with 0
     else:
         b = b0
         if not np.isscalar(b0):
@@ -151,14 +151,13 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
 
     if s0 is None:
         s = [yobs[i] - l for i in range(m)]
-        s = [i if ~np.isnan(i) else 0 for i in s] # replace NaNs with 0s
+        s = [i if ~np.isnan(i) else 0 for i in s]  # replace NaNs with 0s
     else:
         s = list(s0)
         if len(s) != m:
             raise AlgorithmException("s0 must have length %d " % m)
 
     if sigma0 is None:
-        # NOTE: maybe default should be vector of zeros???
         sigma = [np.sqrt(np.nanvar(yobs))] * (hstep + 1)
     else:
         sigma = list(sigma0)
@@ -186,7 +185,7 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
             initial_values = [alpha]
         else:
             boundaries = [(0, 1)]
-            initial_values = [0.3] # FIXME: should add alpha0 option
+            initial_values = [0.3]  # FIXME: should add alpha0 option
 
         if beta != None:
             # allows us to fix beta
@@ -194,7 +193,7 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
             initial_values.append(beta)
         else:
             boundaries.append((0, 1))
-            initial_values.append(0.1) # FIXME: should add beta0 option
+            initial_values.append(0.1)  # FIXME: should add beta0 option
 
         if gamma != None:
             # allows us to fix gamma
@@ -202,7 +201,7 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
             initial_values.append(gamma)
         else:
             boundaries.append((0, 1))
-            initial_values.append(0.1) # FIXME: should add gamma0 option
+            initial_values.append(0.1)  # FIXME: should add gamma0 option
 
         if phi != None:
             # allows us to fix phi
@@ -210,7 +209,7 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
             initial_values.append(phi)
         else:
             boundaries.append((0, 1))
-            initial_values.append(0.9) # FIXME: should add phi0 option
+            initial_values.append(0.9)  # FIXME: should add phi0 option
 
         initial_values = np.array(initial_values)
         method = 'additive'
@@ -231,14 +230,9 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
     # r equal to mean(s)
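+    # (r collects the per-step renormalization shifts for s; it is integrated
+    # after the loop to re-zero the seasonal adjustments and shift the level)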
     r = [np.nanmean(s)]
 
-    # determine h-step vector of phis for damped trends
-    # NOTE: Did away with phiVec altogether, and just use phiHminus1 now;
-    #
-    #phiVec = np.array([phi**i for i in range(1,hstep)])
-
     # determine sum(c^2) and phi_(j-1) for hstep "prediction interval" outside
-    # of
-    # loop; initialize variables for jstep (beyond hstep) prediction intervals
+    # of the loop; initialize variables for jstep (beyond hstep) prediction
+    # intervals.
     sumc2_H = 1
     phiHminus1 = 0
     for h in range(1, hstep):
@@ -250,7 +244,6 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
     jstep = hstep
 
     # convert to, and pre-allocate numpy arrays
-    # FIXME: this should just be done when checking inputs above
     yobs = np.array(yobs)
     sigma = np.concatenate((sigma, np.zeros(yobs.size + fc)))
     yhat = np.concatenate((yhat, np.zeros(yobs.size + fc)))
@@ -259,10 +252,9 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
 
     # smooth/simulate/forecast yobs
     for i in range(len(yobs) + fc):
-
-        # update/append sigma for h steps ahead of i following
-        # Hyndman-et-al-2005
-        # NOTE: this will be over-written if valid observations exist at step i
+        # Update/append sigma for h steps ahead of i following
+        # Hyndman-et-al-2005. This will be overwritten if valid observations
+        # exist at step i.
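+        # (sumc2 starts at 1 and accumulates squared error weights, so the
+        # predicted sigma widens the longer valid observations are missing)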
         if jstep == hstep:
             sigma2 = sigma[i] * sigma[i]
         sigma[i + hstep + 1] = np.sqrt(sigma2 * sumc2)
@@ -270,28 +262,14 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
         # predict h steps ahead
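+        # (i.e., level, plus the phi-damped trend contribution accumulated
+        # in phiHminus1, plus the stored seasonal adjustment)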
         yhat[i + hstep] = l + phiHminus1 * b + s[i + hstep % m]
 
-        ## NOTE: this was a misguided attempt to smooth s that led to
-        ## oscillatory
-        ##       behavior; this makes perfect sense in hindsight, but I'm
-        ## leaving
-        ##       comments here as a reminder to NOT try this again. -EJR 6/2015
-        # yhat[i+hstep] = l + (phiVec*b).sum() + np.nanmean(s[i+ssIdx])
-
         # determine discrepancy between observation and prediction at step i
-        # FIXME: this if-block becomes unneccessary if we remove the fc option,
-        #        and force the user to place NaNs at the end of yobs if/when
-        #        they want forecasts beyond the last available observation
         if i < len(yobs):
             et = yobs[i] - yhat[i]
         else:
             et = np.nan
 
-        # this if/else block is not strictly necessary, but it makes the logic
-        # somewhat easier to follow (for me at least -EJR 5/2015)
         if (np.isnan(et) or np.abs(et) > zthresh * sigma[i]):
-            #
             # forecast (i.e., update l, b, and s assuming et==0)
-            #
 
             # no change in seasonal adjustments
             r[i + 1] = 0
@@ -303,27 +281,23 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
 
             if np.isnan(et):
                 # when forecasting, grow sigma=sqrt(var) like a prediction
-                # interval;
-                # sumc2 and jstep will be reset with the next valid observation
+                # interval; sumc2 and jstep will be reset with the next
+                # valid observation
                 phiJminus1 = phiJminus1 + phi**jstep
                 sumc2 = sumc2 + (alpha * (1 + phiJminus1 * beta) +
                                  gamma * (1 if (jstep % m == 0) else 0))**2
                 jstep = jstep + 1
 
             else:
-                # still update sigma using et when et > zthresh*sigma
+                # still update sigma using et when et > zthresh * sigma
                 # (and is not NaN)
-                # NOTE: Bodenham-et-Adams-2013 may have a more robust method
                 sigma[i + 1] = alpha * np.abs(et) + (1 - alpha) * sigma[i]
-
         else:
-            #
             # smooth (i.e., update l, b, and s by filtering et)
-            #
 
             # renormalization could occur inside loop, but we choose to
-            # integrate
-            # r, and adjust a and s outside the loop to improve performance.
+            # integrate r, and adjust l and s outside the loop to improve
+            # performance.
             r[i + 1] = gamma * (1 - alpha) * et / m
 
             # update and append to s using equation-error formulation
@@ -334,29 +308,14 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
             b = phi * b + alpha * beta * et
 
             # update sigma with et, then reset prediction interval variables
-            # NOTE: Bodenham-et-Adams-2013 may have a more robust method
             sigma[i + 1] = alpha * np.abs(et) + (1 - alpha) * sigma[i]
             sumc2 = sumc2_H
             phiJminus1 = phiHminus1
             jstep = hstep
-
         # endif (np.isnan(et) or np.abs(et) > zthresh * sigma[i])
 
     # endfor i in range(len(yobs) + fc - hstep)
 
-    """
-    NOTE: Seasonal adjustments s[i+1:i+m] should be normalized so their mean
-      is zero, at least until the next observation, or else the notion of a
-      "seasonal" adjustment loses all meaning. In order to ensure that the
-      predictions yhat[:] remain unchanged, the baseline a is shifted too.
-      Archibald-et-Koehler-2003 recommend doing all this inside the loop,
-      but this slows the algorithm significantly. A&K-2003 note, however,
-      that r can be integrated, and used to adjust s[:] *outside* the loop,
-      and Gardner-2006 recommends this approach. A&K-2003 provide valid
-      reasons for their recommendation (online optimization of alpha will
-      be impacted), but since ours is not currently such an estimator, we
-      choose the more computationally efficient approach.
-    """
     r = np.cumsum(r)
     l = l + r[-1]
     s = list(np.array(s) - np.hstack((r, np.tile(r[-1], m - 1))))
@@ -387,10 +346,4 @@ def additive(yobs, m, alpha=None, beta=None, gamma=None, phi=1,
 
 
 if __name__ == '__main__':
-    """
-    This might be expanded to call HoltWinters.py as a script. More likely,
-    HoltWinters.py will be incorporated into another module or class, which
-    will have it's own command-line functionality.
-    """
-
-    print 'HELLO'
+    pass