Skip to content

Commit da70a78

Browse files
committed
losses update
1 parent bcf5d84 commit da70a78

2 files changed

Lines changed: 8 additions & 8 deletions

File tree

pyproject.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# NEW VERSION TUTORIAL
2-
# STEP 1 - CHANGE VERSION IN THIS FILE
3-
# STEP 2 - CREATE TAG WITH THAT VERSIOn
2+
# STEP 1 - CREATE TAG WITH NEW VERSION
3+
# STEP 2 - CHANGE VERSION IN THIS FILE
44
# STEP 3 - PUSH TO GITHUB
55
# STEP 4 - PUSH TAG TO GITHUB
66
# STEP 5 - WAIT AND SEE THAT TESTS PASS ON GITHUB AND THAT IT DIDNT FIND ANOTHER REASON TO SKIP PIP
@@ -13,7 +13,7 @@ build-backend = "setuptools.build_meta"
1313
name = "torchzero"
1414
description = "Modular optimization library for PyTorch."
1515

16-
version = "0.3.2"
16+
version = "0.3.3"
1717
dependencies = [
1818
"torch",
1919
"numpy",

tests/test_opts.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -723,7 +723,7 @@ def test(self): _run(**self.kwargs)
723723
sphere_opt=lambda p: tz.Modular(p, tz.m.GraftModules(tz.m.Shampoo(), tz.m.RMSprop()), tz.m.LR(0.2)),
724724
needs_closure=False,
725725
func='booth', steps=50, loss=200, merge_invariant=False,
726-
sphere_steps=20, sphere_loss=1e-4, # merge and unmerge lrs are very different so need to test convergence separately somewhere
726+
sphere_steps=20, sphere_loss=1e-3, # merge and unmerge lrs are very different so need to test convergence separately somewhere
727727
)
728728

729729
# ------------------------- quasi_newton/quasi_newton ------------------------ #
@@ -800,7 +800,7 @@ def test(self): _run(**self.kwargs)
800800
sphere_opt=lambda p: tz.Modular(p, tz.m.GaussianHomotopy(10, 1, tol=1e-1, seed=0), tz.m.BFGS(), tz.m.StrongWolfe()),
801801
needs_closure=True,
802802
func='booth', steps=20, loss=0.1, merge_invariant=True,
803-
sphere_steps=10, sphere_loss=150, # merge and unmerge lrs are very different so need to test convergence separately somewhere
803+
sphere_steps=10, sphere_loss=200,
804804
)
805805

806806
# ---------------------------- smoothing/laplacian --------------------------- #
@@ -809,15 +809,15 @@ def test(self): _run(**self.kwargs)
809809
sphere_opt=lambda p: tz.Modular(p, tz.m.LaplacianSmoothing(min_numel=1), tz.m.LR(0.5)),
810810
needs_closure=False,
811811
func='booth', steps=50, loss=0.4, merge_invariant=False,
812-
sphere_steps=10, sphere_loss=3, # merge and unmerge lrs are very different so need to test convergence separately somewhere
812+
sphere_steps=10, sphere_loss=3,
813813
)
814814

815815
LaplacianSmoothing_global = Run(
816816
func_opt=lambda p: tz.Modular(p, tz.m.LaplacianSmoothing(layerwise=False), tz.m.LR(0.1)),
817817
sphere_opt=lambda p: tz.Modular(p, tz.m.LaplacianSmoothing(layerwise=False), tz.m.LR(0.5)),
818818
needs_closure=False,
819819
func='booth', steps=50, loss=0.4, merge_invariant=True,
820-
sphere_steps=10, sphere_loss=3, # merge and unmerge lrs are very different so need to test convergence separately somewhere
820+
sphere_steps=10, sphere_loss=3,
821821
)
822822

823823
# -------------------------- wrappers/optim_wrapper -------------------------- #
@@ -834,7 +834,7 @@ def test(self): _run(**self.kwargs)
834834
func_opt=lambda p: tz.Modular(p, tz.m.NystromSketchAndSolve(2, seed=0), tz.m.StrongWolfe()),
835835
sphere_opt=lambda p: tz.Modular(p, tz.m.NystromSketchAndSolve(10, seed=0), tz.m.StrongWolfe()),
836836
needs_closure=True,
837-
func='booth', steps=3, loss=1e-8, merge_invariant=True,
837+
func='booth', steps=3, loss=1e-6, merge_invariant=True,
838838
sphere_steps=10, sphere_loss=1e-12,
839839
)
840840
NystromPCG = Run(

0 commit comments

Comments (0)