Add openpilot tests
50
tinygrad_repo/test/extra/test_export_model.py
Normal file
@@ -0,0 +1,50 @@
import unittest
from extra.export_model import export_model, EXPORT_SUPPORTED_DEVICE
from tinygrad.tensor import Tensor, Device
import json

class MockMultiInputModel:
  def forward(self, x1, x2, x3):
    return x1 + x2 + x3

class MockMultiOutputModel:
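  # returns two outputs of different shapes (the pad widens the second), so the export
  # presumably has to track a separate buffer and dtype per output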
  def __call__(self, x1):
    return x1 + 2.0, x1.pad(((0, 0), (0, 1))) + 1.0

# TODO: move compile_efficientnet tests here
@unittest.skipUnless(Device.DEFAULT in EXPORT_SUPPORTED_DEVICE, f"Model export is not supported on {Device.DEFAULT}")
class TestModelExport(unittest.TestCase):
  def test_multi_input_model_export(self):
    model = MockMultiInputModel()
    inputs = [Tensor.rand(2,2), Tensor.rand(2,2), Tensor.rand(2,2)]
    prg, inp_sizes, _, _ = export_model(model, "", *inputs)
    prg = json.loads(prg)
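
    # export_model (as used here) returns a 4-tuple: the exported program as a JSON string,
    # input-size and output-size dicts keyed by buffer name, and a fourth element unused in
    # these tests; the parsed JSON is expected to expose "inputs", "outputs", and "buffers"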
    assert len(inputs) == len(prg["inputs"]) == len(inp_sizes), f"Model and exported inputs don't match: mdl={len(inputs)}, prg={len(prg['inputs'])}, inp_sizes={len(inp_sizes)}"

    for i in range(len(inputs)):
      assert f"input{i}" in inp_sizes, f"input{i} not captured in inp_sizes"
      assert f"input{i}" in prg["buffers"], f"input{i} not captured in exported buffers"

    for i, exported_input in enumerate(prg["inputs"]):
      assert inputs[i].dtype.name == exported_input["dtype"], f"Model and exported input dtype don't match: mdl={inputs[i].dtype.name}, prg={exported_input['dtype']}"

  def test_multi_output_model_export(self):
    model = MockMultiOutputModel()
    input = Tensor.rand(2,2)
    outputs = model(input)
    prg, _, out_sizes, _ = export_model(model, "", input)
    prg = json.loads(prg)

    assert len(outputs) == len(prg["outputs"]) == len(out_sizes), f"Model and exported outputs don't match: mdl={len(outputs)}, prg={len(prg['outputs'])}, out_sizes={len(out_sizes)}"

    for i in range(len(outputs)):
      assert f"output{i}" in out_sizes, f"output{i} not captured in out_sizes"
      assert f"output{i}" in prg["buffers"], f"output{i} not captured in exported buffers"

    for i, exported_output in enumerate(prg["outputs"]):
      assert outputs[i].dtype.name == exported_output["dtype"], f"Model and exported output dtype don't match: mdl={outputs[i].dtype.name}, prg={exported_output['dtype']}"

if __name__ == '__main__':
  unittest.main()
57
tinygrad_repo/test/extra/test_extra_helpers.py
Normal file
@@ -0,0 +1,57 @@
#!/usr/bin/env python
import os, cloudpickle, tempfile, unittest, subprocess
from extra.helpers import enable_early_exec, cross_process, _CloudpickleFunctionWrapper

def normalize_line_endings(s): return s.replace(b'\r\n', b'\n')

class TestEarlyExec(unittest.TestCase):
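  # enable_early_exec is assumed to return a callable that takes an (argv, stdin_bytes)
  # tuple and returns the subprocess output as bytes; the tests below also rely on it
  # raising CalledProcessError on a non-zero exit and TypeError on non-bytes stdin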
  def setUp(self) -> None:
    self.early_exec = enable_early_exec()

  def early_exec_py_file(self, file_content, exec_args):
    with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as temp:
      temp.write(file_content)
      temp_path = temp.name
    try:
      output = self.early_exec((["python3", temp_path] + exec_args, None))
      return output
    finally:
      os.remove(temp_path)

  def test_enable_early_exec(self):
    output = self.early_exec_py_file(b'print("Hello, world!")', [])
    self.assertEqual(b"Hello, world!\n", normalize_line_endings(output))

  def test_enable_early_exec_with_arg(self):
    output = self.early_exec_py_file(b'import sys\nprint("Hello, " + sys.argv[1] + "!")', ["world"])
    self.assertEqual(b"Hello, world!\n", normalize_line_endings(output))

  def test_enable_early_exec_process_exception(self):
    with self.assertRaises(subprocess.CalledProcessError):
      self.early_exec_py_file(b'raise Exception("Test exception")', [])

  def test_enable_early_exec_type_exception(self):
    with self.assertRaises(TypeError):
      self.early_exec((["python3"], "print('Hello, world!')"))
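
# cross_process appears to pickle a locally defined generator with cloudpickle (via
# _CloudpickleFunctionWrapper) and run it in a separate process, streaming the yielded
# values back to the caller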
class TestCrossProcess(unittest.TestCase):

  def test_cross_process(self):
    def _iterate():
      for i in range(10): yield i
    results = list(cross_process(_iterate))
    self.assertEqual(list(range(10)), results)

  def test_cross_process_exception(self):
    def _iterate():
      for i in range(10):
        if i == 5: raise ValueError("Test exception")
        yield i
    with self.assertRaises(ValueError): list(cross_process(_iterate))
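
  # plain pickle cannot serialize locally defined functions, so the wrapper defers to
  # cloudpickle; `add` below survives a dumps/loads round trip only because of that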
  def test_CloudpickleFunctionWrapper(self):
    def add(x, y): return x + y
    self.assertEqual(7, cloudpickle.loads(cloudpickle.dumps(_CloudpickleFunctionWrapper(add)))(3, 4))

if __name__ == '__main__':
  unittest.main()
107
tinygrad_repo/test/extra/test_lr_scheduler.py
Normal file
@@ -0,0 +1,107 @@
import numpy as np
import torch
import unittest
from tinygrad.tensor import Tensor
from tinygrad.nn.state import get_parameters
from tinygrad.nn.optim import Adam
from extra.lr_scheduler import MultiStepLR, ReduceLROnPlateau, CosineAnnealingLR, OneCycleLR
from extra.training import train, evaluate
from extra.datasets import fetch_mnist
import pytest

pytestmark = [pytest.mark.exclude_cuda, pytest.mark.exclude_gpu]

np.random.seed(1337)
Tensor.manual_seed(1337)

X_train, Y_train, X_test, Y_test = fetch_mnist()

class TinyBobNet:
  def __init__(self):
    self.l1 = Tensor.scaled_uniform(784, 128)
    self.l2 = Tensor.scaled_uniform(128, 10)

  def parameters(self):
    return get_parameters(self)

  def forward(self, x):
    return x.dot(self.l1).relu().dot(self.l2).log_softmax()

def lr_scheduler_training(sched_fn=None, args=None):
  model = TinyBobNet()
  optim = Adam(model.parameters(), lr=0.01)
  if sched_fn is not None: sched = sched_fn(optim, **args)
  for _ in range(25):
    train(model, X_train, Y_train, optim, 100)
    if sched_fn is not None:
      if isinstance(sched, ReduceLROnPlateau):
        sched.step(evaluate(model, X_test, Y_test))
      else:
        sched.step()
  return evaluate(model, X_test, Y_test)

def current_lr(optim): return optim.param_groups[0]['lr'] if hasattr(optim, 'param_groups') else optim.lr
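# get_lrs records the LR before any step and again after each scheduler step, producing
# the full trajectory so the tinygrad and torch schedulers can be compared elementwise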
def get_lrs(optim, sched, epochs, steps=1, accs=None):
  lr = current_lr(optim)
  if not isinstance(lr, float): lr = lr.numpy()[0]
  lrs = [lr]
  for e in range(epochs):
    for _ in range(steps):
      optim.step()
      sched.step() if accs is None else sched.step(accs[e])
    lr = current_lr(optim)
    if not isinstance(lr, float): lr = lr.numpy()[0]
    lrs.append(lr)
  return lrs

class TestLrScheduler(unittest.TestCase):
  def _test_lr_scheduler(self, tinygrad_sched, torch_sched, epochs, opts, atol, rtol):
    accs = opts.pop('accs', None)
    tinygrad_optim, torch_optim = Adam([], lr=0.01), torch.optim.Adam([torch.tensor([0.], requires_grad=True)], lr=0.01)
    tinygrad_sched, torch_sched = tinygrad_sched(tinygrad_optim, **opts), torch_sched(torch_optim, **opts)

    tinygrad_lrs = get_lrs(tinygrad_optim, tinygrad_sched, epochs, accs=accs)
    torch_lrs = get_lrs(torch_optim, torch_sched, epochs, accs=accs)

    np.testing.assert_allclose(tinygrad_lrs, torch_lrs, atol=atol, rtol=rtol)

  def _test_multisteplr(self, epochs, opts, atol, rtol):
    self._test_lr_scheduler(MultiStepLR, torch.optim.lr_scheduler.MultiStepLR, epochs, opts, atol, rtol)
  def _test_reducelronplateau(self, epochs, opts, atol, rtol):
    opts['accs'] = np.random.randn(epochs)
    self._test_lr_scheduler(ReduceLROnPlateau, torch.optim.lr_scheduler.ReduceLROnPlateau, epochs, opts, atol, rtol)
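  # CosineAnnealingLR is expected to follow
  # eta_t = eta_min + (eta_max - eta_min) * (1 + cos(pi * t / T_max)) / 2,
  # so pinning T_max to the epoch count spans exactly one half-cosine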
  def _test_cosineannealinglr(self, epochs, opts, atol, rtol):
    opts['T_max'] = epochs
    self._test_lr_scheduler(CosineAnnealingLR, torch.optim.lr_scheduler.CosineAnnealingLR, epochs, opts, atol, rtol)
  def _test_onecyclelr(self, epochs, opts, atol, rtol):
    opts['total_steps'] = epochs
    self._test_lr_scheduler(OneCycleLR, torch.optim.lr_scheduler.OneCycleLR, epochs, opts, atol, rtol)

  def test_multisteplr(self): self._test_multisteplr(10, {'milestones': [1, 2, 7]}, 1e-6, 1e-6)
  def test_multisteplr_gamma(self): self._test_multisteplr(10, {'milestones': [1, 2, 7], 'gamma': 0.1337}, 1e-6, 1e-6)

  def test_reducelronplateau(self): self._test_reducelronplateau(100, {}, 1e-6, 1e-6)
  def test_reducelronplateau_max(self): self._test_reducelronplateau(100, {'mode': 'max'}, 1e-6, 1e-6)
  def test_reducelronplateau_factor(self): self._test_reducelronplateau(100, {'factor': 0.1337}, 1e-6, 1e-6)
  def test_reducelronplateau_patience(self): self._test_reducelronplateau(100, {'patience': 3}, 1e-6, 1e-6)
  def test_reducelronplateau_threshold(self): self._test_reducelronplateau(100, {'threshold': 1e-6}, 1e-6, 1e-6)
  def test_reducelronplateau_threshold_mode(self): self._test_reducelronplateau(100, {'threshold_mode': 'abs'}, 1e-6, 1e-6)

  def test_cosineannealinglr(self): self._test_cosineannealinglr(100, {}, 1e-6, 1e-6)
  def test_cosineannealinglr_eta_min(self): self._test_cosineannealinglr(100, {'eta_min': 0.001}, 1e-6, 1e-6)

  def test_onecyclelr(self): self._test_onecyclelr(1000, {'pct_start': 0.3, 'anneal_strategy': 'linear',
                                                          'cycle_momentum': False, 'div_factor': 25.0,
                                                          'final_div_factor': 10000.0, 'max_lr':1e-5}, 1e-6, 1e-6)
  @unittest.skip("slow")
  def test_training(self):
    without = lr_scheduler_training()
    sched_fns = [MultiStepLR, ReduceLROnPlateau, CosineAnnealingLR, OneCycleLR]
    argss = [{'milestones': [5, 7, 10, 15], 'gamma': 0.5}, {'factor': 0.5, 'patience': 2}, {'T_max': 25, 'eta_min': 0.001},
             {'pct_start': 0.3, 'anneal_strategy': 'linear', 'cycle_momentum': False, 'div_factor': 25.0, 'final_div_factor': 10000.0, 'max_lr':1e-5, 'total_steps': 25}]
    for sched_fn, args in zip(sched_fns, argss):
      with_sched = lr_scheduler_training(sched_fn, args)
      assert with_sched > without

if __name__ == '__main__':
  unittest.main()
106
tinygrad_repo/test/extra/test_utils.py
Normal file
@@ -0,0 +1,106 @@
#!/usr/bin/env python
import io, unittest
import os
import tempfile
from unittest.mock import patch, MagicMock

import torch
import numpy as np
from tinygrad.helpers import CI
from extra.utils import fetch, temp, download_file
from tinygrad.nn.state import torch_load
from PIL import Image

@unittest.skipIf(CI, "no internet tests in CI")
class TestFetch(unittest.TestCase):
  def test_fetch_bad_http(self):
    self.assertRaises(AssertionError, fetch, 'http://httpstat.us/500')
    self.assertRaises(AssertionError, fetch, 'http://httpstat.us/404')
    self.assertRaises(AssertionError, fetch, 'http://httpstat.us/400')

  def test_fetch_small(self):
    assert(len(fetch('https://google.com'))>0)

  def test_fetch_img(self):
    img = fetch("https://media.istockphoto.com/photos/hen-picture-id831791190")
    pimg = Image.open(io.BytesIO(img))
    assert pimg.size == (705, 1024)

class TestFetchRelative(unittest.TestCase):
  def setUp(self):
    self.working_dir = os.getcwd()
    self.tempdir = tempfile.TemporaryDirectory()
    os.chdir(self.tempdir.name)
    with open('test_file.txt', 'x') as f:
      f.write("12345")

  def tearDown(self):
    os.chdir(self.working_dir)
    self.tempdir.cleanup()

  # test ./
  def test_fetch_relative_dotslash(self):
    self.assertEqual(b'12345', fetch("./test_file.txt"))

  # test ../
  def test_fetch_relative_dotdotslash(self):
    os.mkdir('test_file_path')
    os.chdir('test_file_path')
    self.assertEqual(b'12345', fetch("../test_file.txt"))

class TestDownloadFile(unittest.TestCase):
  def setUp(self):
    from pathlib import Path
    self.test_file = Path(temp("test_download_file/test_file.txt"))

  def tearDown(self):
    os.remove(self.test_file)
    os.removedirs(self.test_file.parent)
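
  # requests.get is patched out, so download_file never touches the network; the mocked
  # response streams two chunks that download_file should write out as one file, creating
  # the missing parent directory first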
  @patch('requests.get')
  def test_download_file_with_mkdir(self, mock_requests):
    mock_response = MagicMock()
    mock_response.iter_content.return_value = [b'1234', b'5678']
    mock_response.status_code = 200
    mock_response.headers = {'content-length': '8'}
    mock_requests.return_value = mock_response
    self.assertFalse(self.test_file.parent.exists())
    download_file("https://www.mock.com/fake.txt", self.test_file, skip_if_exists=False)
    self.assertTrue(self.test_file.parent.exists())
    self.assertTrue(self.test_file.is_file())
    self.assertEqual('12345678', self.test_file.read_text())

class TestUtils(unittest.TestCase):
  def test_fake_torch_load_zipped(self): self._test_fake_torch_load_zipped()
  def test_fake_torch_load_zipped_float16(self): self._test_fake_torch_load_zipped(isfloat16=True)
  def _test_fake_torch_load_zipped(self, isfloat16=False):
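    # param1 and param2 view the same 16-element storage at different nonzero offsets, so
    # a correct torch_load has to honor storage_offset instead of assuming every tensor
    # starts at the beginning of its buffer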
    class LayerWithOffset(torch.nn.Module):
      def __init__(self):
        super(LayerWithOffset, self).__init__()
        d = torch.randn(16)
        self.param1 = torch.nn.Parameter(
          d.as_strided([2, 2], [1, 2], storage_offset=5)
        )
        self.param2 = torch.nn.Parameter(
          d.as_strided([2, 2], [1, 2], storage_offset=4)
        )

    model = torch.nn.Sequential(
      torch.nn.Linear(4, 8),
      torch.nn.Linear(8, 3),
      LayerWithOffset()
    )
    if isfloat16: model = model.half()

    path = temp(f"test_load_{isfloat16}.pt")
    torch.save(model.state_dict(), path)
    model2 = torch_load(path)

    for name, a in model.state_dict().items():
      b = model2[name]
      a, b = a.numpy(), b.numpy()
      assert a.shape == b.shape
      assert a.dtype == b.dtype
      assert np.array_equal(a, b)

if __name__ == '__main__':
  unittest.main()