Changes from all commits (116 commits)
edf6a94
Update mnist.py
co63oc May 22, 2023
7abc8d9
Merge pull request #518 from 514flowey/complex
514flowey May 16, 2024
e90fbac
Merge pull request #534 from fansunqi/master
LDYang694 May 16, 2024
04a02e0
polish PixelShuffle in nn.py
LDYang694 May 16, 2024
233c0e7
Merge pull request #538 from fansunqi/issue525_branch
LDYang694 May 16, 2024
a8f51c4
Merge pull request #537 from fansunqi/issue527_branch
LDYang694 May 16, 2024
a2316de
Merge pull request #535 from fansunqi/issue529_branch
LDYang694 May 16, 2024
7bb9ce2
Merge pull request #536 from fansunqi/issue528_branch
LDYang694 May 16, 2024
b506d63
Merge pull request #443 from co63oc/patch-1
LDYang694 May 16, 2024
e001b4c
polish rocm support
LDYang694 May 20, 2024
26b2cf0
Merge pull request #543 from LDYang694/master
LDYang694 May 20, 2024
f645a07
Merge pull request #541 from fansunqi/issue521_branch
LDYang694 May 20, 2024
f2644d4
Merge pull request #540 from fansunqi/issue522_branch
LDYang694 May 20, 2024
640af86
Merge pull request #539 from fansunqi/issue523_branch
LDYang694 May 20, 2024
05f4cf3
Update version to 1.3.9.8
LDYang694 May 20, 2024
e69e1f7
Merge branch 'master' into master
uyzhang May 21, 2024
419bf3c
Merge pull request #533 from uyzhang/master
uyzhang May 21, 2024
7714ce3
fix: a minimal quick fix for issue #544
zhc7 May 22, 2024
c334324
Merge pull request #545 from zhc7/patch-1
LDYang694 May 28, 2024
5df1673
fix: jt.Var.expand with valid index -1
May 29, 2024
14de5fa
an IndexError fix of issue #448
May 30, 2024
862bce9
a ValueError fix of issue #450
May 30, 2024
cd8b19a
fix illegal parameters of Pool and Pool3d of issue #451,#453,#456,#457
May 30, 2024
793d638
fix illegal parameters of Conv2d issue #471,#472,#473,#474,#475,#476,…
May 30, 2024
9e60eb6
fix illegal parameters of PixelShuffle of issue #458,fix validity of …
May 30, 2024
9a23f5c
check x.shape and kernel_size of Pool and Pool3d,issue #461,#463
May 31, 2024
9d7e634
fix Pad2d with illegal padding,issue #464,#465,#466,#467
May 31, 2024
c79142d
fix illegal parameters of ConvTranspose and Pool,issue #478,#480,#481…
May 31, 2024
6967475
Update README.md
LDYang694 Jun 4, 2024
b1f18f0
Merge pull request #546 from Hanyx2021/fix-expand
LDYang694 Jun 5, 2024
8d26bb8
polish nn.Sequential attribute
LDYang694 Jun 5, 2024
44daca5
check target shape and output shape in jt.nn.binary_cross_entropy_wit…
Jun 6, 2024
3310abb
check input1 and input2 shape in jt.nn.Bilinear()
Jun 6, 2024
e7e3ea3
check input shape in jt.nn.Conv1d
Jun 6, 2024
8cc95a6
check input shape in jt.nn.Conv1d_sp
Jun 6, 2024
d7bfb05
jt.nn.Conv1d_sp in_channels and out_channels must be positive
Jun 6, 2024
4c04060
jt.nn.Conv1d in_channels and out_channels must be positive
Jun 6, 2024
ecc73d0
check input shape in jt.nn.ConvTranspose
Jun 6, 2024
9c26755
modify stride positive check in jt.nn.transpose3d
Jun 10, 2024
5d35972
add input shape check in jt.nn.transpose3d
Jun 10, 2024
00c6cb1
modify stride positive check in jt.nn.conv_transpose; add input shape…
Jun 10, 2024
dc824d1
remove 3D(unbatch) description
Jun 10, 2024
5cd0051
add stride check in jt.nn.ConvTranspose
Jun 10, 2024
f192568
modify error information
Jun 10, 2024
0f5c7f8
check input shape in nn.Dropout2d
Jun 10, 2024
bd10558
check input shape in jt.nn.ZeroPad2d
Jun 10, 2024
81eccbf
check input shape in jt.nn.ReplicationPad2d
Jun 10, 2024
0dc433d
check input shape and scale factor's positiveness in jt.nn.Upsample
Jun 10, 2024
ca63d37
resume
Jun 10, 2024
d42bbda
check input shape and scale factor's positiveness in jt.nn.Upsample
Jun 10, 2024
0ea0fd9
Update setup.py
LDYang694 Jun 25, 2024
7416cfb
update version
LDYang694 Jun 25, 2024
35d6655
Merge branch 'Jittor:master' into master
fansunqi Jul 1, 2024
21e7409
check parameters' positive in jt.nn.fold
fansunqi Jul 1, 2024
8454a7a
Merge branch 'Jittor:master' into fold
fansunqi Jul 1, 2024
810a069
check parameter's positive in jt.nn.unfold
fansunqi Jul 1, 2024
5ff687d
Merge pull request #561 from fansunqi/fold
LDYang694 Jul 2, 2024
b2f7f26
update version
LDYang694 Jul 2, 2024
7852283
add isin
LDYang694 Jul 5, 2024
dc692c2
Merge pull request #562 from fansunqi/unfold
JittorRepos Jul 8, 2024
76014c0
Merge pull request #557 from fansunqi/ReplicationPad2d
JittorRepos Jul 8, 2024
9cc147d
Merge pull request #556 from fansunqi/zeroPad2d
LDYang694 Jul 8, 2024
88fcdd8
Merge pull request #555 from fansunqi/Dropout2d
LDYang694 Jul 8, 2024
3638d05
Merge pull request #550 from fansunqi/Conv1d
LDYang694 Jul 8, 2024
75a2425
Merge pull request #551 from fansunqi/Conv1d_sp
LDYang694 Jul 8, 2024
46b290a
Update nn.py
LDYang694 Jul 8, 2024
7dd2e25
Merge pull request #554 from fansunqi/ConvTranspose
LDYang694 Jul 8, 2024
361e2e9
Merge pull request #553 from fansunqi/conv_transpose3d
LDYang694 Jul 8, 2024
08e88be
Merge pull request #548 from fansunqi/binary_cross_entropy_with_logits
LDYang694 Jul 8, 2024
d4886b0
polish nn.Sequential __getattr__
LDYang694 Jul 9, 2024
06c97cf
Merge branch 'master' of https://github.com/Jittor/jittor
LDYang694 Jul 9, 2024
02f6f6d
update acl
uyzhang Jul 9, 2024
6f782bf
Update acl_compiler.py
uyzhang Jul 9, 2024
28e1920
ComplexNumber:polar,view_as_complex,view_as_real
Jul 9, 2024
6eef0f8
fix load bugs
zjp-shadow Jul 10, 2024
d1d39d2
add no gpu device error
LDYang694 Jul 12, 2024
7347b70
FEAT! where,scatter,cumsum,gather,flip
Jul 18, 2024
8236cd6
Merge pull request #570 from dengyx21/dev-dyx
uyzhang Jul 18, 2024
357a0de
fix dtype mismatch in `nn.cross_entropy_loss`
CHEN-Xinsheng Jul 19, 2024
0b369ee
Merge pull request #571 from CHEN-Xinsheng/dev-cross_entropy_loss
uyzhang Jul 19, 2024
bbc448a
FEAT! add aclop unittest
Jul 19, 2024
0de90f2
Format test_aclop.py
uyzhang Jul 19, 2024
aee6664
Merge pull request #572 from dengyx21/dev-dyx
uyzhang Jul 19, 2024
80e3d29
Update acl_compiler.py
uyzhang Jul 24, 2024
ff0c295
Update compile_extern.py
uyzhang Jul 24, 2024
c0e9e0f
FEAT! add floor_int
Jul 24, 2024
b476a2c
Merge pull request #575 from dengyx21/dev-dyx
uyzhang Jul 24, 2024
3973e07
feat: enable ACL optimization in split function
uyzhang Jul 24, 2024
7759a54
Update compile_extern.py
uyzhang Jul 24, 2024
bab510c
Update acl_compiler.py
uyzhang Jul 24, 2024
dc604d7
Merge remote-tracking branch 'upstream/master'
Jul 24, 2024
fa33e0a
Fixed the BUG of ACL op memory
uyzhang Jul 25, 2024
21ac78e
complement of test_aclop
Jul 26, 2024
af818e1
Merge pull request #1 from Jittor/master
Hanyx2021 Jul 26, 2024
b010cc6
complement of test_aclop
Aug 1, 2024
7cf4732
complement of test_aclop
Aug 12, 2024
4159ace
complement of test_aclop: error of scatter()-multiple and where()
Aug 12, 2024
4664211
Merge pull request #567 from Hanyx2021/master
uyzhang Aug 12, 2024
4667225
add several functions
514flowey Aug 20, 2024
a55e296
fix unique bug
514flowey Aug 22, 2024
da45615
Merge pull request #579 from 514flowey/complex
514flowey Aug 22, 2024
a725f20
fix load
liylo Aug 28, 2024
3b63820
simple implementation for block diag
liylo Aug 28, 2024
362d09c
simple implementation for block diag with proper grad
liylo Aug 28, 2024
f56ddbd
init
liylo Aug 28, 2024
29fa67b
forward hooks can now modify inputs and outputs
liylo Aug 28, 2024
58cd6b5
remove compatibility
LDYang694 Sep 4, 2024
55f740e
Merge pull request #582 from liylo/master
LDYang694 Sep 4, 2024
79bc57f
Merge pull request #558 from fansunqi/Upsample
LDYang694 Sep 4, 2024
210dca4
Merge pull request #549 from fansunqi/bilinear
LDYang694 Sep 4, 2024
dfc789e
Merge pull request #503 from 514flowey/attention_mask
LDYang694 Sep 4, 2024
06dbec4
Merge pull request #583 from liylo/func
LDYang694 Sep 4, 2024
612c7e8
Merge pull request #584 from liylo/module
LDYang694 Sep 4, 2024
c0996cd
fix dim=3 error
Sep 5, 2024
73bc64d
Merge pull request #586 from fansunqi/dim
514flowey Sep 5, 2024
106380c
fix `jittor.nn.AdaptiveMaxPool3d` doc
luozhiya Sep 11, 2024
README.md (4 changes: 2 additions & 2 deletions)
@@ -382,10 +382,10 @@ Email: [email protected]

File an issue: https://github.com/Jittor/jittor/issues

- QQ Group: 761222083
+ QQ Group: 836860279


- <img src="https://cg.cs.tsinghua.edu.cn/jittor/images/news/2020-12-8-21-19-1_2_2/fig4.png" width="200"/>
+ <img src="https://github.com/Jittor/jittor/assets/62846124/8dd830bd-b31c-4e4f-9a78-5fd7a3409145" width="200"/>

## The Team

doc/source/jittor.nn.md (2 changes: 1 addition & 1 deletion)
@@ -10,7 +10,7 @@ jittor.nn

.. automodule:: jittor.nn
:imported-members:
- :members: Pool, pool, AdaptiveAvgPool2d, Pool3d, AdaptiveMaxPool2d, AdaptiveAvgPool3d, AdaptiveMaxPool2d, pool3d, AvgPool2d, AvgPool3d, avg_pool2d, MaxPool2d, MaxPool3d, max_pool2d, max_pool3d, MaxUnpool2d, MaxUnpool3d
+ :members: Pool, pool, AdaptiveAvgPool2d, Pool3d, AdaptiveMaxPool2d, AdaptiveAvgPool3d, AdaptiveMaxPool3d, pool3d, AvgPool2d, AvgPool3d, avg_pool2d, MaxPool2d, MaxPool3d, max_pool2d, max_pool3d, MaxUnpool2d, MaxUnpool3d
:undoc-members:

.. autoclass:: jittor.nn.ReLU
python/jittor/__init__.py (72 changes: 58 additions & 14 deletions)
@@ -9,7 +9,7 @@
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************

- __version__ = '1.3.9.6'
+ __version__ = '1.3.9.10'
from jittor_utils import lock
with lock.lock_scope():
ori_int = int
@@ -428,7 +428,9 @@ def random(shape, dtype="float32", type="uniform"):
jt.Var([[0.96788853 0.28334728 0.30482838]
[0.46107793 0.62798643 0.03457401]], dtype=float32)
'''

for dim in shape:
if dim < 0:
raise RuntimeError(f"Trying to create tensor with negative dimension {dim}: {shape}")
ret = ops.random(shape, "float32", type)
## TODO: move those code to core
#if dtype in ["float16", "bfloat16"]:
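For illustration, a minimal usage sketch of the shape check added above (the same validation is added below to ones, zeros, full, randn and randint); jt.random is jittor's public API, the failing call is purely illustrative:

    import jittor as jt

    jt.random((2, 3))        # a valid shape works as before
    try:
        jt.random((2, -3))   # a negative dimension is now rejected up front
    except RuntimeError as e:
        print(e)             # Trying to create tensor with negative dimension -3: ...
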
@@ -484,6 +486,9 @@ def ones(*shape, dtype="float32"):
shape = shape[:-1]
if isinstance(shape, tuple) and isinstance(shape[0], (Sequence, NanoVector)):
shape = shape[0]
for dim in shape:
if dim < 0:
raise RuntimeError(f"Trying to create tensor with negative dimension {dim}: {shape}")
return unary(1, dtype).broadcast(shape)

def new_ones(x, size):
@@ -515,6 +520,9 @@ def zeros(*shape, dtype="float32"):
shape = shape[:-1]
if isinstance(shape, tuple) and isinstance(shape[0], (Sequence, NanoVector)):
shape = shape[0]
for dim in shape:
if dim < 0:
raise RuntimeError(f"Trying to create tensor with negative dimension {dim}: {shape}")
return unary(0, dtype).broadcast(shape)

def new_zeros(x, size):
@@ -547,6 +555,9 @@ def full(shape,val,dtype="float32"):
'''
if not isinstance(shape, (NanoVector, Sequence)):
shape = (shape,)
for dim in shape:
if dim < 0:
raise RuntimeError(f"Trying to create tensor with negative dimension {dim}: {shape}")
return unary(val, dtype).broadcast(shape)

def new_full(x, size, val):
@@ -641,14 +652,22 @@ def var(x, dim=None, dims=None, unbiased=False, keepdims=False):
return sqr
Var.var = var

- def std(x):
-     matsize=1
-     for i in x.shape:
-         matsize *= i
-     out=(x-x.mean()).sqr().sum()
-     out=out/(matsize-1)
-     out=out.maximum(1e-6).sqrt()
-     return out
+ def std(x, dim=None, keepdim=False):
+     if dim is None:
+         matsize=1
+         for i in x.shape:
+             matsize *= i
+         out=(x-x.mean()).sqr().sum()
+         out=out/(matsize-1)
+         out=out.maximum(1e-6).sqrt()
+         return out
+     else:
+         dimsize=x.size(dim)
+         mean=jt.mean(x, dim, keepdim=True)
+         out=(x - mean).sqr().sum(dim=dim, keepdim=keepdim)
+         out=out/(dimsize-1)
+         out=out.maximum(1e-6).sqrt()
+         return out
Var.std = std

def norm(x, p=2, dim=-1, keepdims=False, eps=1e-30, keepdim=False):
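As a quick illustration of the extended std above, a sketch only; the shapes in the comments assume the new dim/keepdim path behaves as written in the hunk:

    import jittor as jt

    x = jt.randn(4, 5)
    print(x.std())                      # sample std over all elements, as before
    print(x.std(dim=1))                 # per-row sample std, shape (4,)
    print(x.std(dim=1, keepdim=True))   # per-row sample std, reduced axis kept, shape (4, 1)
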
@@ -687,6 +706,8 @@ def flatten(input, start_dim=0, end_dim=-1):
start_dim = len(in_shape) + start_dim if start_dim < 0 else start_dim
end_dim = len(in_shape) + end_dim if end_dim < 0 else end_dim
assert end_dim >= start_dim, "end_dim should be larger than or equal to start_dim for flatten function"
if len(in_shape) <= end_dim:
raise IndexError(f"Dimension out of range (expected to be in range of [{-len(in_shape)}, {len(in_shape) - 1}], but got {end_dim})")
out_shape = []
for i in range(0,start_dim,1): out_shape.append(in_shape[i])
dims = 1
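A short sketch of the new flatten range check: an end_dim beyond the input's rank now raises IndexError instead of producing a malformed shape.

    import jittor as jt

    x = jt.randn(2, 3, 4)
    print(jt.flatten(x, 1, 2).shape)    # flattened to shape [2, 12]
    try:
        jt.flatten(x, 1, 5)             # end_dim=5 is out of range for a 3-D input
    except IndexError as e:
        print(e)
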
@@ -917,6 +938,9 @@ def randn(*size, dtype="float32", requires_grad=True) -> Var:
[-0.612632 -1.1471151 -1.1879086 ]], dtype=float32)
'''
if isinstance(size, tuple) and isinstance(size[0], (tuple, list, NanoVector)): size = size[0]
for dim in size:
if dim < 0:
raise RuntimeError(f"Trying to create tensor with negative dimension {dim}: {size}")
arr = jt.random(size, dtype, "normal")
if not requires_grad: return arr.stop_grad()
return arr
@@ -1013,6 +1037,9 @@ def randint(low, high=None, shape=(1,), dtype="int32") -> Var:
[1 1 1]], dtype=int32)
'''
if high is None: low, high = 0, low
for dim in shape:
if dim < 0:
raise RuntimeError(f"Trying to create tensor with negative dimension {dim}: {shape}")
v = (jt.random(shape) * (high - low) + low).clamp(low, high-0.5)
v = jt.floor_int(v)
return v.astype(dtype)
@@ -1437,9 +1464,17 @@ def requires_grad_(self, requires_grad=True):
def __hooked_call__(self, *args, **kw):
if hasattr(self, "__fhook2__"):
if len(kw):
- self.__fhook2__(self, args, kw)
+ args_kw_result = self.__fhook2__(self, args, kw)
else:
- self.__fhook2__(self, args)
+ args_kw_result = self.__fhook2__(self, args)
if args_kw_result is not None:
if isinstance(args_kw_result, tuple) and len(args_kw_result) == 2:
args, kw = args_kw_result
else:
raise RuntimeError(
"forward pre-hook must return None or a tuple "
f"of (new_args, new_kwargs), but got {args_kw_result}."
)
if hasattr(self, "__bihook__"):
if len(kw):
LOG.w("backward hook not support kw")
@@ -1458,9 +1493,11 @@ def __hooked_call__(self, *args, **kw):
ret = grad_hooker(ret, self.__bohook__)
if hasattr(self, "__fhook__"):
if len(kw):
- self.__fhook__(self, args, ret, kw)
+ res = self.__fhook__(self, args, ret, kw)
else:
- self.__fhook__(self, args, ret)
+ res = self.__fhook__(self, args, ret)
if res is not None:
ret = res
return ret

def _place_hooker(self):
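For illustration, a minimal sketch of the new hook semantics shown above: a pre-forward hook may now return (new_args, new_kwargs) to replace the call arguments, and a forward hook may return a value to replace the output. It assumes the hooks are installed via Module.register_pre_forward_hook and Module.register_forward_hook (the helpers that set __fhook2__ and __fhook__); the module and data are illustrative.

    import jittor as jt
    from jittor import nn

    layer = nn.Linear(4, 2)

    def pre_hook(module, args, kw=None):
        x, = args
        return (x * 2,), (kw or {})     # replaces the positional args (and kwargs)

    def post_hook(module, args, output, kw=None):
        return output + 1               # a non-None return replaces the module output

    layer.register_pre_forward_hook(pre_hook)   # assumed registration API
    layer.register_forward_hook(post_hook)
    y = layer(jt.randn(3, 4))           # runs with the doubled input; output shifted by 1
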
@@ -1595,6 +1632,8 @@ def load_parameters(self, params):
else:
if hasattr(v, k):
v = getattr(v, k)
if v is None:
continue
assert isinstance(v, (Module, Var)), \
f"expect a jittor Module or Var, but got <{v.__class__.__name__}>, key: {key}"
else:
@@ -2119,6 +2158,7 @@ def is_var(v):
from . import optim
from . import dataset
from . import init
from . import gradfunctional

dtype = NanoString

@@ -2152,3 +2192,7 @@ def inplace_wrapper(new_k, prev_func):
from . import math_util
from .math_util import *
from . import distributions

if jt.compiler.has_acl:
from jittor.extern.acl.acl_compiler import change_function
change_function()
python/jittor/__init__.pyi (2 changes: 1 addition & 1 deletion)
@@ -1,7 +1,7 @@
from jittor_core import *
from jittor_core.ops import *
from .misc import *
- from . import attention as attention, contrib as contrib, dataset as dataset, init as init, linalg as linalg, lr_scheduler as lr_scheduler, numpy2cupy as numpy2cupy, optim as optim, sparse as sparse
+ from . import attention as attention, contrib as contrib, dataset as dataset, init as init, linalg as linalg, lr_scheduler as lr_scheduler, numpy2cupy as numpy2cupy, optim as optim, sparse as sparse, gradfunctional as gradfunctional
from .compile_extern import cublas as cublas, cudnn as cudnn, cufft as cufft, curand as curand, mkl_ops as mkl_ops, mpi_ops as mpi_ops, world_size as world_size
from .compiler import compile_custom_op as compile_custom_op, compile_custom_ops as compile_custom_ops
from .contrib import concat as concat