
In principle, JAX should not need a hand-written CUDA kernel for the ThreeFry2x32 algorithm. In practice, XLA inlines aggressively, which causes compilation times on GPU to blow up when a program contains many copies of the PRNG kernel. As a workaround, we add a hand-written CUDA kernel, mostly to reduce compilation time. When XLA becomes smarter about compiling this particular hash function, we should be able to remove the hand-written kernel.
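
For context, below is a minimal pure-Python sketch of the ThreeFry-2x32 block function that the hand-written kernel implements (the counter-based PRNG of Salmon et al., "Parallel Random Numbers: As Easy as 1, 2, 3", SC'11). It is illustrative only: the helper names rotl32 and threefry2x32 are assumptions, not identifiers from JAX's source, and the real kernel hashes whole arrays of counters in parallel.

MASK = 0xFFFFFFFF  # all arithmetic is modulo 2**32
ROTATIONS = [13, 15, 26, 6, 17, 29, 16, 24]  # Threefry-2x32 rotation constants

def rotl32(x, d):
    # Rotate a 32-bit word left by d bits.
    return ((x << d) | (x >> (32 - d))) & MASK

def threefry2x32(key, counter):
    # Hash a 2-word counter under a 2-word key: 20 rounds in groups of 4,
    # with a key injection before the first group and after each group.
    x0, x1 = counter
    ks = [key[0], key[1], key[0] ^ key[1] ^ 0x1BD11BDA]  # key schedule
    x0 = (x0 + ks[0]) & MASK
    x1 = (x1 + ks[1]) & MASK
    for i in range(5):
        for r in ROTATIONS[4 * (i % 2):4 * (i % 2) + 4]:
            x0 = (x0 + x1) & MASK       # MIX step: add, rotate, xor
            x1 = rotl32(x1, r) ^ x0
        x0 = (x0 + ks[(i + 1) % 3]) & MASK
        x1 = (x1 + ks[(i + 2) % 3] + i + 1) & MASK
    return x0, x1

if __name__ == "__main__":
    # Known-answer test from the Random123 reference suite: zero key, zero counter.
    assert threefry2x32((0, 0), (0, 0)) == (0x6B200159, 0x99BA4EFE)

Because every call is just this fixed 20-round mixing network, compiling it once as a hand-written kernel is cheap, whereas letting XLA inline and re-optimize a fresh copy at every call site is what made GPU compile times blow up.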
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

# Re-export the public lax API, plus private helpers used by sibling modules.
from .lax import *
from .lax import (_reduce_sum, _reduce_max, _reduce_min, _reduce_or,
                  _reduce_and, _reduce_window_sum, _reduce_window_max,
                  _reduce_window_min, _reduce_window_prod,
                  _select_and_gather_add, _float, _complex,
                  _input_dtype, _const, _eq_meet, _safe_mul,
                  _broadcasting_select, _check_user_dtype_supported,
                  _one, _upcast_fp16_for_computation,
                  _broadcasting_shape_rule)
from .lax_control_flow import *
from .lax_fft import *
from .lax_parallel import *