import os
import torch
def is_megatron_available():
    """Report whether the ``megatron`` package can be imported.

    The original stub unconditionally returned ``True``, which defeats the
    availability guard in ``initialize``; probe the import machinery instead.

    Returns:
        bool: True when a ``megatron`` distribution is importable.
    """
    import importlib.util  # local import: only needed by this probe
    return importlib.util.find_spec("megatron") is not None
def get_torch_cuda_version():
    """Return the CUDA version PyTorch was built against, or ``'N/A'``.

    ``torch.version.cuda`` is ``None`` on CPU-only builds; callers
    (``initialize``) compare the result against the string sentinel
    ``'N/A'``, so map ``None`` to that sentinel here instead of leaking
    ``None`` into ``parse_version``.

    Returns:
        str: A CUDA version string such as ``'12.1'``, or ``'N/A'``.
    """
    return torch.version.cuda or 'N/A'
def get_int(val):
    """Return *val* converted to ``int`` when possible, else unchanged.

    Used to normalize environment-variable values, which arrive as strings.

    Args:
        val: Any value; typically a string read from ``os.getenv``.

    Returns:
        ``int(val)`` when the conversion succeeds, otherwise ``val`` as-is.
    """
    try:
        return int(val)
    except (ValueError, TypeError):
        # TypeError covers non-numeric, non-string inputs (e.g. None) so this
        # helper never raises on unexpected values; the original caught only
        # ValueError and would crash on int(None).
        return val
def initialize():
    """Validate the Megatron runtime environment and seed tuning env vars.

    Steps:
      1. Ensure Megatron is importable.
      2. If PyTorch reports a CUDA build, check it against Megatron's
         declared version requirement.
      3. Normalize a fixed set of performance/tuning environment variables:
         keep any caller-provided value, otherwise apply the default, always
         storing the canonical string form back into ``os.environ``.

    Raises:
        ImportError: If Megatron is not available.
        EnvironmentError: If the PyTorch CUDA version is below the
            version Megatron requires.
    """
    # Check that Megatron is available.
    if not is_megatron_available():
        raise ImportError("Megatron is not available.")

    # Check that the PyTorch CUDA build satisfies Megatron's requirement.
    torch_cuda_version = get_torch_cuda_version()
    if torch_cuda_version != 'N/A':
        from megatron import megatron_version_required
        from pkg_resources import parse_version
        # NOTE(review): megatron_version_required[0] is assumed to be a
        # version string (first element of a sequence); confirm the upstream
        # type — if it is itself a plain string, [0] yields only its first
        # character and this comparison is wrong.
        if parse_version(torch_cuda_version) < parse_version(megatron_version_required[0]):
            raise EnvironmentError(
                f"Megatron requires PyTorch CUDA version >= {megatron_version_required[0]}."
                f" But found version {torch_cuda_version}."
            )

    # Tuning knobs and their defaults, in the original application order
    # (dicts preserve insertion order on Python 3.7+). Each value is read
    # from the environment (falling back to the default), round-tripped
    # through get_int to normalize numeric strings, and written back.
    env_defaults = {
        'OMP_NUM_THREADS': '1',
        'MKL_NUM_THREADS': '1',
        'NCCL_MAX_RW_PAIRS': '16',
        'TVM_NUM_THREADS': '1',
        'NUMA_BIND': '1',
        'TF32_FLUSH_TO_ZERO': '1',
        'DD_BIDIRECTIONAL_INFERENCE': '0',
        'GPU_DIRECT_FAST_PATH': '1',
        'DISABLE_CUDA_AFFINITY': '0',
    }
    for key, default in env_defaults.items():
        os.environ[key] = str(get_int(os.getenv(key, default)))
# NOTE: stray page footer from the original source ("评论已关闭" —
# "comments are closed"); converted to a comment so the file parses.