|
23 | 23 |
|
24 | 24 | setup_logger(log_level=args.verbose, use_stdout=args.log_stdout) |
25 | 25 |
|
# --- Early environment setup: must run before torch is imported, because ---
# --- CUDA/HIP/ASCEND read their *_VISIBLE_DEVICES variables at init time. ---

def default_device_order(default_dev, device_count=32):
    """Return a visible-devices string (e.g. "2,0,1,3,...,31") with *default_dev* first.

    The remaining indices 0..device_count-1 keep their natural order. Unlike the
    previous list.remove() approach, a default_dev outside 0..device_count-1 is
    simply prepended instead of raising an uncaught ValueError.
    """
    rest = [d for d in range(device_count) if d != default_dev]
    return ','.join(map(str, [default_dev] + rest))


if os.name == "nt":
    # Windows: make mimalloc return freed memory to the OS immediately.
    os.environ['MIMALLOC_PURGE_DELAY'] = '0'

if __name__ == "__main__":
    os.environ['TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL'] = '1'

    if args.default_device is not None:
        # Reorder device visibility so the requested device becomes index 0;
        # all other devices stay visible in their natural order.
        devices = default_device_order(args.default_device)
        os.environ['CUDA_VISIBLE_DEVICES'] = devices
        os.environ['HIP_VISIBLE_DEVICES'] = devices

    if args.cuda_device is not None:
        # Restrict visibility to a single device (overrides --default-device).
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
        os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device)
        os.environ["ASCEND_RT_VISIBLE_DEVICES"] = str(args.cuda_device)
        logging.info("Set cuda device to: {}".format(args.cuda_device))

    if args.oneapi_device_selector is not None:
        os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector
        logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector))

    if args.deterministic:
        # Needed by torch.use_deterministic_algorithms() for cuBLAS; honor a
        # user-provided workspace config if one is already set.
        if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"

    import cuda_malloc
    if "rocm" in cuda_malloc.get_torch_version_noimport():
        os.environ['OCL_SET_SVM_SIZE'] = '262144'  # set at the request of AMD
26 | 58 |
|
27 | 59 | def handle_comfyui_manager_unavailable(): |
28 | 60 | if not args.windows_standalone_build: |
@@ -137,40 +169,6 @@ def execute_script(script_path): |
137 | 169 | import threading |
138 | 170 | import gc |
139 | 171 |
|
# Early process/environment configuration; must happen before torch is imported
# so the device-visibility variables below are honored.

if os.name == "nt":
    os.environ['MIMALLOC_PURGE_DELAY'] = '0'

if __name__ == "__main__":
    os.environ['TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL'] = '1'

    if args.default_device is not None:
        dev = args.default_device
        order = list(range(32))
        order.remove(dev)  # ValueError if dev is not in 0..31, same as before
        order.insert(0, dev)
        visible = ','.join(str(d) for d in order)
        os.environ['CUDA_VISIBLE_DEVICES'] = visible
        os.environ['HIP_VISIBLE_DEVICES'] = visible

    if args.cuda_device is not None:
        chosen = str(args.cuda_device)
        os.environ['CUDA_VISIBLE_DEVICES'] = chosen
        os.environ['HIP_VISIBLE_DEVICES'] = chosen
        os.environ["ASCEND_RT_VISIBLE_DEVICES"] = chosen
        logging.info("Set cuda device to: {}".format(args.cuda_device))

    if args.oneapi_device_selector is not None:
        os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector
        logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector))

    if args.deterministic:
        # Respect a user-supplied cuBLAS workspace config if already present.
        if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"

    import cuda_malloc
    if "rocm" in cuda_malloc.get_torch_version_noimport():
        os.environ['OCL_SET_SVM_SIZE'] = '262144'  # set at the request of AMD
# Sanity check: the device-visibility environment variables only take effect
# if torch has not been loaded yet, so flag any premature torch import loudly.
torch_preloaded = 'torch' in sys.modules
if torch_preloaded:
    logging.warning("WARNING: Potential Error in code: Torch already imported, torch should never be imported before this point.")
|
|
0 commit comments