Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit a642430

Browse files
refactor and fix
1 parent f1f5d76 commit a642430

File tree

3 files changed

+24
-14
lines changed

3 files changed

+24
-14
lines changed

‎nipype/pipeline/engine/nodes.py‎

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -822,8 +822,9 @@ def update(self, **opts):
822822
self.inputs.update(**opts)
823823

824824
def is_gpu_node(self):
825-
return ((hasattr(self.inputs, 'use_cuda') and self.inputs.use_cuda)
826-
or (hasattr(self.inputs, 'use_gpu') and self.inputs.use_gpu))
825+
return (hasattr(self.inputs, 'use_cuda') and self.inputs.use_cuda) or (
826+
hasattr(self.inputs, 'use_gpu') and self.inputs.use_gpu
827+
)
827828

828829

829830
class JoinNode(Node):

‎nipype/pipeline/plugins/multiproc.py‎

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -134,13 +134,14 @@ def __init__(self, plugin_args=None):
134134
# GPU found on system
135135
self.n_gpus_visible = MultiProcPlugin.gpu_count()
136136
# proc per GPU set by user
137-
self.n_gpu_procs = plugin_args.get('n_gpu_procs', self.n_gpus_visible)
137+
self.n_gpu_procs = self.plugin_args.get('n_gpu_procs', self.n_gpus_visible)
138138

139139
# total no. of processes allowed on all gpus
140140
if self.n_gpu_procs > self.n_gpus_visible:
141141
logger.info(
142-
'Total number of GPUs proc requested (%d) exceeds the available number of GPUs (%d) on the system. Using requested GPU slots at your own risk!' % (
143-
self.n_gpu_procs, self.n_gpus_visible))
142+
'Total number of GPUs proc requested (%d) exceeds the available number of GPUs (%d) on the system. Using requested GPU slots at your own risk!'
143+
% (self.n_gpu_procs, self.n_gpus_visible)
144+
)
144145

145146
# Instantiate different thread pools for non-daemon processes
146147
logger.debug(
@@ -220,9 +221,7 @@ def _prerun_check(self, graph):
220221
if self.raise_insufficient:
221222
raise RuntimeError("Insufficient resources available for job")
222223
if np.any(np.array(tasks_gpu_th) > self.n_gpu_procs):
223-
logger.warning(
224-
'Nodes demand more GPU than allowed (%d).',
225-
self.n_gpu_procs)
224+
logger.warning('Nodes demand more GPU than allowed (%d).', self.n_gpu_procs)
226225
if self.raise_insufficient:
227226
raise RuntimeError('Insufficient GPU resources available for job')
228227

@@ -257,7 +256,9 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
257256
)
258257

259258
# Check available resources by summing all threads and memory used
260-
free_memory_gb, free_processors, free_gpu_slots = self._check_resources(self.pending_tasks)
259+
free_memory_gb, free_processors, free_gpu_slots = self._check_resources(
260+
self.pending_tasks
261+
)
261262

262263
stats = (
263264
len(self.pending_tasks),
@@ -267,7 +268,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
267268
free_processors,
268269
self.processors,
269270
free_gpu_slots,
270-
self.n_gpu_procs
271+
self.n_gpu_procs,
271272
)
272273
if self._stats != stats:
273274
tasks_list_msg = ""
@@ -338,8 +339,11 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
338339
is_gpu_node = self.procs[jobid].is_gpu_node()
339340

340341
# If node does not fit, skip at this moment
341-
if (next_job_th > free_processors or next_job_gb > free_memory_gb
342-
or (is_gpu_node and next_job_gpu_th > free_gpu_slots)):
342+
if (
343+
next_job_th > free_processors
344+
or next_job_gb > free_memory_gb
345+
or (is_gpu_node and next_job_gpu_th > free_gpu_slots)
346+
):
343347
logger.debug(
344348
"Cannot allocate job %d (%0.2fGB, %d threads, %d GPU slots).",
345349
jobid,
@@ -424,6 +428,7 @@ def gpu_count():
424428
n_gpus = 1
425429
try:
426430
import GPUtil
431+
427432
return len(GPUtil.getGPUs())
428433
except ImportError:
429434
return n_gpus

‎nipype/pipeline/plugins/tests/test_multiproc.py‎

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def test_run_multiproc(tmpdir):
5656
class InputSpecSingleNode(nib.TraitedSpec):
5757
input1 = nib.traits.Int(desc="a random int")
5858
input2 = nib.traits.Int(desc="a random int")
59-
use_gpu = nib.traits.Bool(False, mandatory=False, desc="boolean for GPU nodes")
59+
use_gpu = nib.traits.Bool(False, mandatory=False, desc="boolean for GPU nodes")
6060

6161

6262
class OutputSpecSingleNode(nib.TraitedSpec):
@@ -117,6 +117,7 @@ def test_no_more_threads_than_specified(tmpdir):
117117
with pytest.raises(RuntimeError):
118118
pipe.run(plugin="MultiProc", plugin_args={"n_procs": max_threads})
119119

120+
120121
def test_no_more_gpu_threads_than_specified(tmpdir):
121122
tmpdir.chdir()
122123

@@ -129,7 +130,10 @@ def test_no_more_gpu_threads_than_specified(tmpdir):
129130
max_threads = 2
130131
max_gpu = 1
131132
with pytest.raises(RuntimeError):
132-
pipe.run(plugin="MultiProc", plugin_args={"n_procs": max_threads, 'n_gpu_procs': max_gpu})
133+
pipe.run(
134+
plugin="MultiProc",
135+
plugin_args={"n_procs": max_threads, 'n_gpu_procs': max_gpu},
136+
)
133137

134138

135139
@pytest.mark.skipif(

0 commit comments

Comments
(0)

Page converted by AltStyle (link to original)