Make parallel tasks interruptible with Ctrl-C
The concurrent.futures backport doesn't play well with
KeyboardInterrupt, so I'm using Thread and Queue instead.

Since thread pooling would likely be a pain to implement, I've just
removed `COMPOSE_MAX_WORKERS` for now. We'll implement it later if we
decide we need it.

Signed-off-by: Aanand Prasad <[email protected]>
aanand committed Jul 20, 2015
1 parent 4ffae4a commit 4ba9d9d
Showing 5 changed files with 22 additions and 26 deletions.
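
The fix relies on a CPython 2 threading quirk: a bare `Queue.get()` blocks inside an uninterruptible C-level lock acquire, so `KeyboardInterrupt` never reaches the main thread, whereas `get(timeout=...)` waits in a polling loop that lets the signal through. A minimal standalone sketch of the pattern; the `work` function and `jobs` list are illustrative and not part of the diff:

    import time
    from Queue import Queue, Empty  # Python 2 stdlib; renamed 'queue' in Python 3
    from threading import Thread

    def work(item, results):
        time.sleep(1)          # stand-in for a slow Docker API call
        results.put(item)

    jobs = ['a', 'b', 'c']     # illustrative work items
    results = Queue()

    for item in jobs:
        t = Thread(target=work, args=(item, results))
        t.daemon = True        # daemon threads die with the main thread on Ctrl-C
        t.start()

    done = 0
    while done < len(jobs):
        try:
            # The timeout is the crucial part: a bare get() would block
            # uninterruptibly and swallow Ctrl-C on Python 2.
            print(results.get(timeout=1))
            done += 1
        except Empty:
            pass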
1 change: 0 additions & 1 deletion compose/const.py
@@ -1,5 +1,4 @@
 
-DEFAULT_MAX_WORKERS = 20
 DEFAULT_TIMEOUT = 10
 LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
 LABEL_ONE_OFF = 'com.docker.compose.oneoff'
39 changes: 22 additions & 17 deletions compose/utils.py
@@ -2,13 +2,11 @@
 import hashlib
 import json
 import logging
-import os
 import sys
 
 from docker.errors import APIError
-import concurrent.futures
-
-from .const import DEFAULT_MAX_WORKERS
+from Queue import Queue, Empty
+from threading import Thread
 
 
 log = logging.getLogger(__name__)
@@ -18,33 +16,40 @@ def parallel_execute(command, containers, doing_msg, done_msg, **options):
"""
Execute a given command upon a list of containers in parallel.
"""
max_workers = os.environ.get('COMPOSE_MAX_WORKERS', DEFAULT_MAX_WORKERS)
stream = codecs.getwriter('utf-8')(sys.stdout)
lines = []
errors = {}

for container in containers:
write_out_msg(stream, lines, container.name, doing_msg)

q = Queue()

def container_command_execute(container, command, **options):
try:
getattr(container, command)(**options)
except APIError as e:
errors[container.name] = e.explanation
q.put(container)

for container in containers:
t = Thread(
target=container_command_execute,
args=(container, command),
kwargs=options,
)
t.daemon = True
t.start()

done = 0

with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
future_container = {
executor.submit(
container_command_execute,
container,
command,
**options
): container for container in containers
}

for future in concurrent.futures.as_completed(future_container):
container = future_container[future]
while done < len(containers):
try:
container = q.get(timeout=1)
write_out_msg(stream, lines, container.name, done_msg)
done += 1
except Empty:
pass

if errors:
for container in errors:
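
For reference, a hypothetical call site for the rewritten helper; the signature comes from the diff above, but the container list and timeout value are invented for illustration:

    # Stop all containers in parallel; Ctrl-C now interrupts the q.get()
    # wait loop instead of hanging inside an executor shutdown.
    parallel_execute(
        'stop',
        containers,       # a list of compose Container objects
        'Stopping',
        'Stopped',
        timeout=10,       # forwarded to each container's stop() via **options
    )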
6 changes: 0 additions & 6 deletions docs/reference/overview.md
@@ -44,12 +44,6 @@ the `docker` daemon.
 
 Configures the path to the `ca.pem`, `cert.pem`, and `key.pem` files used for TLS verification. Defaults to `~/.docker`.
 
-### COMPOSE\_MAX\_WORKERS
-
-Configures the maximum number of worker threads to be used when executing
-commands in parallel. Only a subset of commands execute in parallel, `stop`,
-`kill` and `rm`.
-
 
 
 
1 change: 0 additions & 1 deletion requirements.txt
@@ -2,7 +2,6 @@ PyYAML==3.10
 docker-py==1.3.0
 dockerpty==0.3.4
 docopt==0.6.1
-futures==3.0.3
 requests==2.6.1
 six==1.7.3
 texttable==0.8.2
1 change: 0 additions & 1 deletion setup.py
@@ -33,7 +33,6 @@ def find_version(*file_paths):
     'docker-py >= 1.3.0, < 1.4',
     'dockerpty >= 0.3.4, < 0.4',
     'six >= 1.3.0, < 2',
-    'futures >= 3.0.3',
 ]


