Python Playbook Class
import ansible.inventory
import ansible.constants as C
import ansible.runner
from ansible.utils.template import template
from ansible import utils
from ansible import errors
from ansible.module_utils.splitter import split_args, unquote
import ansible.callbacks
import ansible.cache
import os
import shlex
import collections
from play import Play
import StringIO
import pipes
# module-level caches shared by PlayBook instances: gathered facts and
# per-host variables, keyed by host name (referenced as SETUP_CACHE/VARS_CACHE below)
SETUP_CACHE = ansible.cache.FactCache()
VARS_CACHE = collections.defaultdict(dict)
class PlayBook(object):
'''
runs an ansible playbook, given as a datastructure or YAML filename.
A playbook is a deployment, config management, or automation based
set of commands to run in series.
'''
# *****************************************************
def __init__(self,
playbook = None,
host_list = C.DEFAULT_HOST_LIST,
module_path = None,
forks = C.DEFAULT_FORKS,
timeout = C.DEFAULT_TIMEOUT,
remote_user = C.DEFAULT_REMOTE_USER,
remote_pass = C.DEFAULT_REMOTE_PASS,
sudo_pass = C.DEFAULT_SUDO_PASS,
remote_port = None,
transport = C.DEFAULT_TRANSPORT,
private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
callbacks = None,
runner_callbacks = None,
stats = None,
sudo = False,
sudo_user = C.DEFAULT_SUDO_USER,
extra_vars = None,
only_tags = None,
skip_tags = None,
subset = C.DEFAULT_SUBSET,
inventory = None,
check = False,
diff = False,
any_errors_fatal = False,
su = False,
su_user = False,
su_pass = False,
vault_password = False,
force_handlers = False,
):
"""
playbook: path to a playbook file
host_list: path to a file like /etc/ansible/hosts
module_path: path to ansible modules, like /usr/share/ansible/
forks: desired level of parallelism
timeout: connection timeout
remote_user: run as this user if not specified in a particular play
remote_pass: use this remote password (for all plays) vs using SSH keys
sudo_pass: if sudo==True, and a password is required, this is the sudo password
remote_port: default remote port to use if not specified with the host or play
transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
callbacks: output callbacks for the playbook
runner_callbacks: more callbacks, this time for the runner API
stats: holds aggregate data about events occurring to each host
sudo: if not specified per play, requests all plays use sudo mode
inventory: can be specified instead of host_list to use a pre-existing inventory object
check: don't change anything, just try to detect some potential changes
any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
force_handlers: continue to notify and run handlers even if a task fails
"""
self.SETUP_CACHE = SETUP_CACHE
self.VARS_CACHE = VARS_CACHE
arguments = []
if playbook is None:
arguments.append('playbook')
if callbacks is None:
arguments.append('callbacks')
if runner_callbacks is None:
arguments.append('runner_callbacks')
if stats is None:
arguments.append('stats')
if arguments:
raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
if extra_vars is None:
extra_vars = {}
if only_tags is None:
only_tags = [ 'all' ]
if skip_tags is None:
skip_tags = []
self.check = check
self.diff = diff
self.module_path = module_path
self.forks = forks
self.timeout = timeout
self.remote_user = remote_user
self.remote_pass = remote_pass
self.remote_port = remote_port
self.transport = transport
self.callbacks = callbacks
self.runner_callbacks = runner_callbacks
self.stats = stats
self.sudo = sudo
self.sudo_pass = sudo_pass
self.sudo_user = sudo_user
self.extra_vars = extra_vars
self.global_vars = {}
self.private_key_file = private_key_file
self.only_tags = only_tags
self.skip_tags = skip_tags
self.any_errors_fatal = any_errors_fatal
self.su = su
self.su_user = su_user
self.su_pass = su_pass
self.vault_password = vault_password
self.force_handlers = force_handlers
self.callbacks.playbook = self
self.runner_callbacks.playbook = self
if inventory is None:
self.inventory = ansible.inventory.Inventory(host_list)
self.inventory.subset(subset)
else:
self.inventory = inventory
# the playbook's base directory, used for relative lookups below
self.basedir = os.path.dirname(playbook) or '.'
# let inventory know the playbook basedir so it can load more vars
self.inventory.set_playbook_basedir(self.basedir)
vars = extra_vars.copy()
vars['playbook_dir'] = os.path.abspath(self.basedir)
if self.inventory.basedir() is not None:
vars['inventory_dir'] = self.inventory.basedir()
self.filename = playbook
(self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
ansible.callbacks.load_callback_plugins()
ansible.callbacks.set_playbook(self.callbacks, self)
self._ansible_version = utils.version_info(gitinfo=True)
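# A minimal sketch of driving this constructor from the Python API; the
# playbook path, inventory path, and verbosity below are placeholders rather
# than values taken from this module.
import ansible.playbook
from ansible import callbacks
from ansible import utils

stats = callbacks.AggregateStats()
playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)

pb = ansible.playbook.PlayBook(
    playbook='site.yml',              # placeholder playbook file
    host_list='/etc/ansible/hosts',   # or pass a pre-built inventory= object
    stats=stats,
    callbacks=playbook_cb,
    runner_callbacks=runner_cb,
)
results = pb.run()   # dict keyed by hostname with summarized per-host stats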
# *****************************************************
return play_vars
# *****************************************************
playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
accumulated_plays = []
play_basedirs = []
if type(playbook_data) != list:
raise errors.AnsibleError("parse error: playbooks must be formatted as
a YAML list, got %s" % type(playbook_data))
if 'include' in play:
# a playbook (list of plays) decided to include some other list of plays
# from another file. The result is a flat list of plays in the end.
else:
# *****************************************************
def run(self):
''' run all patterns in the playbook '''
plays = []
matched_tags_all = set()
unmatched_tags_all = set()
if len(unknown_tags) > 0:
unmatched_tags_all.discard('all')
msg = 'tag(s) not found in playbook: %s. possible values: %s'
unknown = ','.join(sorted(unknown_tags))
unmatched = ','.join(sorted(unmatched_tags_all))
raise errors.AnsibleError(msg % (unknown, unmatched))
# *****************************************************
return results
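# To make the tag filtering concrete (tag names are hypothetical): building the
# PlayBook with only_tags=['deploy'] when nothing in the playbook is tagged
# 'deploy' causes run() to raise
#   AnsibleError: tag(s) not found in playbook: deploy. possible values: ...
# instead of silently running nothing; unknown entries in skip_tags are
# reported the same way.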
# *****************************************************
hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts))
self.inventory.restrict_to(hosts)
runner = ansible.runner.Runner(
pattern=task.play.hosts,
inventory=self.inventory,
module_name=task.module_name,
module_args=task.module_args,
forks=self.forks,
remote_pass=self.remote_pass,
module_path=self.module_path,
timeout=self.timeout,
remote_user=task.remote_user,
remote_port=task.play.remote_port,
module_vars=task.module_vars,
play_vars=task.play_vars,
play_file_vars=task.play_file_vars,
role_vars=task.role_vars,
role_params=task.role_params,
default_vars=task.default_vars,
extra_vars=self.extra_vars,
private_key_file=self.private_key_file,
setup_cache=self.SETUP_CACHE,
vars_cache=self.VARS_CACHE,
basedir=task.play.basedir,
conditional=task.when,
callbacks=self.runner_callbacks,
sudo=task.sudo,
sudo_user=task.sudo_user,
transport=task.transport,
sudo_pass=task.sudo_pass,
is_playbook=True,
check=self.check,
diff=self.diff,
environment=task.environment,
complex_args=task.args,
accelerate=task.play.accelerate,
accelerate_port=task.play.accelerate_port,
accelerate_ipv6=task.play.accelerate_ipv6,
error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
su=task.su,
su_user=task.su_user,
su_pass=task.su_pass,
vault_pass=self.vault_password,
run_hosts=hosts,
no_log=task.no_log,
run_once=task.run_once,
)
runner.module_vars.update({'play_hosts': hosts})
runner.module_vars.update({'ansible_version': self._ansible_version})
if task.async_seconds == 0:
results = runner.run()
else:
results, poller = runner.run_async(task.async_seconds)
self.stats.compute(results)
if task.async_poll_interval > 0:
# a poll interval was given, so poll the async task to completion;
# otherwise the playbook requested fire and forget, so don't poll
results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
else:
for (host, res) in results.get('contacted', {}).iteritems():
self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])
contacted = results.get('contacted',{})
dark = results.get('dark', {})
self.inventory.lift_restriction()
return results
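# The dictionary returned here keeps the runner's grouping: hosts that answered
# under 'contacted', unreachable hosts under 'dark'. A hedged sketch of its
# shape, with hypothetical hosts and trimmed return data:
#   {'contacted': {'web1': {'changed': True, 'rc': 0}},
#    'dark':      {'web2': {'failed': True, 'msg': '...'}}}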
# *****************************************************
def _run_task(self, play, task, is_handler):
''' run a single task in the playbook and recursively run any subtasks. '''
ansible.callbacks.set_task(self.callbacks, task)
ansible.callbacks.set_task(self.runner_callbacks, task)
if task.role_name:
name = '%s | %s' % (task.role_name, task.name)
else:
name = task.name
# template ignore_errors
cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
ansible.callbacks.set_task(self.callbacks, None)
ansible.callbacks.set_task(self.runner_callbacks, None)
return hosts_remaining
# *****************************************************
found = False
for x in play.handlers():
if handler_name == template(play.basedir, x.name, x.module_vars):
found = True
self.callbacks.on_notify(host, x.name)
x.notified_by.append(host)
if not found:
raise errors.AnsibleError("change handler (%s) is not defined" %
handler_name)
# *****************************************************
host_list = self._trim_unavailable_hosts(play._play_hosts)
self.callbacks.on_setup()
self.inventory.restrict_to(host_list)
ansible.callbacks.set_task(self.callbacks, None)
ansible.callbacks.set_task(self.runner_callbacks, None)
self.inventory.lift_restriction()
# now for each result, load into the setup cache so we can
# let runner template out future commands
setup_ok = setup_results.get('contacted', {})
for (host, result) in setup_ok.iteritems():
utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
return setup_results
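# After this step the setup cache for a host holds the 'module_setup' marker
# plus every fact the setup module gathered, e.g. (hypothetical host):
#   SETUP_CACHE['web1'] -> {'module_setup': True, 'ansible_hostname': 'web1',
#                           'ansible_os_family': 'RedHat', ...}
# which later tasks can template against.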
# *****************************************************
buf = StringIO.StringIO()
for x in replay_hosts:
buf.write("%s\n" % x)
basedir = self.inventory.basedir()
filename = "%s.retry" % os.path.basename(self.filename)
filename = filename.replace(".yml","")
filename = os.path.join(os.path.expandvars('$HOME/'), filename)
try:
fd = open(filename, 'w')
fd.write(buf.getvalue())
fd.close()
return filename
except:
pass
return None
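# To illustrate the filename derivation above (paths are examples only):
#   self.filename = 'playbooks/site.yml'
#   os.path.basename(...) + '.retry', minus '.yml'  ->  'site.retry'
#   joined onto $HOME/                              ->  /home/deploy/site.retry
# The resulting file can be fed back to ansible-playbook with
# --limit @/home/deploy/site.retry to rerun only the failed hosts.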
# *****************************************************
self.callbacks.on_play_start(play.name)
# Get the hosts for this play
play._play_hosts = self.inventory.list_hosts(play.hosts)
# if no hosts matches this play, drop out
if not play._play_hosts:
self.callbacks.on_no_hosts_matched()
return True
if play.serial.endswith("%"):
serialized_batch = []
if serial <= 0:
serialized_batch = [all_hosts]
else:
# do N forks all the way through before moving to next
while len(all_hosts) > 0:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batch.append(play_hosts)
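# A worked example of the batching above (hypothetical hosts): with serial = 2
# and all_hosts = ['web1', 'web2', 'web3', 'web4', 'web5'], serialized_batch
# ends up as [['web1', 'web2'], ['web3', 'web4'], ['web5']], and the play runs
# to completion on each batch before the next one starts.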
task_errors = False
for on_hosts in serialized_batch:
# restrict the play to just the hosts we have in our on_hosts block that are
# available.
play._play_hosts = self._trim_unavailable_hosts(on_hosts)
self.inventory.also_restrict_to(on_hosts)
for y in task.tags:
if x == y:
should_run = True
break
if should_run:
# Get a new list of what hosts are left as available, the ones that
# did not go fail/dark during the task
host_list = self._trim_unavailable_hosts(play._play_hosts)
return True
fired_names = {}
for handler in play.handlers():
if len(handler.notified_by) > 0:
self.inventory.restrict_to(handler.notified_by)
host_list = self._trim_unavailable_hosts(play._play_hosts)
if handler.any_errors_fatal and len(host_list) < hosts_count:
play.max_fail_pct = 0
if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
host_list = None
if not host_list and not self.force_handlers:
self.callbacks.on_no_hosts_remaining()
return False
self.inventory.lift_restriction()
new_list = handler.notified_by[:]
for host in handler.notified_by:
if host in on_hosts:
while host in new_list:
new_list.remove(host)
handler.notified_by = new_list
continue
return True
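# A worked example of the max_fail_pct check in the handler loop above
# (illustrative numbers): with hosts_count = 10 and play.max_fail_pct = 20,
# host_list is cleared -- and the run aborted unless force_handlers is set --
# once more than int(20/100.0 * 10) == 2 hosts have failed or gone unreachable.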