GitBucket
4.21.2
Toggle navigation
Snippets
Sign in
Files
Branches
1
Releases
Issues
Pull requests
Labels
Priorities
Milestones
Wiki
Forks
stephen.cranefield
/
ansible_spark_openstack
Browse code
Moving vars to separate file
master
1 parent
45b0fff
commit
2228f93eb3ce3eb47443540d682100a6c2c6d27e
Johan Dahlberg
authored
on 4 Mar 2015
Patch
Showing
3 changed files
create_spark_cloud_playbook.yml
deploy_spark_playbook.yml
vars/main.yml
Ignore Space
Show notes
View
create_spark_cloud_playbook.yml
---
# Spin up (or, with instance_state=absent, tear down) an OpenStack Spark
# cluster: one master instance plus nbr_of_slaves slave instances.
# Cluster-wide settings (instance_state, key_name, network_id,
# nbr_of_slaves) are expected from vars/main.yml.
- hosts: localhost
  connection: local
  tasks:
    - name: Create a new spark master instance
      nova_compute:
        state: "{{ instance_state }}"
        name: spark-master
        image_id: 9bf957ba-a0ce-4513-ba8c-e80d42ea9faf
        key_name: "{{ key_name }}"
        wait_for: 200
        flavor_id: 4
        nics:
          - net-id: "{{ network_id }}"
        security_groups: spark,default
        meta:
          hostname: spark-master
          ansible_host_groups: spark_masters,default
      register: spark_master

    - name: Create a new spark slaves
      nova_compute:
        state: "{{ instance_state }}"
        name: "{{ item }}"
        image_id: 9bf957ba-a0ce-4513-ba8c-e80d42ea9faf
        key_name: "{{ key_name }}"
        wait_for: 200
        flavor_id: 4
        nics:
          - net-id: "{{ network_id }}"
        security_groups: spark,default
        meta:
          hostname: "{{ item }}"
          ansible_host_groups: spark_slaves,default
      register: spark_slaves
      # with_sequence is inclusive at BOTH ends, so the original
      # start=0 end=nbr_of_slaves spawned one slave too many; ending at
      # nbr_of_slaves - 1 yields exactly nbr_of_slaves instances.
      # Also, %02x formatted the counter in HEX (the 11th slave would be
      # "spark-slave0a"); %02d gives the intended zero-padded decimal names.
      with_sequence: start=0 end="{{ nbr_of_slaves|int - 1 }}" format=spark-slave%02d
---
# Previous revision: same provisioning playbook, but with the cluster
# settings declared inline instead of in vars/main.yml.
- hosts: localhost
  connection: local
  vars:
    state: present  # Set to absent to take down machines
    nbr_of_slaves: 3
    key_name: cloud_key
  tasks:
    - name: Create a new spark master instance
      nova_compute:
        state: "{{ state }}"
        name: spark-master
        image_id: 9bf957ba-a0ce-4513-ba8c-e80d42ea9faf
        key_name: "{{ key_name }}"
        wait_for: 200
        flavor_id: 4
        nics:
          - net-id: b045a7de-5aa4-43a2-86ff-049fd1ba1884
        security_groups: spark,default
        meta:
          hostname: spark-master
          ansible_host_groups: spark_masters,default
      register: spark_master

    - name: Create a new spark slaves
      nova_compute:
        state: "{{ state }}"
        name: "{{ item }}"
        image_id: 9bf957ba-a0ce-4513-ba8c-e80d42ea9faf
        key_name: "{{ key_name }}"
        wait_for: 200
        flavor_id: 4
        nics:
          - net-id: b045a7de-5aa4-43a2-86ff-049fd1ba1884
        security_groups: spark,default
        meta:
          hostname: "{{ item }}"
          ansible_host_groups: spark_slaves,default
      register: spark_slaves
      with_sequence: start=0 end="{{ nbr_of_slaves }}" format=spark-slave%02x
Ignore Space
Show notes
View
deploy_spark_playbook.yml
---
# ------------------------
# Deploy the general stuff
# ------------------------
- hosts: all
  sudo: true
  pre_tasks:
    - name: Update APT cache
      apt:
        update_cache: true
  tasks:
    - name: install java
      apt:
        name: openjdk-7-jre
        state: present
        update_cache: true

    - name: disable net.ipv6.conf.all.disable_ipv6
      sysctl:
        name: net.ipv6.conf.all.disable_ipv6
        value: 1
        state: present

    - name: disable net.ipv6.conf.default.disable_ipv6
      sysctl:
        name: net.ipv6.conf.default.disable_ipv6
        value: 1
        state: present

    - name: disable net.ipv6.conf.lo.disable_ipv6
      sysctl:
        name: net.ipv6.conf.lo.disable_ipv6
        value: 1
        state: present

    - name: distribute host file
      template:
        src: templates/hosts.j2
        dest: /etc/hosts

    - name: deploy ssh-keys
      copy:
        src: "{{ ssh_keys_to_use }}"
        dest: "/home/{{ user }}/.ssh/"
      register: ssh_key

    - name: distribute ssh config
      template:
        src: templates/config.j2
        dest: "/home/{{ user }}/.ssh/config"
      register: ssh_debug

    # Plain-HTTP download: integrity is protected by the checksum below,
    # but consider an https mirror.
    - name: download spark
      get_url:
        url: http://d3kbcqa49mib13.cloudfront.net/spark-1.2.1-bin-hadoop2.4.tgz
        dest: /opt/
        sha256sum: "8e618cf67b3090acf87119a96e5e2e20e51f6266c44468844c185122b492b454"

    - name: unzip spark
      unarchive:
        copy: false
        src: /opt/spark-1.2.1-bin-hadoop2.4.tgz
        dest: /opt

    - name: deploy slaves configuration
      template:
        src: templates/slaves.j2
        dest: /opt/spark-1.2.1-bin-hadoop2.4/conf/slaves

# --------------------------------------------------
# Kick off spark (making the master start the slaves)
# --------------------------------------------------
- hosts: spark_masters
  tasks:
    - name: stop spark master (if running)
      command: /opt/spark-1.2.1-bin-hadoop2.4/sbin/stop-master.sh

    - name: start spark master
      shell: SPARK_MASTER_IP="{{ ansible_hostname }}" /opt/spark-1.2.1-bin-hadoop2.4/sbin/start-master.sh

    - name: stop the slaves (if running)
      shell: /opt/spark-1.2.1-bin-hadoop2.4/sbin/stop-slaves.sh

    - name: start the slaves
      shell: /opt/spark-1.2.1-bin-hadoop2.4/sbin/start-slaves.sh
---
# ------------------------
# Deploy the general stuff
# ------------------------
# Previous revision: same deploy playbook, but with ssh_keys_to_use and
# user declared inline instead of in vars/main.yml.
- hosts: all
  sudo: yes
  vars:
    ssh_keys_to_use: files/cloud.key
    user: ubuntu
  pre_tasks:
    - name: Update APT cache
      apt: update_cache=yes
  tasks:
    - name: install java
      apt: name=openjdk-7-jre state=present update_cache=yes

    - name: disable net.ipv6.conf.all.disable_ipv6
      sysctl: name=net.ipv6.conf.all.disable_ipv6 value=1 state=present

    - name: disable net.ipv6.conf.default.disable_ipv6
      sysctl: name=net.ipv6.conf.default.disable_ipv6 value=1 state=present

    - name: disable net.ipv6.conf.lo.disable_ipv6
      sysctl: name=net.ipv6.conf.lo.disable_ipv6 value=1 state=present

    - name: distribute host file
      template: src=templates/hosts.j2 dest=/etc/hosts

    - name: deploy ssh-keys
      copy: src={{ssh_keys_to_use}} dest=/home/{{ user }}/.ssh/
      register: ssh_key

    - name: distribute ssh config
      template: src=templates/config.j2 dest=/home/{{ user }}/.ssh/config
      register: ssh_debug

    - name: download spark
      get_url: url=http://d3kbcqa49mib13.cloudfront.net/spark-1.2.1-bin-hadoop2.4.tgz dest=/opt/ sha256sum=8e618cf67b3090acf87119a96e5e2e20e51f6266c44468844c185122b492b454

    - name: unzip spark
      unarchive: copy=no src=/opt/spark-1.2.1-bin-hadoop2.4.tgz dest=/opt

    - name: deploy slaves configuration
      template: src=templates/slaves.j2 dest=/opt/spark-1.2.1-bin-hadoop2.4/conf/slaves

# --------------------------------------------------
# Kick off spark (making the master start the slaves)
# --------------------------------------------------
- hosts: spark_masters
  tasks:
    - name: stop spark master (if running)
      command: /opt/spark-1.2.1-bin-hadoop2.4/sbin/stop-master.sh

    - name: start spark master
      shell: SPARK_MASTER_IP="{{ ansible_hostname }}" /opt/spark-1.2.1-bin-hadoop2.4/sbin/start-master.sh

    - name: stop the slaves (if running)
      shell: /opt/spark-1.2.1-bin-hadoop2.4/sbin/stop-slaves.sh

    - name: start the slaves
      shell: /opt/spark-1.2.1-bin-hadoop2.4/sbin/start-slaves.sh
Ignore Space
Show notes
View
vars/main.yml
0 → 100644
---
# Cluster-wide settings shared by create_spark_cloud_playbook.yml and
# deploy_spark_playbook.yml.

# Set to absent to take down machines
instance_state: present

# Number of workers to spawn in the cluster
nbr_of_slaves: 3

# The name of the key pair you will use to log in
# as set in OpenStack (see the OpenStack security dashboard)
key_name: cloud_key

# Id of the network to run in (can be found in the OpenStack
# dashboard)
network_id: b045a7de-5aa4-43a2-86ff-049fd1ba1884

# This is the ssh key which will be distributed
# across the cluster. It is important that this key
# does NOT use password protection
ssh_keys_to_use: files/cloud.key

# Name of the user used to install everything
# on the remote systems
user: ubuntu
Show line notes below