forked from CornellCAC/slurm-cluster-in-openstack
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathprovision_headnode.yml
More file actions
102 lines (87 loc) · 2.27 KB
/
provision_headnode.yml
File metadata and controls
102 lines (87 loc) · 2.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
---
# Play 1: runs on the Ansible control host only.
# Loads shared cluster variables, then dynamically adds the headnode
# to the in-memory inventory so the next play can target it.
- hosts: localhost
  gather_facts: false  # no facts needed just to build the inventory
  vars_files:
    - ./vars/main.yml
  tasks:
    - import_tasks: tasks/add_headnode_inventory.yml
# Play 2: provision the Slurm headnode — NFS and NTP via Galaxy roles,
# then Slurm controller configuration, service startup, and munge-key export.
- hosts: headnode
  become: true  # canonical YAML boolean (was "yes")
  vars_files:
    - ./vars/main.yml
  roles:
    # Export /home and the OpenHPC public tree to all cluster nodes.
    - role: geerlingguy.nfs
      vars:
        nfs_exports:
          - "/home {{ cluster_network_cidr }}(rw,sync,no_root_squash)"
          - "/opt/ohpc/pub {{ cluster_network_cidr }}(rw,sync,no_root_squash)"
    - role: geerlingguy.ntp
      vars:
        ntp_servers:
          - "ntp0.cac.cornell.edu iburst"
          - "ntp1.cac.cornell.edu iburst"
          - "ntp2.cac.cornell.edu iburst"
  handlers:
    # Triggered by slurm.conf changes so config edits take effect.
    - name: restart slurmctld
      service:
        name: slurmctld
        state: restarted
  tasks:
    - import_tasks: tasks/install_openstack_client.yml
    - import_tasks: tasks/install_headnode_packages.yml
    - name: Allow incoming traffic from cluster network
      ansible.posix.firewalld:
        source: "{{ cluster_network_cidr }}"
        zone: trusted
        state: enabled
        permanent: true
        immediate: true
    - name: Create /var/log/slurm
      file:
        path: /var/log/slurm
        owner: slurm
        group: slurm
        mode: "0700"  # quoted — bare 0700 is read as decimal 448 by YAML 1.1 loaders
        state: directory
    # Credentials used by the resume/suspend scripts to talk to OpenStack.
    - name: Copy openrc file
      template:
        src: openrc.sh.j2
        dest: /etc/slurm/openrc.sh
        owner: slurm
        group: slurm
        mode: "0600"
    - name: Copy slurm.conf
      template:
        src: slurm.conf.j2
        dest: /etc/slurm/slurm.conf
        owner: root
        group: root
        mode: "0644"
      notify: restart slurmctld
    - name: Copy slurm_resume.sh
      template:
        src: slurm_resume.sh.j2
        dest: /usr/local/sbin/slurm_resume.sh
        owner: root
        group: root
        mode: "0755"
    - name: Copy slurm_suspend.sh
      template:
        src: slurm_suspend.sh.j2
        dest: /usr/local/sbin/slurm_suspend.sh
        owner: root
        group: root
        mode: "0755"
    # munge must be up BEFORE slurmctld: slurmctld authenticates through the
    # munge daemon and fails to start if it is not running.
    - name: Start munge
      service:
        name: munge
        state: started
        enabled: true
    - name: Start slurmctld
      service:
        name: slurmctld
        state: started
        enabled: true
    # Pull the shared munge key back to the control host so compute-node
    # provisioning can distribute the identical key.
    - name: Gather munge.key
      fetch:
        src: /etc/munge/munge.key
        dest: ./files/munge.key
        flat: true  # canonical boolean (was "yes"); store without hostname path prefix