Diffstat (limited to 'manifests/subsystem')
-rw-r--r--  manifests/subsystem/nas/share.pp    109
-rw-r--r--  manifests/subsystem/vm/instance.pp  208
2 files changed, 317 insertions(+), 0 deletions(-)
diff --git a/manifests/subsystem/nas/share.pp b/manifests/subsystem/nas/share.pp
new file mode 100644
index 0000000..43922b7
--- /dev/null
+++ b/manifests/subsystem/nas/share.pp
@@ -0,0 +1,109 @@
+# Share a folder publicly using FTP, Samba, NFS, DLNA, etc.
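+#
+# A minimal usage sketch (the title, folder path and dlna_type value
+# here are hypothetical):
+#
+#   nodo::subsystem::nas::share { 'music':
+#     description => 'Music collection',
+#     folder      => '/srv/media/music',
+#     dlna_type   => 'audio',
+#   }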
+define nodo::subsystem::nas::share(
+  $description,
+  $folder,
+  $owner = '',
+  $group = '',
+  $mode = '',
+  $dlna_type = '',
+  $nfs_export_target = '127.0.0.1',
+  $nfs_export_options = 'ro,sync,no_subtree_check',
+  $samba_guest_only = true,
+  $samba_guest_ok = true,
+  $samba_guest_account = '',
+  $samba_force_user = '',
+  $samba_force_group = '',
+  $samba_read_only = '',
+  $samba_writable = '',
+  $samba_create_mask = '0644',
+  $samba_directory_mask = '0755',
+  $manage_folder = true
+) {
+
+  # DLNA share
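+  # (an empty $dlna_type skips the DLNA share; 'all' is passed to
+  # minidlna::share as an undef type)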
+  if $dlna_type != '' {
+    minidlna::share { $folder:
+      type => $dlna_type ? {
+        'all'   => undef,
+        default => $dlna_type,
+      },
+    }
+  }
+
+  # Samba share
+  samba::server::share { $name:
+    comment        => $description,
+    path           => $folder,
+    guest_only     => $samba_guest_only,
+    guest_ok       => $samba_guest_ok,
+    guest_account  => $samba_guest_account ? {
+      ''      => undef,
+      default => $samba_guest_account,
+    },
+    force_user     => $samba_force_user ? {
+      ''      => undef,
+      default => $samba_force_user,
+    },
+    force_group    => $samba_force_group ? {
+      ''      => undef,
+      default => $samba_force_group,
+    },
+    read_only      => $samba_read_only ? {
+      ''      => undef,
+      default => $samba_read_only,
+    },
+    writable       => $samba_writable ? {
+      ''      => undef,
+      default => $samba_writable,
+    },
+    create_mask    => $samba_create_mask ? {
+      ''      => undef,
+      default => $samba_create_mask,
+    },
+    directory_mask => $samba_directory_mask ? {
+      ''      => undef,
+      default => $samba_directory_mask,
+    },
+    browsable      => true,
+  }
+
+  # NFS export
+  nfs::export { $name:
+    export_directory => $folder,
+    export_target    => $nfs_export_target,
+    export_options   => $nfs_export_options,
+  }
+
+  # HTTP and FTP symlinks to media assets
+  file { [ "/var/www/data/${name}", "/home/ftp/${name}" ]:
+    ensure  => link,
+    target  => $folder,
+    require => File['/var/www/data', '/home/ftp'],
+  }
+
+  # Avahi service
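+  # (assumes a matching nfs-<name>.service file is shipped in the
+  # site_avahi module)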
+ file { "/etc/avahi/services/nfs-${name}.service":
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => 0644,
+ source => "puppet:///modules/site_avahi/services/nfs-${name}.service",
+ notify => Service['avahi-daemon'],
+ }
+
+  # Make sure the basic media folders exist, no matter which disk is attached
+  $cache = hiera('nodo::subsystem::media::folders::base', '/var/cache/media')
+
+  if $cache != '' and $manage_folder != false {
+    file { "${cache}/${name}":
+      ensure => directory,
+      owner  => $owner ? {
+        ''      => undef,
+        default => $owner,
+      },
+      group  => $group ? {
+        ''      => undef,
+        default => $group,
+      },
+      mode   => $mode ? {
+        ''      => undef,
+        default => $mode,
+      },
+    }
+  }
+}
diff --git a/manifests/subsystem/vm/instance.pp b/manifests/subsystem/vm/instance.pp
new file mode 100644
index 0000000..4a2bfeb
--- /dev/null
+++ b/manifests/subsystem/vm/instance.pp
@@ -0,0 +1,208 @@
+# Define a vserver instance
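+#
+# A minimal usage sketch (the title and parameter values here are
+# hypothetical):
+#
+#   nodo::subsystem::vm::instance { 'web':
+#     context        => 2,
+#     implementation => 'vserver',
+#     proxy          => true,
+#   }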
+define nodo::subsystem::vm::instance(
+  $context,
+  $distro = 'squeeze',
+  $ensure = 'running',
+  $proxy = false,
+  $puppetmaster = false,
+  $puppetmaster_port = '8140',
+  $puppetmaster_nonssl_port = '8141',
+  $gitd = false,
+  $mail = false,
+  $icecast = false,
+  $sound = false,
+  $tor = false,
+  $tor_port = '9001',
+  $ticket = false,
+  $memory_limit = false,
+  $dns = false,
+  $jabber = false,
+  $mumble = false,
+  $gobby = false,
+  $yacy = false,
+  $rsync = false,
+  $avahi = false,
+  $munin_port = false,
+  $monkeysphere_ssh_port = false,
+  $implementation = false
+) {
+
+  # Instance id, zero-padded to two digits
+  if $context <= 9 {
+    $id = "0${context}"
+  } else {
+    $id = $context
+  }
+
+  $dev = hiera('nodo::subsystem::vm::interface', 'eth0')
+
+  if $implementation == 'vserver' {
+    virtual::vserver { $name:
+      ensure       => $ensure,
+      context      => "${context}",
+      mark         => 'default',
+      distro       => $distro,
+      interface    => "${dev}:192.168.0.${context}/24",
+      hostname     => "${name}.${::domain}",
+      memory_limit => $memory_limit,
+    }
+
+    # Some nodes need a lot of space at /tmp, otherwise some admin
+    # tasks like backups might not run.
+    file { "/etc/vservers/${name}/fstab":
+      ensure  => present,
+      source  => [ "puppet:///modules/site_nodo/etc/fstab/vserver/${name}",
+                   "puppet:///modules/nodo/etc/fstab/vserver" ],
+      owner   => 'root',
+      group   => 'root',
+      mode    => '0644',
+      notify  => Exec["vs_restart_${name}"],
+      require => Exec["vs_create_${name}"],
+    }
+
+    # Sound support
+    if $sound {
+      if !defined(File['/usr/local/sbin/create-sound-devices']) {
+        file { '/usr/local/sbin/create-sound-devices':
+          ensure => present,
+          source => 'puppet:///modules/nodo/sound/devices.sh',
+          owner  => 'root',
+          group  => 'root',
+          mode   => '0755',
+        }
+      }
+      exec { "/usr/local/sbin/create-sound-devices ${name}":
+        unless  => "/usr/local/sbin/create-sound-devices ${name} --check",
+        user    => 'root',
+        require => [ Exec["vs_create_${name}"],
+                     File['/usr/local/sbin/create-sound-devices'] ],
+      }
+    }
+  }
+
+  # Create a munin virtual resource to be realized in the node
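+  # (@@ exports the resource via storeconfigs; the monitoring node
+  # collects and realizes it)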
+  @@nodo::subsystem::monitor::munin { "${name}":
+    port => $munin_port ? {
+      false   => "49${id}",
+      default => $munin_port,
+    },
+  }
+
+  # Create a monkeysphere virtual resource to be realized in the node
+  @@nodo::subsystem::monkeysphere { "${name}":
+    port => $monkeysphere_ssh_port ? {
+      false   => "22${id}",
+      default => $monkeysphere_ssh_port,
+    },
+  }
+
+  # Apply firewall rules just for running vservers
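+  # (host port 22${id} is forwarded to the guest's port 22; munin uses
+  # port 49${id} on both host and guest)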
+  case $ensure {
+    'running': {
+      firewall::vm::ssh { "${name}":
+        destination => "192.168.0.${context}",
+        port_orig   => "22${id}",
+        port_dest   => '22',
+      }
+
+      firewall::vm::munin { "${name}":
+        destination => "192.168.0.${context}",
+        port_orig   => "49${id}",
+        port_dest   => "49${id}",
+      }
+
+      if $proxy {
+        class {
+          'firewall::vm::http':  destination => "192.168.0.${context}";
+          'firewall::vm::https': destination => "192.168.0.${context}";
+        }
+      }
+
+      if $puppetmaster {
+        class { 'firewall::vm::puppetmaster':
+          destination              => "192.168.0.${context}",
+          puppetmaster_port        => $puppetmaster_port,
+          puppetmaster_nonssl_port => $puppetmaster_nonssl_port,
+        }
+      }
+
+      if $gitd {
+        class { 'firewall::vm::gitd': destination => "192.168.0.${context}" }
+      }
+
+      if $icecast {
+        class { 'firewall::vm::icecast': destination => "192.168.0.${context}" }
+      }
+
+      if $mail {
+        class { 'firewall::vm::mail': destination => "192.168.0.${context}" }
+      }
+
+      if $dns {
+        class { 'firewall::vm::dns': destination => "192.168.0.${context}" }
+      }
+
+      if $tor {
+        class { 'firewall::vm::tor': destination => "192.168.0.${context}" }
+      }
+
+      if $jabber {
+        class { 'firewall::vm::jabber': destination => "192.168.0.${context}" }
+      }
+
+      if $mumble {
+        class { 'firewall::vm::mumble': destination => "192.168.0.${context}" }
+      }
+
+      if $gobby {
+        class { 'firewall::vm::gobby': destination => "192.168.0.${context}" }
+      }
+
+      if $yacy {
+        class { 'firewall::vm::yacy': destination => "192.168.0.${context}" }
+      }
+
+      if $rsync {
+        class { 'firewall::vm::rsync': destination => "192.168.0.${context}" }
+      }
+
+      if $avahi {
+        class { 'firewall::vm::mdns': destination => "192.168.0.${context}" }
+      }
+    }
+  }
+}