# # Nodo class definitions # import "firewall.pp" import "firewire.pp" import "initramfs.pp" import "lsb.pp" import "motd.pp" import "sudo.pp" import "sysctl.pp" import "ups.pp" import "utils.pp" import "database.pp" import "websites.pp" import "munin.pp" class nodo { include lsb include puppetd include backup include exim include sudo include users::admin include motd include utils include cron # Set timezone and ntp config # # We config those here but leave class inclusion elsewhere # as ntp config differ from server to vserver. # $ntp_timezone = "Brazil/East" $ntp_pool = "south-america.pool.ntp.org" $ntp_servers = [ 'a.ntp.br', 'b.ntp.br', 'c.ntp.br' ] # Monkeysphere # # Currently we don't have a defined policy regarding whether # to publish all our node keys to public keyservers, so leave # automatic publishing disabled for now. # $monkeysphere_publish_key = false include monkeysphere # Apt configuration $backports_enabled = true $apt_update_method = 'cron' include apt # Default SSH configuration $sshd_password_authentication = "yes" $sshd_shared_ip = "yes" file { "/etc/hostname": owner => "root", group => "root", mode => 0644, ensure => present, content => "$fqdn\n", } host { "$hostname": ensure => present, ip => "$ipaddress", alias => [ "$fqdn" ], } file { "/etc/rc.local": source => "puppet://$server/modules/nodo/etc/rc.local", owner => "root", group => "root", mode => 0755, ensure => present, } file { "/etc/screenrc": source => "puppet://$server/modules/nodo/etc/screenrc", owner => "root", group => "root", mode => 0644, ensure => present, } file { "/etc/profile": source => "puppet://$server/modules/nodo/etc/profile", owner => "root", group => "root", mode => 0644, ensure => present, require => File['/usr/local/bin/prompt.sh'], } file { "/etc/bash.bashrc": source => "puppet://$server/modules/nodo/etc/bash.bashrc", owner => "root", group => "root", mode => 0644, ensure => present, require => File['/usr/local/bin/prompt.sh'], } file { "/usr/local/bin/prompt.sh": source => "puppet://$server/modules/nodo/bin/prompt.sh", owner => "root", group => "root", mode => 0755, ensure => present, } } class nodo::physical inherits nodo { include syslog-ng include firewall include vserver::host include initramfs include firewire include sysctl include ups include utils::physical include smartmontools # Time configuration case $ntpdate { false: { include timezone } default: { include ntpdate } } # DNS resolver $resolvconf_domain = "$domain" $resolvconf_search = "$fqdn" include resolvconf # SSH Server # # We need to restrict listen address so multiple instances # can live together in the same physical host. # $sshd_listen_address = [ "$ipaddress" ] include sshd backupninja::sys { "sys": ensure => present, } # Munin configuration munin_node { "$hostname": port => '4900', } } class nodo::server inherits nodo::physical { # fstab file { "/etc/fstab": source => "puppet://$server/modules/nodo/etc/fstab/server", owner => "root", group => "root", mode => 0644, ensure => present, } # crypttab file { "/etc/crypttab": source => "puppet://$server/modules/nodo/etc/crypttab/server", owner => "root", group => "root", mode => 0644, ensure => present, } } class nodo::desktop inherits nodo::physical { include utils::desktop # fstab file { "/etc/fstab": source => "puppet://$desktop/modules/nodo/etc/fstab/desktop", owner => "root", group => "root", mode => 0644, ensure => present, } # crypttab file { "/etc/crypttab": source => "puppet://$desktop/modules/nodo/etc/crypttab/desktop", owner => "root", group => "root", mode => 0644, ensure => present, } # data file { "/var/data": ensure => directory, mode => 0755, } # pam - login file { "/etc/pam.d/login": source => "puppet://$desktop/modules/nodo/etc/pam.d/login", owner => "root", group => "root", mode => 0644, ensure => present, } # pam - gdm file { "/etc/pam.d/gdm": source => "puppet://$desktop/modules/nodo/etc/pam.d/gdm", owner => "root", group => "root", mode => 0644, ensure => present, } } class nodo::vserver inherits nodo { include sshd include timezone include syslog-ng::vserver backupninja::sys { "sys": ensure => present, partitions => false, hardware => false, dosfdisk => false, dohwinfo => false, } $hosting_type = $node_hosting_type ? { '' => "direct", default => "$node_hosting_type", } case $hosting_type { "direct": { # Apply munin configuration for this node for # directly hosted nodes. Munin_node <<| title == $hostname |>> } "third-party": { # Apply munin configuration for this node for third-party # hosted nodes. munin_node { "$hostname": } } } # Define a vserver instance define instance($context, $ensure = 'running', $proxy = false, $puppetmaster = false, $gitd = false, $icecast = false, $sound = false, $ticket = false, $memory_limit = false) { # set instance id if $context < 9 { $id = "0$context" } else { $id = $context } vserver { $name: ensure => $ensure, context => "$context", mark => 'default', distro => 'lenny', interface => "eth0:192.168.0.$context/24", hostname => "$name.$domain", memory_limit => $memory_limit, } # Some nodes need a lot of space at /tmp otherwise some admin # tasks like backups might not run. file { "/etc/vservers/${name}/fstab": source => "puppet://$server/modules/nodo/etc/fstab/vserver", owner => "root", group => "root", mode => 0644, ensure => present, notify => Exec["vs_restart_${name}"], require => Exec["vs_create_${name}"], } # Create a munin virtual resource to be realized in the node @@munin_node { "$name": port => "49$id", } # Sound support if $sound { if !defined(File["/usr/local/sbin/create-sound-devices"]) { file { "/usr/local/sbin/create-sound-devices": ensure => present, source => "puppet://$server/modules/nodo/sound/devices.sh", owner => root, group => root, mode => 755, } } exec { "/usr/local/sbin/create-sound-devices ${name}": unless => "/usr/local/sbin/create-sound-devices ${name} --check", user => root, require => [ Exec["vs_create_${name}"], File["/usr/local/sbin/create-sound-devices"] ], } } # Apply firewall rules just for running vservers case $ensure { 'running': { shorewall::rule { "ssh-$context-1": action => 'DNAT', source => 'net', destination => "vm:192.168.0.$context:22", proto => 'tcp', destinationport => "22$id", ratelimit => '-', order => "2$id", } shorewall::rule { "ssh-$context-2": action => 'DNAT', source => '$FW', destination => "fw:192.168.0.$context:22", proto => 'tcp', destinationport => "22$id", originaldest => "$ipaddress", ratelimit => '-', order => "3$id", } shorewall::rule { "munin-$context-1": action => 'DNAT', source => 'net', destination => "fw:192.168.0.$context:49$id", proto => 'tcp', destinationport => "49$id", ratelimit => '-', order => "4$id", } shorewall::rule { "munin-$context-2": action => 'DNAT', source => '$FW', destination => "fw:192.168.0.$context:49$id", proto => 'tcp', destinationport => "49$id", originaldest => "$ipaddress", ratelimit => '-', order => "5$id", } if $proxy { shorewall::rule { 'http-route-1': action => 'DNAT', source => 'net', destination => "vm:192.168.0.$context:80", proto => 'tcp', destinationport => '80', ratelimit => '-', order => '600', } shorewall::rule { 'http-route-2': action => 'DNAT', source => '$FW', destination => "fw:192.168.0.$context:80", proto => 'tcp', destinationport => '80', originaldest => "$ipaddress", ratelimit => '-', order => '601', } shorewall::rule { 'https-route-1': action => 'DNAT', source => 'net', destination => "vm:192.168.0.$context:443", proto => 'tcp', destinationport => '443', ratelimit => '-', order => '602', } shorewall::rule { 'https-route-2': action => 'DNAT', source => '$FW', destination => "fw:192.168.0.$context:443", proto => 'tcp', destinationport => '443', originaldest => "$ipaddress", ratelimit => '-', order => '602', } } if $puppetmaster { shorewall::rule { 'puppetmaster-1': action => 'DNAT', source => 'net', destination => "fw:192.168.0.$context:8140", proto => 'tcp', destinationport => '8140', ratelimit => '-', order => '700', } shorewall::rule { 'puppetmaster-2': action => 'DNAT', source => 'net', destination => "fw:192.168.0.$context:8140", proto => 'udp', destinationport => '8140', ratelimit => '-', order => '701', } shorewall::rule { 'puppetmaster-3': action => 'DNAT', source => '$FW', destination => "fw:192.168.0.$context:8140", proto => 'tcp', destinationport => '8140', originaldest => "$ipaddress", ratelimit => '-', order => '702', } shorewall::rule { 'puppetmaster-4': action => 'DNAT', source => '$FW', destination => "fw:192.168.0.$context:8140", proto => 'udp', destinationport => '8140', originaldest => "$ipaddress", ratelimit => '-', order => '703', } } if $gitd { shorewall::rule { 'git-daemon-1': action => 'DNAT', source => 'net', destination => "fw:192.168.0.$context:9418", proto => 'tcp', destinationport => '9418', ratelimit => '-', order => '800', } shorewall::rule { 'git-daemon-2': action => 'DNAT', source => '$FW', destination => "fw:192.168.0.$context:9418", proto => 'tcp', destinationport => '9418', originaldest => "$ipaddress", ratelimit => '-', order => '801', } } if $icecast { shorewall::rule { 'icecast-1': action => 'DNAT', source => 'net', destination => "fw:192.168.0.$context:8000", proto => 'tcp', destinationport => '8000', ratelimit => '-', order => '900', } shorewall::rule { 'icecast-2': action => 'DNAT', source => '$FW', destination => "fw:192.168.0.$context:8000", proto => 'tcp', destinationport => '8000', originaldest => "$ipaddress", ratelimit => '-', order => '901', } } } } } } class nodo::web inherits nodo::vserver { include git-daemon include websites include database include users::virtual include utils::web backupninja::svn { "svn": src => "/var/svn", } backupninja::mysql { "all_databases": backupdir => '/var/backups/mysql', compress => true, sqldump => true, } } class nodo::master { # Puppetmaster should be included before nodo::vserver include puppetmasterd include nodo::vserver include database include gitosis include websites::admin case $main_master { '': { fail("You need to define if this is the main master! Please set \$main_master in host config") } } if $main_master == true { include munin::host # The main master has a host entry pointing to itself, other # masters still retrieve catalogs from the main master. host { "puppet": ensure => present, ip => "127.0.0.1", alias => ["puppet.$domain"], } } else { host { "puppet": ensure => absent, } } case $puppetmaster_db_password { '': { fail("Please set \$puppetmaster_db_password in your host config") } } # update master's puppet.conf if you change here database::instance { "puppet": password => "$puppetmaster_db_password", } backupninja::mysql { "all_databases": backupdir => '/var/backups/mysql', compress => true, sqldump => true, } # used for trac dependency graphs package { "graphviz": ensure => present, } } class nodo::proxy inherits nodo::vserver { include nginx } class nodo::storage inherits nodo::vserver { # Class for backup nodes include utils::storage } class nodo::test inherits nodo::web { # Class for test nodes }