# # Nodo class definitions # import "firewall.pp" import "firewire.pp" import "initramfs.pp" import "lsb.pp" import "motd.pp" import "sudo.pp" import "sysctl.pp" import "ups.pp" import "utils.pp" import "database.pp" import "websites.pp" class nodo { include lsb include puppetd include backup include exim include sudo include users::admin include motd include utils include cron # Set timezone and ntp config # # We config those here but leave class inclusion elsewhere # as ntp config differ from server to vserver. # $ntp_timezone = "Brazil/East" $ntp_pool = "south-america.pool.ntp.org" $ntp_servers = [ 'a.ntp.br', 'b.ntp.br', 'c.ntp.br' ] # Monkeysphere # # Currently we don't have a defined policy regarding whether # to publish all our node keys to public keyservers, so leave # automatic publishing disabled for now. # $monkeysphere_publish_key = false include monkeysphere # Apt configuration $backports_enabled = true $apt_update_method = 'cron' include apt # Default SSH configuration $sshd_password_authentication = "yes" $sshd_shared_ip = "yes" file { "/etc/hostname": owner => "root", group => "root", mode => 0644, ensure => present, content => "$fqdn\n", } host { "$hostname": ensure => present, ip => "$ipaddress", alias => [ "$fqdn" ], } file { "/etc/rc.local": source => "puppet://$server/modules/nodo/etc/rc.local", owner => "root", group => "root", mode => 0755, ensure => present, } file { "/etc/screenrc": source => "puppet://$server/modules/nodo/etc/screenrc", owner => "root", group => "root", mode => 0644, ensure => present, } file { "/etc/bash.bashrc": source => "puppet://$server/modules/nodo/etc/bash.bashrc", owner => "root", group => "root", mode => 0644, ensure => present, } } class nodo::server inherits nodo { include syslog-ng include firewall include vserver::host include initramfs include firewire include sysctl include ups include utils::server include smartmontools # Time configuration case $ntpdate { false: { include timezone } default: { include ntpdate } } # DNS resolver $resolvconf_domain = "$domain" $resolvconf_search = "$fqdn" include resolvconf # SSH Server # # We need to restrict listen address so multiple instances # can live together in the same physical host. # $sshd_listen_address = [ "$ipaddress" ] include sshd # Munin #$munin_port = "4901" #include munin::client backupninja::sys { "sys": ensure => present, } # fstab file { "/etc/fstab": source => "puppet://$server/modules/nodo/etc/fstab", owner => "root", group => "root", mode => 0644, ensure => present, } # crypttab file { "/etc/crypttab": source => "puppet://$server/modules/nodo/etc/crypttab", owner => "root", group => "root", mode => 0644, ensure => present, } } class nodo::vserver inherits nodo { include sshd include timezone include syslog-ng::vserver backupninja::sys { "sys": ensure => present, partitions => false, hardware => false, dosfdisk => false, dohwinfo => false, } define munin($type, $id) { # Use one port for each node $munin_port = "49$id" case $type { 'host': { include munin::host include munin::client } 'client': { include munin::client } } } # Apply the munin configuration for this host #Nodo::vserver::munin <| tag == $name |> # Define a vserver instance define instance($context, $ensure = 'running', $proxy = false, $puppetmaster = false, $gitd = false, $munin = 'client', $icecast = false, $sound = false, $ticket = false) { # set instance id if $context < 9 { $id = "0$context" } else { $id = $context } vserver { $name: ensure => $ensure, context => "$context", mark => 'default', distro => 'lenny', interface => "eth0:192.168.0.$context/24", hostname => "$name.$domain", } # Some nodes need a lot of space at /tmp otherwise some admin # tasks like backups might not run. file { "/etc/vservers/${name}/fstab": source => "puppet://$server/modules/nodo/etc/fstab-vserver", owner => "root", group => "root", mode => 0644, ensure => present, notify => Exec["vs_restart_${name}"], require => Exec["vs_create_${name}"], } # Create a munin virtual resource to be realized in the node #@nodo::vserver::munin { # type => $munin, # id => $id, # tag => $name, #} # Sound support if $sound { if !defined(File["/usr/local/sbin/create-sound-devices"]) { file { "/usr/local/sbin/create-sound-devices": ensure => present, source => "puppet://$server/modules/nodo/sound/devices.sh", owner => root, group => root, mode => 755, } } exec { "/usr/local/sbin/create-sound-devices ${name}": unless => "/usr/local/sbin/create-sound-devices ${name} --check", user => root, require => [ Exec["vs_create_${name}"], File["/usr/local/sbin/create-sound-devices"] ], } } # Apply firewall rules just for running vservers case $ensure { 'running': { shorewall::rule { "ssh-$context": action => 'DNAT', source => 'net', destination => "vm:192.168.0.$context:22", proto => 'tcp', destinationport => "22$id", ratelimit => '-', order => "2$id", } if $proxy { shorewall::rule { 'http-route': action => 'DNAT', source => 'net', destination => "vm:192.168.0.$context:80", proto => 'tcp', destinationport => '80', ratelimit => '-', order => '300', } shorewall::rule { 'https-route': action => 'DNAT', source => 'net', destination => "vm:192.168.0.$context:443", proto => 'tcp', destinationport => '443', ratelimit => '-', order => '301', } } if $puppetmaster { shorewall::rule { 'puppetmaster-1': action => 'DNAT', source => 'net', destination => "fw:192.168.0.$context:8140", proto => 'tcp', destinationport => '8140', ratelimit => '-', order => '302', } shorewall::rule { 'puppetmaster-2': action => 'DNAT', source => 'net', destination => "fw:192.168.0.$context:8140", proto => 'udp', destinationport => '8140', ratelimit => '-', order => '303', } shorewall::rule { 'puppetmaster-3': action => 'DNAT', source => '$FW', destination => "fw:192.168.0.$context:8140", proto => 'tcp', destinationport => '8140', originaldest => "$ipaddress", ratelimit => '-', order => '304', } shorewall::rule { 'puppetmaster-4': action => 'DNAT', source => '$FW', destination => "fw:192.168.0.$context:8140", proto => 'udp', destinationport => '8140', originaldest => "$ipaddress", ratelimit => '-', order => '305', } } if $gitd { shorewall::rule { 'git-daemon-1': action => 'DNAT', source => 'net', destination => "fw:192.168.0.$context:9418", proto => 'tcp', destinationport => '9418', ratelimit => '-', order => '306', } shorewall::rule { 'git-daemon-2': action => 'DNAT', source => '$FW', destination => "vm:192.168.0.$context:9418", proto => 'tcp', destinationport => '9418', ratelimit => '-', order => '307', } } if $icecast { shorewall::rule { 'icecast-1': action => 'DNAT', source => 'net', destination => "fw:192.168.0.$context:8000", proto => 'tcp', destinationport => '8000', ratelimit => '-', order => '308', } shorewall::rule { 'icecast-2': action => 'DNAT', source => '$FW', destination => "vm:192.168.0.$context:8000", proto => 'tcp', destinationport => '8000', ratelimit => '-', order => '309', } } } } } } class nodo::web inherits nodo::vserver { include git-daemon include websites include database include users::virtual backupninja::svn { "svn": src => "/var/svn", } backupninja::mysql { "all_databases": backupdir => '/var/backups/mysql', compress => true, sqldump => true, } } class nodo::master { # Puppetmaster should be included before nodo::vserver include puppetmasterd include nodo::vserver include database include gitosis include websites::admin # TODO: #include munin::host host { "puppet": ensure => present, ip => "127.0.0.1", alias => ["puppet.$domain"], } case $puppetmaster_db_password { '': { fail("Please set \$puppetmaster_db_password in your host config") } } # update master's puppet.conf if you change here database::instance { "puppet": password => "$puppetmaster_db_password", } backupninja::mysql { "all_databases": backupdir => '/var/backups/mysql', compress => true, sqldump => true, } # used for trac dependency graphs package { "graphviz": ensure => present, } } class nodo::proxy inherits nodo::vserver { include nginx } class nodo::storage inherits nodo::vserver { # Class for backup nodes } class nodo::test inherits nodo::web { # Class for test nodes }