
Installation of the new dCache-admin

Description

Installation of the Operating System

First we create the mirror for the installation, on sprace:
[root@sprace ~]# mkdir /export/linux/SL_45_i386
[root@sprace ~]# cd
[root@sprace ~]# vim download.verbose
set ftp:list-options -a
open ftp.scientificlinux.org/linux/scientific/45/i386
lcd /export/linux/SL_45_i386
mirror --delete --exclude sites/Fermi --exclude errata/debuginfo --exclude errata/obsolete --verbose=4
quit
[root@sprace ~]# nohup lftp -f download.verbose &
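Since lftp runs in the background, it is worth keeping an eye on the progress. A simple way to do that (assuming nohup wrote its output to nohup.out in root's home directory, which is the default when no redirection is given) is:
[root@sprace ~]# tail -f nohup.out
[root@sprace ~]# du -sh /export/linux/SL_45_i386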
To perform the installation over the network, we need to configure DHCP:
[root@sprace ~]# vim /etc/hosts
192.168.1.151   osg-se.grid     osg-se
[root@sprace ~]# vim /etc/dhcpd.conf
ddns-update-style none;

option space PXE;
option PXE.mtftp-ip               code 1 = ip-address;
option PXE.mtftp-cport            code 2 = unsigned integer 16;
option PXE.mtftp-sport            code 3 = unsigned integer 16;
option PXE.mtftp-tmout            code 4 = unsigned integer 8;
option PXE.mtftp-delay            code 5 = unsigned integer 8;
option PXE.discovery-control      code 6 = unsigned integer 8;
option PXE.discovery-mcast-addr   code 7 = ip-address;

# PXE specific options
class "pxeclients" {
  match if substring (option vendor-class-identifier, 0, 9) =
         "PXEClient";
  option vendor-class-identifier "PXEClient";
  vendor-option-space PXE;
  option PXE.mtftp-ip 0.0.0.0;
}

subnet 200.136.80.0 netmask 255.255.255.0
   { 
   }
subnet 192.168.1.0 netmask 255.255.255.0 {
  deny unknown-clients;
  min-lease-time 300;
  default-lease-time 1800;
  max-lease-time 1800;
  use-host-decl-names on;
  option subnet-mask 255.255.255.0;
  option routers 192.168.1.200;

host osg-se
{
        hardware ethernet 00:30:48:89:5D:40;
        fixed-address osg-se;
        filename "/tftpboot/pxelinux.0";
   }
}
[root@sprace ~]# /etc/init.d/dhcpd start
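As a suggested sanity check (not part of the original procedure), the dhcpd configuration can be syntax-tested and the PXE requests watched in the system log:
[root@sprace ~]# dhcpd -t
[root@sprace ~]# tail -f /var/log/messages | grep -i dhcp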
Next we configure the tftpd:
[root@sprace ~]# cp /export/linux/SL_45_i386/isolinux/vmlinuz /tftpboot/.
[root@sprace ~]# cp /export/linux/SL_45_i386/isolinux/initrd.img /tftpboot/.
[root@sprace ~]# vim /tftpboot/pxelinux.cfg/default
#SERIAL 1 19200

# Message to be displayed
DISPLAY msgs/message.txt

# Wait timeout
TIMEOUT 100

# Always show the prompt
PROMPT  1

# Default is to perform a normal boot
DEFAULT linux

LABEL linux
    kernel vmlinuz
    append ks=nfs:192.168.1.200:/export/linux/kickstart/ks_osgse_instalar.cfg initrd=initrd.img  devfs=nomount ksdevice=eth0
#console=ttyS1,19200

LABEL boot
    LOCALBOOT 0


[root@sprace ~]# /etc/init.d/xinetd restart
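To confirm that the TFTP service is actually serving the boot files, a quick test from any host with the tftp client installed (assuming 192.168.1.200 is sprace's internal address, as used in the pxelinux append line above) is:
[root@sprace ~]# tftp 192.168.1.200 -c get pxelinux.0
[root@sprace ~]# ls -l pxelinux.0 && rm -f pxelinux.0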
The installation is done using the following kickstart file (attached to this topic):
[root@sprace ~]# less  /export/linux/kickstart/ks_osgse_instalar.cfg

Post-Install

After the installation, a few adjustments were made:
[root@osg-se ~]# echo "/osg-se    /etc/auto.osg-se    --timeout=30" >> /etc/auto.master
[root@osg-se ~]# echo "OSG      -rw,soft,bg,rsize=8192,wsize=8192,tcp     spg00:/OSG" >/etc/auto.osg-se
[root@osg-se ~]# mkdir /osg-se
[root@osg-se ~]# chmod 775 /osg-se/
[root@osg-se ~]# ln -s /osg-se/OSG /OSG
[root@osg-se ~]# ln -s /osg-se/OSG /usr/local/opt/OSG
[root@osg-se ~]# chkconfig autofs on
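A quick check that the automount actually works (assuming spg00 is exporting /OSG as configured above):
[root@osg-se ~]# service autofs restart
[root@osg-se ~]# ls /osg-se/OSG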
Remove the mcast_if eth1 line from /etc/gmond if the internal network is connected to eth0, as is the case here.

With the latest gmetad upgrade on spgrid there is a permissions problem when adding new machines to the cluster. This was solved on spgrid as follows:

[root@spgrid ~]# echo "192.168.1.151   osg-se.grid     osg-se">>/etc/hosts
[root@spgrid ~]# cd /var/lib/ganglia/rrds/SPGRID\ Cluster/
[root@spgrid SPGRID Cluster]# /etc/init.d/gmond stop; /etc/init.d/gmetad stop
[root@spgrid SPGRID Cluster]# cp -rpf node86.grid/ osg-se.grid
[root@spgrid SPGRID Cluster]# /etc/init.d/gmetad start; /etc/init.d/gmond start
[root@spgrid SPGRID Cluster]# tail -f /var/log/messages

Installation of Java

[root@osg-se ~]# wget http://www.java.net/download/jdk6/6u2/promoted/b02/binaries/jdk-6u2-ea-bin-b02-linux-i586-12_apr_2007-rpm.bin
[root@osg-se ~]# chmod 755 jdk-6u2-ea-bin-b02-linux-i586-12_apr_2007-rpm.bin
[root@osg-se ~]# ./jdk-6u2-ea-bin-b02-linux-i586-12_apr_2007-rpm.bin
[root@osg-se etc]# updatedb; locate javac |grep bin
[root@osg-se ~]# cd /etc/
[root@osg-se etc]# vim profile
export JAVA_HOME=/usr/java/jdk1.6.0_02
[root@osg-se ~]# java -version
java version "1.6.0_02-ea"
Java(TM) SE Runtime Environment (build 1.6.0_02-ea-b02)
Java HotSpot(TM) Server VM (build 1.6.0_02-ea-b02, mixed mode)
[root@osg-se alternatives]# cd
[root@osg-se ~]# cd /etc/alternatives/
[root@osg-se alternatives]# ls -al java
lrwxrwxrwx  1 root root 35 Jan  8 09:59 java -> /usr/lib/jvm/jre-1.4.2-sun/bin/java
[root@osg-se alternatives]# rm -rf java
[root@osg-se alternatives]# ln -s  /usr/java/jdk1.6.0_02/bin/java java
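Replacing the symlink by hand works, but an arguably cleaner equivalent (a suggestion, not what was done here) is to register the new JDK with the alternatives system:
[root@osg-se ~]# alternatives --install /usr/bin/java java /usr/java/jdk1.6.0_02/bin/java 2
[root@osg-se ~]# alternatives --config java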

Installation of PostgreSQL

Downloading:
[root@osg-se ~]# cd /tmp/
[root@osg-se tmp]# wget ftp://ftp.postgresql.org/pub/latest/postgresql-8.2.5.tar.bz2
[root@osg-se tmp]# tar -xjvf postgresql-8.2.5.tar.bz2
[root@osg-se tmp]# cd postgresql-8.2.5/
[root@osg-se postgresql-8.2.5]# ./configure --prefix=/usr/local/pgsql --bindir=/usr/bin --sysconfdir=/etc/postgres 
[root@osg-se postgresql-8.2.5]# gmake
[root@osg-se postgresql-8.2.5]# gmake install

Creating the user and group

[root@osg-se postgresql-8.2.5]# groupadd postgres
[root@osg-se postgresql-8.2.5]# adduser -g postgres postgres
Creating the data directory
[root@osg-se postgresql-8.2.5]# mkdir /usr/local/pgsql/data
[root@osg-se postgresql-8.2.5]# chown postgres:postgres /usr/local/pgsql/data
Initializing the database
[root@osg-se postgresql-8.2.5]#  su - postgres
[postgres@osg-se ~]$ initdb -D /usr/local/pgsql/data
Running it in the background and writing the logs
[postgres@osg-se ~]$ postmaster -D /usr/local/pgsql/data >> /usr/local/pgsql/data/logfile &
Creating a database and testing
[postgres@osg-se ~]$ createdb test
[postgres@osg-se ~]$ psql test
\q
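An equally quick non-interactive check (optional) is to run a query directly:
[postgres@osg-se ~]$ psql -d test -c "SELECT version();"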
To have it start at machine boot:
[root@osg-se ~]# vim /etc/init.d/postgresql

#! /bin/sh

#chkconfig:2345 90 10
#description: PostgreSQL

# where it was installed
prefix=/usr/local/pgsql
# data directory
PGDATA="/usr/local/pgsql/data"
# user to run pg_ctl as; must be postgres
PGUSER=postgres
# where to keep the log file
PGLOG="/usr/local/pgsql/data/logfile"
####################################
if echo '\c' | grep -s c > /dev/null 2>&1; then
ECHO_N="echo -n"
ECHO_C=""
else
ECHO_N="echo"
ECHO_C='\c'
fi
# PATH used by the script
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# what to use to start the postmaster
DAEMON="/usr/bin/pg_ctl"

set -e
# only start if pg_ctl can be found
test -f $DAEMON || exit 0
# commands
case $1 in
start)
$ECHO_N "Starting PostgreSQL: "$ECHO_C
su -l $PGUSER -s /bin/sh -c "$DAEMON -D '$PGDATA' -o '-i' -l $PGLOG start"
echo "ok"
;;
stop)
echo -n "Parando PostgreSQL: "
su - $PGUSER -c "$DAEMON stop -D '$PGDATA' -s -m fast"
echo "ok"
;;
restart)
echo -n "Reiniciando PostgreSQL: "
su - $PGUSER -c "$DAEMON restart -D '$PGDATA' -s -m fast"
echo "ok"
;;
status)
su - $PGUSER -c "$DAEMON status -D '$PGDATA'"
;;
*)
# show usage
echo "Usage: postgresql {start|stop|restart|status}" 1>&2
exit 1
;;
esac

exit 0

[root@osg-se ~]# chmod a+x /etc/init.d/postgresql
[root@osg-se ~]# chkconfig --add postgresql
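To confirm that the init script was registered and works (a suggested check):
[root@osg-se ~]# chkconfig --list postgresql
[root@osg-se ~]# /etc/init.d/postgresql restart
[root@osg-se ~]# /etc/init.d/postgresql status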
Installing JDBC: first we install the driver
[root@osg-se ~]# mkdir  /usr/local/pgsql/share/java
[root@osg-se ~]# cd /usr/local/pgsql/share/java
[root@osg-se java]# wget http://jdbc.postgresql.org/download/postgresql-8.2-507.jdbc4.jar 
[root@osg-se java]# mv postgresql-8.2-507.jdbc4.jar postgresql.jar
[root@osg-se java]# vim /etc/profile
export CLASSPATH=/usr/local/pgsql/share/java/postgresql.jar:$CLASSPATH
[root@osg-se java]# vim /usr/local/pgsql/data/postgresql.conf
listen_addresses = '*' 
[root@osg-se java]# vim /usr/local/pgsql/data/pg_hba.conf
host    all         all         200.136.80.0    255.255.255.0         trust
[root@osg-se java]# /etc/init.d/postgresql restart
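With listen_addresses and pg_hba.conf set, connections from hosts in 200.136.80.0/24 should now be accepted. A quick check, run here from sprace (assuming psql is installed there and that it sits in that network):
[mdias@sprace ~]$ psql -h 200.136.80.27 -U postgres -d test -c "SELECT 1;"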

Testing

[root@osg-se ~]# vim pgdbping.java
// -- begin
/*
** pgdbping.java - program to test a JDBC connection to PostgreSQL
**
** Elielson - 24.07.2002
*/

   import java.sql.*;

    class pgdbping {
       public static void main(String args[]) {
         try {
            Class.forName("org.postgresql.Driver");
            Connection con;

            if ( args.length != 3 )
            {
               String url = "jdbc:postgresql://200.136.80.27/test",
                      user = "postgres", password = "";

               System.out.println("Uso: java dbping URL user password");
               System.out.println("URL jdbc:postgresql://host:port/dbname");
               System.out.println("Vou usar conexao interna: " + url
                  + ", " + user + ", " + "*******");

               con = DriverManager.getConnection(url, user, password);
            }
            else
            {
               con = DriverManager.getConnection(args[0], args[1], args[2]);
            }
            System.out.println("Sucesso na conexo!!");
            con.close();
         }
             catch(Exception e) {
               System.out.println("Falha na conexao");
               e.printStackTrace();
            }
      }
   }
// -- end
[root@osg-se ~]# javac  pgdbping.java
[root@osg-se ~]# java pgdbping
Check which hosts are authorized to connect in /usr/local/pgsql/data/pg_hba.conf.

Installation of dCache

Because of our configuration we link /opt to /usr/local/opt:
[root@osg-se ~]# mkdir /usr/local/opt
[root@osg-se ~]# cp -prf /opt/* /usr/local/opt/.
[root@osg-se ~]# rm -rf /opt
[root@osg-se ~]# ln -s /usr/local/opt/ /opt
Downloading the packages and initializing the databases:
[root@osg-se ~]# wget http://cvs.dcache.org/repository/yum/sl3.0.5/i386/RPMS.stable/pnfs-postgresql-3.1.10-3.i386.rpm
[root@osg-se ~]# wget http://www.dcache.org/downloads/1.8.0/dcache-server-1.8.0-8.noarch.rpm
[root@osg-se ~]# wget http://www.dcache.org/downloads/1.8.0/dcache-client-1.8.0-0.noarch.rpm
[root@osg-se ~]# rpm -ivh pnfs-postgresql-3.1.10-3.i386.rpm
[root@osg-se ~]# rpm -ivh dcache-server-1.8.0-8.noarch.rpm
[root@osg-se ~]# rpm -Uvh dcache-client-1.8.0-0.noarch.rpm
[root@osg-se ~]# createuser -U postgres --no-superuser --no-createrole --createdb --pwprompt pnfsserver
[root@osg-se ~]# createuser -U postgres --no-superuser --no-createrole --createdb --pwprompt srmdcache
[root@osg-se ~]# createdb  -U srmdcache dcache
[root@osg-se ~]# createdb  -U srmdcache companion
[root@osg-se ~]# psql -U srmdcache companion -f /opt/d-cache/etc/psql_install_companion.sql
[root@osg-se ~]# createdb -U srmdcache replicas
[root@osg-se ~]# psql -U srmdcache replicas -f /opt/d-cache/etc/psql_install_replicas.sql 
[root@osg-se ~]# createdb -U srmdcache billing
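At this point all the dCache databases should exist; an optional quick check is:
[root@osg-se ~]# psql -U srmdcache -l | egrep 'dcache|companion|replicas|billing'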
Configuring PNFS
[root@osg-se ~]# cd /opt/pnfs
[root@osg-se pnfs]# cp etc/pnfs_config.template etc/pnfs_config
[root@osg-se pnfs]# vim etc/pnfs_config
[root@osg-se pnfs]# /opt/pnfs/install/pnfs-install.sh
[root@osg-se pnfs]# cp /opt/pnfs/bin/pnfs /etc/init.d/.
[root@osg-se pnfs]# chkconfig --add pnfs
[root@osg-se pnfs]# chkconfig pnfs on
[root@osg-se ~]# service pnfs start
Configuring things so that the 200.136.80.* clients can mount PNFS, and setting up the security:
[root@osg-se ~]# cd /pnfs/fs/admin/etc/exports
[root@osg-se exports]# touch 255.255.255.0..200.136.80.0
[root@osg-se exports]# echo "/pnfs /0/root/fs/usr  30 nooptions" >>255.255.255.0..200.136.80.0
[root@osg-se exports]# echo "/pnfsdoors /0/root/fs/usr/  30 nooptions" >>255.255.255.0..200.136.80.0
[root@osg-se exports]# touch trusted/200.136.80.10
[root@osg-se exports]# echo "15" >> trusted/200.136.80.10
[root@osg-se exports]# echo "15" >> trusted/200.136.80.5
[root@osg-se exports]# echo "15" >> trusted/200.136.80.5
[root@osg-se exports]# echo "15" >> trusted/200.136.80.27
[root@osg-se ~]# /etc/init.d/pnfs stop
[root@osg-se ~]# /etc/init.d/pnfs start
Testing, from another host:
[root@spdc00 mdias]# mount  -o intr,rw,noac,hard,vers=2,udp 200.136.80.27:/pnfs /home/mdias/teste

Installation of PhEDEx

We take a break here to install PhEDEx. First we need to install the gLite UI (user interface) in order to be able to do FTS transfers:
[root@osg-se ~]# cd /etc/yum.repos.d/
[root@osg-se yum.repos.d]# wget http://grid-deployment.web.cern.ch/grid-deployment/yaim/repos/glite-UI.repo
[root@osg-se yum.repos.d]# more glite-UI.repo
# This is the official YUM repository string for the glite 3.1 UI
# Fetched from: http://grid-deployment.web.cern.ch/grid-deployment/yaim/repos/glite-UI.repo
# Place it to /etc/yum.repos.d/ and run 'yum update'

[glite-UI]
name=gLite 3.1 User Interface
baseurl=http://linuxsoft.cern.ch/EGEE/gLite/R3.1/glite-UI/sl4/i386/
enabled=1

[root@osg-se yum.repos.d]# vim /etc/yum.repos.d/ca.repo
[CA]
name=CAs
baseurl=http://linuxsoft.cern.ch/LCG-CAs/current
[root@osg-se yum.repos.d]# vim /etc/yum.repos.d/dag.repo
enabled=1
[root@osg-se yum.repos.d]# vim /etc/yum.repos.d/dries.repo
enabled=1
[root@osg-se yum.repos.d]# vim /etc/yum.repos.d/jpackage.repo
[main]
[jpackage17-generic]
name=JPackage 1.7, generic
baseurl=http://mirrors.dotsrc.org/jpackage/1.7/generic/free/
        http://sunsite.informatik.rwth-aachen.de/ftp/pub/Linux/jpackage/1.7/generic/free
enabled=1
protect=1
[root@osg-se yum.repos.d]# rpm --import http://www.jpackage.org/jpackage.asc
[root@osg-se yum.repos.d]# yum update
[root@osg-se yum.repos.d]# cd
[root@osg-se ~]# yum install lcg-CA
[root@osg-se ~]# cp  /opt/glite/yaim/examples/siteinfo/site-info.def /opt/glite/yaim/etc/.
Setting a password for MySQL:
[root@osg-se ~]# /etc/init.d/mysqld start
[root@osg-se ~]# /usr/bin/mysqladmin -u root password 'senha' 
Configure MySQL to listen only on the local machine:
[root@osg-se ~]# vim /etc/my.cnf
[mysqld]
bind-address=127.0.0.1
[root@osg-se ~]# /etc/init.d/mysqld restart
[root@osg-se ~]# chkconfig mysqld on
Check that it is listening with:
[root@osg-se ~]# netstat -tlnp
[root@osg-se ~]# vim /opt/glite/yaim/etc/site-info.def
MY_DOMAIN=sprace.org.br
CE_HOST=osg-se.$MY_DOMAIN
RB_HOST=osg-se.$MY_DOMAIN
WMS_HOST=osg-se.$MY_DOMAIN
PX_HOST=osg-se.$MY_DOMAIN
BDII_HOST=osg-se.$MY_DOMAIN
MON_HOST=osg-se.$MY_DOMAIN
REG_HOST=osg-se.gridpp.rl.ac.uk
FTS_HOST=osg-se.$MY_DOMAIN
LFC_HOST=osg-se.$MY_DOMAIN
VOBOX_HOST=osg-se.$MY_DOMAIN
LCG_REPOSITORY="baseurl= http://glitesoft.cern.ch/EGEE/gLite/R3.1/glite-UI/sl4/i386/"
REPOSITORY_TYPE="yum"
JAVA_LOCATION="/usr/java/jdk1.6.0_02"
MYSQL_PASSWORD=senha
SITE_NAME="Sao Paulo"
SITE_LOC="Sao Paulo, Brazil"
SITE_LAT=23.0 # -90 to 90 degrees
SITE_LONG=46.0 # -180 to 180 degrees
SITE_WEB="http://www.sprace.org.br"
SITE_SUPPORT_SITE="sprace.org"
CE_OS_RELEASE=4.5
DCACHE_POOLS="ftp-01.sprace.org.br:all:/raid2 ftp-01.sprace.org.br:all:/raid3 ftp-01.sprace.org.br:all:/raid4 ftp-01.sprace.org.br:all:/raid5"
DCACHE_ADMIN="osg-se.sprace.org.br"
DCACHE_DOOR_SRM="osg-se.sprace.org.br"
DCACHE_DOOR_GSIFTP="ftp-01.sprace.org.br"
DCACHE_DOOR_GSIDCAP="ftp-01.sprace.org.br"
DCACHE_DOOR_DCAP="ftp-01.sprace.org.br"
DCACHE_PNFS_SERVER="osg-se.sprace.org.br"
Configure using yaim:
[root@osg-se ~]# /opt/glite/yaim/bin/yaim -c -s /opt/glite/yaim/etc/site-info.def -n glite-UI
Now the PhEDEx installation proper begins:
[root@osg-se ~]# groupadd phedex
[root@osg-se ~]# useradd -g phedex phedex
[root@osg-se ~]# su - phedex
[phedex@osg-se ~]$ mkdir -p state logs sw gridcert
[phedex@osg-se ~]$ chmod 700 gridcert
[phedex@osg-se ~]$ export sw=$PWD/sw
[phedex@osg-se ~]$ export version=2_5_4_2
[phedex@osg-se ~]$ export myarch=slc4_ia32_gcc345
[phedex@osg-se ~]$ wget -O $sw/bootstrap-${myarch}.sh http://cmsrep.cern.ch/cmssw/bootstrap-${myarch}.sh
[phedex@osg-se ~]$ sh -x $sw/bootstrap-${myarch}.sh setup -path $sw
[phedex@osg-se ~]$ source $sw/$myarch/external/apt/0.5.15lorg3.2-CMS3/etc/profile.d/init.sh
[phedex@osg-se ~]$ apt-get update
[phedex@osg-se ~]$ apt-get install cms+PHEDEX+PHEDEX_$version
[phedex@osg-se ~]$ rm -f PHEDEX; ln -s $sw/$myarch/cms/PHEDEX/PHEDEX_$version PHEDEX
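An optional sanity check that the bootstrap and the apt-get install really put the release in place:
[phedex@osg-se ~]$ ls -l $sw/$myarch/cms/PHEDEX/
[phedex@osg-se ~]$ ls -l PHEDEX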
From spdc00:
[root@spdc00 ~]# cd /home/phedex/
[root@spdc00 phedex]#  tar -cjpvf phedex_conf.tar.bz2  SITECONF/SPRACE/PhEDEx/; scp phedex_conf.tar.bz2  200.136.80.27:/home/phedex/. ; rm phedex_conf.tar.bz2
Back on osg-se:
[phedex@osg-se ~]$ tar -xjvpf phedex_conf.tar.bz2
[phedex@osg-se ~]$ rm phedex_conf.tar.bz2
[phedex@osg-se ~]$ cd SITECONF/SPRACE/PhEDEx/
[phedex@osg-se PhEDEx]$ sed -i 's/2_5_1/2_5_4_2/g' *
[phedex@osg-se PhEDEx]$ sed -i 's/slc3_ia32_gcc323/slc4_ia32_gcc345/g' *
[phedex@osg-se PhEDEx]$ sed -i 's/\/etc\/glite\/profile.d\/glite_setenv.sh/\/opt\/glite\/etc\/profile.d\/grid-env.sh/g' *
[phedex@osg-se PhEDEx]$ cd
[phedex@osg-se ~]$ echo "source /OSG/setup.sh" >>.bashrc
[phedex@osg-se ~]$ echo "export X509_USER_PROXY=$HOME/gridcert/proxy.cert">>.bashrc
[phedex@osg-se ~]$ echo "source $HOME/PHEDEX/etc/profile.d/init.sh">>.bashrc
[phedex@osg-se ~]$ echo "export PATH=$PATH:$HOME/PHEDEX/Utilities">>.bashrc
[phedex@osg-se ~]$ echo "source /opt/glite/etc/profile.d/grid-env.sh">>.bashrc
[phedex@osg-se ~]$ echo "export PYTHONPATH=$PYTHONPATH:/opt/lcg/lib/python">>.bashrc
[phedex@osg-se ~]$ source $sw/$myarch/cms/PHEDEX/PHEDEX_$version/etc/profile.d/env.sh
[phedex@osg-se ~]$ vim SITECONF/SPRACE/PhEDEx/storage.xml
/pnfs/sprace.org.br/data/cms/phedex_loadtest/
/pnfs/sprace.org.br/data/cms/store/PhEDEx_LoadTest07/LoadTest07_Debug_SPRACE/LoadTest07_SPRACE_$1
/pnfs/sprace.org.br/data/cms/store/PhEDEx_LoadTest07/LoadTest07_Prod_SPRACE/LoadTest07_SPRACE_$1
srm://osg-se.sprace.org.br:8443/srm/managerv1?SFN=$1
gsiftp://osg-se.sprace.org.br/$1
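The lines above are only the path and result fragments of the lfn-to-pfn rules; the wiki stripped the surrounding XML. For orientation only, a rule in a PhEDEx trivial file catalog (storage.xml) is typically shaped as in this sketch; the patterns shown are illustrative, not the site's actual rules:
<storage-mapping>
  <lfn-to-pfn protocol="direct" path-match="/+(.*)"
              result="/pnfs/sprace.org.br/data/cms/$1"/>
  <lfn-to-pfn protocol="srmv1" chain="direct" path-match="(.*)"
              result="srm://osg-se.sprace.org.br:8443/srm/managerv1?SFN=$1"/>
</storage-mapping>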
Creating the proxy, valid for one month:
[mdias@osg-se ~]$ grid-proxy-init -valid 720:00
[mdias@osg-se ~]$ su -
[root@osg-se ~]# cp /tmp/x509up_u`id -u mdias` /home/phedex/gridcert/proxy.cert
[root@osg-se ~]# ls -al /home/phedex/gridcert/proxy.cert
-rw-------  1 phedex phedex 2588 Jan  9 17:53 /home/phedex/gridcert/proxy.cert
It must be owned by phedex and have permission 600.
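If the copy left the file owned by root, the ownership and mode can be fixed with:
[root@osg-se ~]# chown phedex:phedex /home/phedex/gridcert/proxy.cert
[root@osg-se ~]# chmod 600 /home/phedex/gridcert/proxy.cert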

Obtaining the Certificates

[root@osg-se ~]# . /OSG/setup.sh
[root@osg-se ~]# cd $VDT_LOCATION
[root@osg-se OSG]# source  ./setup.sh
[root@osg-se OSG]# cd /root/
[root@osg-se ~]# cert-request -ou s -dir . -label osg-se  -agree  -email mdias1@ift.unesp.br  -phone +55.11.XXXXXXX  -reason "Instaling a new Storage Element head node for SPRACE site"  -name "Marco Dias"
input full hostname: osg-se.sprace.org.br
registration authority: osg
virtual organization: dosar
The DOE people asked for a confirmation e-mail for the request (many days later...). On the machine we create a text file, message.txt, with the confirmation, and it is sent from the command line:
[mdias@spgrid ~]$ openssl smime -sign -signer ~/.globus/usercert.pem -inkey ~/.globus/userkey.pem -in message.txt | mail -s "DOEGrids CA - OSG:DOSAR - osg-se.sprace.org.br - Confirmation of Certificate Request in Queue (request id XXXX)" pessoa@alguma.coisa
Once that is done, using the number received by e-mail:
[root@osg-se ~]# . /OSG/setup.sh
[root@osg-se ~]# cert-retrieve  -certnum XXXXX -label osg-se -dir . -prefix osg-se.sprace.org.br
 using CA doegrids
Checking that the usercert and ./osg-sekey.pem match
writing RSA key
./osg-se.sprace.org.brcert.pem and ./osg-se.sprace.org.brkey.pem now contain your Globus credential
[root@osg-se ~]# mv osg-se.sprace.org.brcert.pem osg-se.sprace.org.brkey.pem /etc/grid-security/
[root@osg-se ~]# chmod 444 /etc/grid-security/osg-se.sprace.org.brcert.pem
[root@osg-se ~]# chmod 400 /etc/grid-security/osg-se.sprace.org.brkey.pem
[root@osg-se ~]# ln -s /etc/grid-security/osg-se.sprace.org.brcert.pem /etc/grid-security/hostcert.pem
[root@osg-se ~]# ln -s /etc/grid-security/osg-se.sprace.org.brkey.pem /etc/grid-security/hostkey.pem
Check that it is readable:
openssl x509 -text -noout -in /etc/grid-security/hostcert.pem

Configuration of dCache

From spgrid, it will be necessary to temporarily point the DNS at osg-se, and to copy a file:
[root@spgrid ~]# scp /etc/grid-security/grid-mapfile 200.136.80.27:/etc/grid-security/grid-mapfile
[root@spgrid ~]# vim /var/named/chroot/var/named/80.136.200.in-addr.arpa.zone
                                      2007121701 ; Serial
27      IN      PTR     osg-se. <---- this was 10 before
[root@spgrid ~]# vim /var/named/chroot/var/named/sprace.org.br.zone
                                      2007121701 ; Serial
osg-se            IN      A       200.136.80.27 <- this was 200.136.80.10 before
[root@spgrid ~]# /etc/init.d/named restart
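A quick check that the new records are being served (run on spgrid, assuming dig is available):
[root@spgrid ~]# dig @localhost osg-se.sprace.org.br +short
[root@spgrid ~]# dig @localhost -x 200.136.80.27 +short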
Back on osg-se:
[root@osg-se ~]# chmod 0644 /etc/grid-security/hostcert.pem
[root@osg-se ~]# chmod 0400 /etc/grid-security/hostkey.pem
[root@osg-se ~]# cd /opt/d-cache/bin
[root@osg-se bin]# wget http://www.atlasgrid.bnl.gov/dcache_admin/pkg/dcache_scripts/grid-mapfile2dcache-kpwd
[root@osg-se bin]# chmod a+x grid-mapfile2dcache-kpwd
[root@osg-se bin]# ./grid-mapfile2dcache-kpwd -g -r  /pnfs/fs/usr/data
[root@osg-se bin]# mv dcache.kpwd /opt/d-cache/etc/.
[root@osg-se bin]# cp -p /opt/d-cache/etc/dCacheSetup.template /opt/d-cache/config/dCacheSetup
serviceLocatorHost=osg-se.sprace.org
java="/usr/java/jdk1.6.0_02/bin/java"
useGPlazmaAuthorizationModule=true
useGPlazmaAuthorizationCell=false
Note: enabling gPlazma this way made the gsidcap door come up as well, as the check below (run after dCache was installed) shows:
[root@osg-se bin]# netstat -tap|grep *:22128
tcp        0      0 *:22128                     *:*                         LISTEN      25642/java
Continuing:
[root@osg-se ~]# cp -p /opt/d-cache/etc/node_config.template /opt/d-cache/etc/node_config
[root@osg-se bin]# vim /opt/d-cache/etc/node_config
NODE_TYPE=admin 
ADMIN_NODE=osg-se.sprace.org.br
GSIDCAP=yes
DCAP=yes
GRIDFTP=yes
SRM=yes
XROOTD=no
replicaManager=no
infoProvider=yes
statistics=no
gPlazmaService=yes
[root@osg-se bin]# vim /opt/d-cache/etc/dcachesrm-gplazma.policy
saml-vo-mapping="ON"
kpwd="ON"
grid-mapfile="OFF"
gplazmalite-vorole-mapping="OFF"
saml-vo-mapping-priority="1"
kpwd-priority="2"
grid-mapfile-priority="3"
gplazmalite-vorole-mapping-priority="4"
mappingServiceUrl="https://spgrid.if.usp.br:8443/gums/services/GUMSAuthorizationServicePort"
[root@osg-se bin]#  more /opt/d-cache/etc/dcache.kpwd|grep sprace.org.br|sed 's/login/authorize/g' > /etc/grid-security/storage-authzdb
[root@osg-se bin]# /opt/d-cache/install/install.sh
The SRM problem shown below
[root@osg-se ~]# /opt/d-cache/bin/dcache-core start
Pinging srm server to wake it up, will take few seconds ...
- Unable to find required classes (javax.activation.DataHandler and javax.mail.internet.MimeMultipart). Attachment support is disabled.
was solved as follows: 1) downloading the JavaBeans(TM) Activation Framework 1.1.1 and the JavaMail API 1.4.1; 2) unzipping them and then:
[root@osg-se ~]# mv mail.jar activation.jar /usr/local/pgsql/share/java/.
[root@osg-se ~]# vim /etc/profile
export CLASSPATH=/usr/local/pgsql/share/java/postgresql.jar:/usr/local/pgsql/share/java/mail.jar:/usr/local/pgsql/share/java/activation.jar:$CLASSPATH
The Java version was changed because of the Tomcat installation:
[root@osg-se ~]# mv /usr/bin/java /usr/bin/java.old
[root@osg-se ~]# ln -s /usr/java/jdk1.6.0_02/bin/java /usr/bin/java
With these adjustments made:
[root@osg-se ~]# cp /opt/d-cache/bin/dcache-core /etc/init.d/.
[root@osg-se ~]# chkconfig --add dcache-core
[root@osg-se ~]# chkconfig dcache-core on
[root@osg-se ~]# /etc/init.d/dcache-core start
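After the start, it is worth checking which doors are actually listening; the ports to look for are the ones used above (22125 for dcap, 22128 for gsidcap, 8443 for SRM). A simple way:
[root@osg-se ~]# netstat -tlnp | grep java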
Dependency problems with glibc 2.4:
[root@osg-se ~]# ldd /opt/d-cache/dcap/bin/dccp
/opt/d-cache/dcap/bin/dccp: /lib/i686/libc.so.6: version `GLIBC_2.4' not found (required by /opt/d-cache/dcap/bin/dccp)
[root@osg-se ~]# wget http://www.dcache.org/downloads/1.8.0/dcache-dcap-1.8.0-4.i586.rpm
[root@osg-se ~]# rpm -ivh --force dcache-dcap-1.8.0-4.i586.rpm
Fixing a dccp configuration error:
[root@osg-se data]# dccp -d 63 /bin/sh my-test-file
No IO tunneling plugin specified for osg-se.sprace.org.br.sprace.org.br:22125.
Totaly 1 doors entries found
Allocated message queues 1, used 1
[root@osg-se data]# echo "osg-se.sprace.org.br:22125" >>/pnfs/fs/admin/etc/config/dCache/dcache.conf 

Another error that showed up in SRM transfers:

[mdias@osg-se ~]$ PATH=/opt/d-cache/srm/bin/:$PATH
[mdias@osg-se ~]$ srmcp srm://osg-se.sprace.org.br:8443/pnfs/sprace.org.br/data/my-test-file file:///tmp/test
Authentication failed. Caused by GSSException: Operation unauthorized (Mechanism level: [JGLOBUS-56] Authorization failed. Expected "/CN=host/200.136.80.27" target but received "/DC=org/DC=doegrids/OU=Services/CN=osg-se.sprace.org.br")
This was fixed simply by adding the following to /etc/hosts:
200.136.80.27   osg-se.sprace.org.br    osg-se

[root@osg-se ~]# vim /etc/cron.daily/grid-mapfile2dcache-kpwd
#!/bin/sh
/opt/d-cache/bin/grid-mapfile2dcache-kpwd -g -r  /pnfs/sprace.org.br/data -o /opt/d-cache/etc/dcache.kpwd
[root@osg-se ~]# chmod 755 /etc/cron.daily/grid-mapfile2dcache-kpwd
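The cron job can be exercised once by hand to make sure it regenerates the kpwd file (optional check):
[root@osg-se ~]# /etc/cron.daily/grid-mapfile2dcache-kpwd
[root@osg-se ~]# ls -l /opt/d-cache/etc/dcache.kpwd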
Configuring logrotate:
[root@osg-se ~]# vim /etc/logrotate.conf
compress
/var/log/srm*.log  {
        weekly
        create 0664 root root
        size 250M
        rotate 1
}
[root@osg-se ~]# vim /etc/cron.daily/logrotate
#!/bin/sh

/usr/sbin/logrotate /etc/logrotate.conf
EXITVALUE=$?
if [ $EXITVALUE != 0 ]; then
    /usr/bin/logger -t logrotate "ALERT exited abnormally with [$EXITVALUE]"
fi
DELPID=`lsof /dev/md5 | tr -s ' ' ' ' | grep srm* | grep "deleted" | tail -1 | cut -f2 -d' '`
while [ "$DELPID" ]; do
kill -9 $DELPID
DELPID=`lsof /dev/md5 | tr -s ' ' ' ' | grep srm* | grep "deleted" | tail -1 | cut -f2 -d' '`
done
exit 0
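logrotate can be run in debug mode to verify the new /var/log/srm*.log stanza without actually rotating anything (a suggested check):
[root@osg-se ~]# /usr/sbin/logrotate -d /etc/logrotate.conf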

Installation of the Frontier squid

[root@osg-se ~]# groupadd dbfrontier
[root@osg-se ~]# useradd -g dbfrontier dbfrontier
[root@osg-se frontier_squid-1.0rc4]# mkdir /usr/local/frontier
[root@osg-se frontier_squid-1.0rc4]# chown dbfrontier:dbfrontier  /usr/local/dbfrontier/frontier-cache/squid/var/cache
[root@osg-se ~]# cd /tmp/
[root@osg-se ~]# su dbfrontier
[dbfrontier@osg-se tmp]# wget http://edge.fnal.gov:8888/frontier/dist/frontier_squid-1.0rc4.tar.gz
[dbfrontier@osg-se tmp]# tar -xvzf frontier_squid-1.0rc4.tar.gz 
[dbfrontier@osg-se tmp]# cd frontier_squid-1.0rc4
[dbfrontier@osg-se frontier_squid-1.0rc4]# ./configure
Answer the script's questions: the installation directory is /usr/local/frontier; the networks that should have access are 200.136.80.0/255.255.255.0 and 192.168.1.0/255.255.255.0.
[dbfrontier@osg-se frontier_squid-1.0rc4]# make
Problems: we had to set the visible_hostname variable and also the following variables:
[dbfrontier@osg-se frontier_squid-1.0rc4]# vim /tmp/frontier_squid-1.0rc4/squid/files/postinstall/squid.conf
cache_effective_user dbfrontier
cache_effective_group dbfrontier
visible_hostname osg-se.sprace.org.br
[dbfrontier@osg-se frontier_squid-1.0rc4]# make install
[dbfrontier@osg-se frontier_squid-1.0rc4]$ /usr/local/frontier/frontier-cache/utils/bin/fn-local-squid.sh start

Testing the installation. From sprace, run the third and fourth lines twice:

[mdias@sprace ~]$ wget http://edge.fnal.gov:8888/frontier/dist/fnget.py
[mdias@sprace ~]$ chmod +x fnget.py
[mdias@sprace ~]$ ./fnget.py --url=http://cmsfrontier.cern.ch:8000/Frontier/Frontier --sql="select 1 from dual"
[mdias@sprace ~]$ export http_proxy=http://200.136.80.27:3128
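With http_proxy exported, repeat the query; the second run should now go through (and be answered by) the squid on osg-se:
[mdias@sprace ~]$ ./fnget.py --url=http://cmsfrontier.cern.ch:8000/Frontier/Frontier --sql="select 1 from dual"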
On the osg-se terminal the accesses should show up in:
[dbfrontier@osg-se frontier_squid-1.0rc4]$ tail -f /usr/local/frontier/frontier-cache/squid/var/logs/access.log
Now, still as dbfrontier:
[dbfrontier@osg-se ~]$ crontab -e
7 7 * * * /usr/local/frontier-cache/utils/cron/daily.sh 2>&1 >/dev/null
[dbfrontier@osg-se ~]$ exit
As root, add the following right after the #!/bin/bash line:
[root@osg-se ~]# cp /usr/local/frontier/frontier-cache/utils/init.d/frontier-squid.sh  /etc/init.d/.
[root@osg-se ~]# vim /etc/init.d/./frontier-squid.sh
#chkconfig: 345 99 01
#description: starts Frontier services
[root@osg-se ~]# /sbin/chkconfig --add frontier-squid.sh

We have already had to upgrade Squid

[root@osg-se ~]# cd /tmp/
[root@osg-se tmp]# su dbfrontier
[dbfrontier@osg-se tmp]$ wget http://frontier.cern.ch/dist/frontier_squid-3.0rc5.tar.gz
[dbfrontier@osg-se tmp]$ tar -xvzf frontier_squid-3.0rc5.tar.gz
[dbfrontier@osg-se tmp]$ cd frontier_squid-3.0rc5
[dbfrontier@osg-se frontier_squid-3.0rc5]$ ./configure
/usr/local/frontier
 200.136.80.0/255.255.255.0 192.168.1.0/255.255.255.0
cache_mem: 1000
cache_dir: 68000
[dbfrontier@osg-se frontier_squid-3.0rc5]$ make install
[dbfrontier@osg-se frontier_squid-3.0rc5]$ crontab -e
7 7 * * * /usr/local/frontier/frontier-cache/utils/cron/daily.sh 2>&1 >/dev/null
[dbfrontier@osg-se frontier_squid-3.0rc5]$ exit
exit
[root@osg-se tmp]# cp /usr/local/frontier/frontier-cache/utils/init.d/frontier-squid.sh  /etc/init.d/.
[root@osg-se tmp]# /sbin/chkconfig --add frontier-squid.sh

Upgrade of the PhEDEx installation

Before putting it into production it was necessary to upgrade the PhEDEx installation

[root@osg-se ~]# su - phedex
[phedex@osg-se ~]$ version=2_6_1
[phedex@osg-se ~]$ myarch=slc4_ia32_gcc345
[phedex@osg-se ~]$ export sw=$PWD/sw
[phedex@osg-se ~]$ pwd
/home/phedex
[phedex@osg-se ~]$ source $sw/$myarch/external/apt/0.5.15lorg3.2-CMS3/etc/profile.d/init.sh
[phedex@osg-se ~]$ apt-get update
[phedex@osg-se ~]$ apt-get install cms+PHEDEX+PHEDEX_$version
[phedex@osg-se ~]$ rm -f PHEDEX; ln -s $sw/$myarch/cms/PHEDEX/PHEDEX_$version PHEDEX
[phedex@osg-se PhEDEx]$ cd /home/phedex/SITECONF/SPRACE/PhEDEx/
[phedex@osg-se PhEDEx]$ sed -i 's/2_5_4_2/2_6_1/g' *

Partitioning problems

The /home/phedex directory filled up /. We will have to move this directory:

[root@osg-se ~]# mkdir /usr/local/phedex
[root@osg-se ~]# chown phedex:phedex /usr/local/phedex
[root@osg-se ~]# rm -rf /home/phedex/
[root@osg-se ~]# vim /etc/passwd
phedex:x:502:502::/usr/local/phedex:/bin/bash
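Editing /etc/passwd by hand works; an equivalent (and arguably safer) way would be:
[root@osg-se ~]# usermod -d /usr/local/phedex phedex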
[root@osg-se ~]# su - phedex
-bash-3.00$ mkdir -p state logs sw gridcert
-bash-3.00$ chmod 700 gridcert
-bash-3.00$ export sw=$PWD/sw
-bash-3.00$ export version=2_6_2
-bash-3.00$ export myarch=slc4_ia32_gcc345
-bash-3.00$ wget -O $sw/bootstrap-${myarch}.sh http://cmsrep.cern.ch/cmssw/bootstrap-${myarch}.sh
-bash-3.00$ sh -x $sw/bootstrap-${myarch}.sh setup -path $sw
-bash-3.00$ source $sw/$myarch/external/apt/0.5.15lorg3.2-CMS3/etc/profile.d/init.sh
-bash-3.00$ apt-get update
-bash-3.00$ apt-get install cms+PHEDEX+PHEDEX_$version
[phedex@osg-se ~]$ rm -f PHEDEX; ln -s $sw/$myarch/cms/PHEDEX/PHEDEX_$version PHEDEX
On spdc00:
[root@spdc00 ~]# cd /home/phedex/
[root@spdc00 phedex]#  tar -cjpvf phedex_conf.tar.bz2  SITECONF/SPRACE/PhEDEx/; scp phedex_conf.tar.bz2  192.168.1.151:/usr/local/phedex/. ; rm phedex_conf.tar.bz2
Back on osg-se:
-bash-3.00$  tar -xjvpf phedex_conf.tar.bz2
-bash-3.00$ rm phedex_conf.tar.bz2
-bash-3.00$ cd SITECONF/SPRACE/PhEDEx/
-bash-3.00$ sed -i 's/2_5_1/2_6_2/g' *
-bash-3.00$ sed -i 's/slc3_ia32_gcc323/slc4_ia32_gcc345/g' *
-bash-3.00$ sed -i 's/\/etc\/glite\/profile.d\/glite_setenv.sh/\/opt\/glite\/etc\/profile.d\/grid-env.sh/g' *
-bash-3.00$ sed  -i 's/home/usr\/local/g' SITECONF/SPRACE/PhEDEx/*
-bash-3.00$ cd
-bash-3.00$ echo "source /OSG/setup.sh" >>.bashrc
-bash-3.00$ echo "export X509_USER_PROXY=$HOME/gridcert/proxy.cert">>.bashrc
-bash-3.00$ echo "export X509_USER_PROXY=$HOME/gridcert/proxy.cert">>.bashrc
-bash-3.00$ echo "source $HOME/PHEDEX/etc/profile.d/init.sh">>.bashrc ; echo "export PATH=$PATH:$HOME/PHEDEX/Utilities">>.bashrc
-bash-3.00$ echo "source /opt/glite/etc/profile.d/grid-env.sh">>.bashrc
-bash-3.00$ echo "export PYTHONPATH=$PYTHONPATH:/opt/lcg/lib/python">>.bashrc
-bash-3.00$ source $sw/$myarch/cms/PHEDEX/PHEDEX_$version/etc/profile.d/env.sh
-bash-3.00$ vim SITECONF/SPRACE/PhEDEx/storage.xml
change to the new machines and the domain to sprace.org.br
-bash-3.00$ exit
[root@osg-se ~]# su - mdias
[mdias@osg-se ~]$ grid-proxy-init -valid 720:00
[root@osg-se ~]# cp /tmp/x509up_u`id -u mdias` /usr/local/phedex/gridcert/proxy.cert
[root@osg-se ~]# chown phedex:phedex /usr/local/phedex/gridcert/proxy.cert
[root@osg-se ~]# ln -s /OSG/ /opt/osg-0.8.0

Topic attachments
ks_osgse_instalar.cfg (3.6 K, 2008-01-15) - Kickstart for installing the OSG-SE