一、配置网络设置
二、配置yum仓库
[root@node1 yum.repos.d]# cat *repo
[AppStream]
name=AppStream
baseurl=http://foundation0.ilt.example.com/dvd/AppStream
enabled=1
gpgcheck=0
[BaseOS]
name=BaseOS
baseurl=http://foundation0.ilt.example.com/dvd/BaseOS
enabled=1
gpgcheck=0
[root@node1 yum.repos.d]# yum repolist
repo id repo name
AppStream AppStream
BaseOS BaseOS
三、配置SELinux
[root@node1 yum.repos.d]# man semanage-port|grep http
[root@node1 yum.repos.d]# semanage port -a -t http_port_t -p tcp 82
[root@node1 yum.repos.d]# restorecon -Rv /var/www/html/
[root@node1 yum.repos.d]# systemctl enable httpd
四、用户及组管理
[root@node1 yum.repos.d]# groupadd sysmgrs
[root@node1 yum.repos.d]# useradd natasha -G sysmgrs
[root@node1 yum.repos.d]# useradd harry -G sysmgrs
[root@node1 yum.repos.d]# useradd -s /sbin/nologin sarah
[root@node1 yum.repos.d]# echo "flectrag"|passwd --stdin natasha
五、计划任务cron
[root@node1 yum.repos.d]# crontab -e -u natasha
*/2 * * * * logger "EX200 in progress"
[root@node1 etc]# tail -f /var/log/cron
六、目录权限设置
[root@node1 etc]# mkdir -p /home/managers
[root@node1 etc]# chgrp sysmgrs /home/managers
[root@node1 etc]# chmod 2770 /home/managers
七、配置ntp
[root@node1 etc]# yum install -y chrony
[root@node1 etc]# cat /etc/chro*
#server 3.rhel.pool.ntp.org iburst
server materials.example.com iburst
[root@node1 etc]# systemctl restart chronyd
[root@node1 etc]# systemctl enable chronyd
八、配置 autofs
[root@node1 ~]# yum install autofs -y
[root@node1 ~]# cat /etc/auto.master
/misc /etc/auto.misc
/rhome /etc/auto.rhome
[root@node1 ~]# cat /etc/auto.rhome
remoteuser1 -rw,sync 172.25.254.254:/rhome/remoteuser1
[root@node1 ~]# systemctl enable autofs
[root@node1 ~]# systemctl restart autofs
[root@node1 rhome]# su - remoteuser1
九、配置 /var/tmp/fstab 权限
[root@node1 rhome]# cp /etc/fstab /var/tmp/fstab
[root@node1 rhome]# setfacl -m u:harry:- /var/tmp/fstab
[root@node1 rhome]# setfacl -m u:harry:- /var/tmp/fstab
十、配置用户帐户
[root@node1 etc]# useradd manalo -u 3533
[root@node1 etc]# echo "flectrag"|passwd --stdin manalo
十一、查找文件
[root@node1 etc]# mkdir /root/findfiles
[root@node1 etc]# find / -user jacques -exec cp -a {} /root/findfiles/ \;
十二、查找字符串
[root@node1 etc]# grep ng /usr/share/xml/iso-codes/iso_639_3.xml > /root/list
十三、创建存档
[root@node1 etc]# tar -zcvf /root/backup.tar.gz /usr/local
十四、配置容器使其自动启动
利用注册服务器上的 rsyslog 镜像,创建一个名为 logserver 的容器
面向 wallah 用户,配置一个 systemd 服务
该服务命名为 container-logserver ,并在系统重启时自动启动,无需干预
十五、为容器配置持久存储
通过以下方式扩展上一个任务的服务
配置主机系统的 journald 日志以在系统重启后保留数据,并重新启动日志记录服务
将主机 /var/log/journal 目录下任何以 *.journal 结尾的文件复制到 /home/wallah/container_logfile 中
将服务配置为在启动时自动将 /home/wallah/container_logfile 挂载到容器中的 /var/log/journal 下
十六、创建shell脚本
在/usr/bin目录下创建一个repwis脚本,查找/usr目录下小于10M并且组id不为root的文件,把查到的文件结果拷贝到/root/myfiles文件夹内
[root@node1 rhome]# cat /usr/bin/repwis
#!/usr/bin/bash
# repwis — copy every file under /usr that is smaller than 10 MiB and whose
# group is NOT root (gid 0) into /root/myfiles.
#
# The original looped over each gid listed in /etc/group and ran find once
# per group: that misses files owned by a group with no /etc/group entry,
# left ${i} unquoted in the test, and never created the target directory.
# A single find with "! -gid 0" matches all non-root groups in one pass.
mkdir -p /root/myfiles
find /usr -size -10M ! -gid 0 -exec cp -a {} /root/myfiles \;
一、重置root密码
按下e进行编辑
增加,然后ctrl+x
改密码但是现在的根是以只读的方式挂载,需要以读写的方式重新挂载根(sysroot是当前根目录)-----autorelabel的作用是告诉selinux重新打标签
[root@node2 yum.repos.d]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
vo myvol -wi-ao---- 184.00m
swap vgroup -wi-ao---- 256.00m
[root@node2 yum.repos.d]# lvextend -L 230M /dev/myvol/vo
[root@node2 yum.repos.d]# resize2fs /dev/myvol/vo
[root@node2 yum.repos.d]# df -h
[root@node2 ~]# lsblk
[root@node2 ~]# fdisk /dev/vdb
[root@node2 ~]# mkswap /dev/vdb3
Setting up swapspace version 1, size = 756 MiB (792719360 bytes)
no label, UUID=56b4627f-c9c5-432b-b9a5-0e0c5c15bf62
[root@node2 ~]# cat /etc/fstab
UUID=56b4627f-c9c5-432b-b9a5-0e0c5c15bf62 swap swap defaults 0 0
[root@node2 ~]# swapon -a
[root@node2 ~]# swapon -s
创建vg
[root@node2 ~]# fdisk /dev/vdb
[root@node2 ~]# vgcreate -s 16M qagroup /dev/vdb4
Physical volume "/dev/vdb4" successfully created.
Volume group "qagroup" successfully created
创建lv
[root@node2 ~]# lvcreate -l 10 -n qa qagroup
[root@node2 ~]# lvscan
ACTIVE '/dev/qagroup/qa' [160.00 MiB] inherit
创建文件系统
[root@node2 ~]# mkfs.ext3 /dev/qagroup/qa
[root@node2 ~]# cat /etc/fstab
/dev/qagroup/qa /mnt/qa ext3 defaults 0 0
[root@node2 ~]# mkdir -p /mnt/qa
[root@node2 ~]# mount -a
[root@node2 ~]# yum install -y tuned
[root@node2 ~]# systemctl enable tuned
[root@node2 ~]# systemctl restart tuned
[root@node2 ~]# tuned-adm list
[root@node2 ~]# tuned-adm profile throughput-performance
[greg@control ansible]$ sudo yum install -y rhel-system-roles
roles_path = /home/greg/ansible/roles:/usr/share/ansible/roles
[greg@control ansible]$ cp -rf /usr/share/doc/rhel-system-roles/timesync/example-timesync-playbook.yml timesync.yml
[greg@control roles]$ ansible-galaxy install -r requirements.yml -p .
- downloading role from http://materials/haproxy.tar
- extracting balancer to /home/greg/ansible/roles/balancer
- balancer was installed successfully
- downloading role from http://materials/phpinfo.tar
- extracting phpinfo to /home/greg/ansible/roles/phpinfo
- phpinfo was installed successfully
[greg@control roles]$ cat requirements.yml
---
- src: http://materials/haproxy.tar
name: balancer
- src: http://materials/phpinfo.tar
name: phpinfo
[greg@control roles]$ ansible-galaxy init apache
# tasks file for apache
- name: start service httpd
service:
name: httpd
state: started
enabled: yes
- name: open firewall port
firewalld:
service: http
permanent: yes
state: enabled
# immediate: yes
- name: template file
template:
src: index.html.j2
dest: /var/www/html/index.html
index.html.j2 main.yml
[greg@control tasks]$ cat *j2
Welcome to {{ ansible_fqdn }} on {{ ansible_default_ipv4.address }}
[greg@control ansible]$ cat roles.yml
---
- name: use role balancer
hosts: balancers
roles:
- balancer
- name: use apache and phpinfo
hosts: webservers
roles:
- apache
- phpinfo