Chinaunix首页 | 论坛 | 博客
  • 博客访问: 18638
  • 博文数量: 7
  • 博客积分: 90
  • 博客等级: 民兵
  • 技术积分: 75
  • 用 户 组: 普通用户
  • 注册时间: 2008-01-06 15:59
个人简介

苦力强,IBM苦力强

文章分类

全部博文(7)

文章存档

2020年(4)

2016年(1)

2015年(1)

2014年(1)

我的朋友

分类: 云计算

2015-10-20 17:57:44

操作系统准备

##############################################################################

operations release:
CentOS-7-x86_64-DVD-1503-01.iso
# 下载地址

#硬盘分区:
/dev/sda3       196G  2.4G  193G   2% /
devtmpfs         63G     0   63G   0% /dev
tmpfs            63G     0   63G   0% /dev/shm
tmpfs            63G  8.9M   63G   1% /run
tmpfs            63G     0   63G   0% /sys/fs/cgroup
/dev/sda5       2.6T  114G  2.5T   5% /Data
/dev/sda2       497M  142M  356M  29% /boot

# Services required by kickstart (PXE network install): xinetd, tftp, dhcpd.
systemctl restart xinetd.service tftp.service dhcpd.service

# Push OS configuration files to a compute node, then refresh its packages.
# NOTE(review): the original one-liner wiped /etc/yum.repos.d/* on the remote
# host AND THEN ran "yum update" (with no repos left), re-copying the repo
# files only afterwards; it also ran "yum update" without -y, which prompts.
# Fixed the ordering and made yum non-interactive.
IP="10.209.230.17"
scp /root/.ssh/authorized_keys "$IP":/root/.ssh/authorized_keys
scp /etc/ssh/sshd_config "$IP":/etc/ssh/sshd_config
scp /etc/profile "$IP":/etc/profile
scp /etc/selinux/config "$IP":/etc/selinux/config
scp /etc/security/limits.conf "$IP":/etc/security/limits.conf
scp /etc/sysctl.conf "$IP":/etc/sysctl.conf
scp /opt/openrc "$IP":/opt/openrc
scp /etc/hosts "$IP":/etc/hosts
scp /etc/resolv.conf "$IP":/etc/resolv.conf
scp /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 "$IP":/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
# Replace the remote repo definitions with ours, then apply them.
ssh "$IP" "rm -rf /etc/yum.repos.d/*"
scp /etc/yum.repos.d/* "$IP":/etc/yum.repos.d/
ssh "$IP" "systemctl restart sshd.service; source /etc/profile; yum -y update"

# Copy the NIC configuration files to a compute node.
IP="10.209.230.15"; scp ifcfg-br-* $IP:/etc/sysconfig/network-scripts/ ; scp ifcfg-em1 $IP:/etc/sysconfig/network-scripts/;
# 有四个网卡的配置。
ifcfg-em1
ifcfg-br-int
ifcfg-br-ex
ifcfg-br-tun

# CentOS 7 host naming.
# NOTE(review): /etc/sysconfig/network is a KEY=VALUE sysconfig file; the
# original wrote a bare "L-18" line, which is not valid for that file.
# hostnamectl below is the authoritative mechanism on CentOS 7 anyway.
cat > /etc/sysconfig/network << EOF
HOSTNAME=L-18
EOF

hostnamectl --static set-hostname L-18

# Install base tooling, virtualization groups, docker, and KVM packages.
# NOTE(review): the last yum in the chain lacked -y, which would block a
# non-interactive run waiting for confirmation; added it.
yum -y update && yum -y groupinstall "Development Tools" && yum install -y system* && yum -y groupinstall "Virtual*" &&  yum -y install docker && yum -y install qemu-kvm qemu-img libguestfs;
# Disable every enabled unit except the keep-list (multi-user target, ssh,
# libvirtd, gettys, docker). "-i" is deprecated GNU xargs syntax; use -I {}.
systemctl list-unit-files|grep enable |awk '{ print $1 }'|egrep -v "multi-user.target|ssh|libvirtd|@tty|docker" |xargs -I {} systemctl disable {}
# Remove libvirt's default NAT network.
virsh net-destroy default && virsh net-undefine default;
systemctl enable docker.service && systemctl restart docker.service;
systemctl enable crond.service && systemctl restart crond.service;

# Set the timezone to Asia/Shanghai and sync the clock once against the local NTP server.
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime ; ntpdate  10.77.131.19;

# Schedule a periodic clock sync (minute 20 of every hour).
# NOTE(review): the original used "< 20 * * * * ..." — a single input
# redirection instead of a here-doc — so the cron file was never written
# (the stray EOF terminator shows a here-doc was intended). Fixed to << EOF.
cat > /var/spool/cron/root << EOF
20 * * * * ( /usr/sbin/ntpdate  10.77.131.19 > /dev/null 2>&1 )
EOF

# yum源的添加:
# 这个是为了补充你ISO中不包含的,安装openstack应用的时候所需的系统rpm包。
yum install

# 这个是OpenStack Liberty 的来源:
yum install
@说明:因为选取的是centos的test的源。所以yum安装openstack组件和python包的时候,右侧都会提示:centos-openstack-liberty-test。

# Append the following to /etc/profile:
source /opt/openrc
ulimit -S -c 0 > /dev/null 2>&1  # disable core dumps (soft limit 0)
ulimit -n 10240                  # max open file descriptors
ulimit -u 77823                  # max user processes

# 安装需要的操作系统组件;
保留尽量少的服务;
为安装Openstack准备。
#######################################################################################

# liberty 版本的源代码下载地址。


# Create environment-variable file /opt/openrc with the following content.
# NOTE(review): OS_AUTH_URL and SERVICE_ENDPOINT are empty here — the real
# URLs were presumably stripped when the post was published; confirm values.
export OS_TENANT_NAME=service
export OS_USERNAME=nova
export OS_PASSWORD=nova
export OS_AUTH_URL=""
export SERVICE_ENDPOINT=""
export SERVICE_TOKEN=ADMIN

# 开始安装OpenStack Liberty 的组件:
安装,配置,初始化一个组件的步骤是:
1.确认安装包成功安装。如:yum install 没有报错。
2.确定数据库里对应本组件的库已经建立,并设定了对应的用户名和密码。确定db_sync能创建本库的表格。
3.确定本组件的配置文件都是对应本用户名和组,所需的进程都能启动。
4.选择是否调整参数值大小。

#######################################################################################

keystone
#######################################################################################
# 步骤是:
1.建立keystone的数据库,为keystone库建立用户和密码。
2.安装包。这一步骤需要安装两个包:openstack-utils 和 openstack-keystone。
3.执行keystone-manage db_sync。在keystone库中建立表结构。
4. 启动openstack-keystone.service。即:systemctl enable openstack-keystone.service && systemctl restart openstack-keystone.service。必须openstack-keystone.service正常启动之后,才能进行 keystone数据的初始化。建立用户,并将信息存入keystone库。
5.

# Version installed: 1:8.0.0-0.6.0rc2.el7
# This step installs two packages: openstack-utils and openstack-keystone.
# NOTE(review): the original granted ALL PRIVILEGES on *.* (every database);
# scoped the grants down to the keystone schema and added the missing space
# after "to".
grant all on keystone.* to 'keystone'@'%' identified by "keystone"; grant all on keystone.* to 'keystone'@'10.209.230.13' identified by "keystone";
chown keystone:keystone /etc/keystone/ -R

#######################################################################################

安装 rabbitmq-server.service

#######################################################################################

rabbitmq-server-3.3.5-6.el7.noarch
# Change the default guest account password.
rabbitmqctl change_password guest openstack1
# Check rabbitmq's connection limits via its status output.
# NOTE(review): only "status" is shown here — the actual command that raises
# the connection limit appears to be missing from the notes; confirm.
rabbitmqctl status

#######################################################################################

安装glance
openstack-glance-11.0.0-1.el7.noarch 和 python-glanceclient-1.1.0-1.el7.noarch

#######################################################################################
# Install the packages first:
yum -y install openstack-glance python-glanceclient
建立glance的数据库:
MariaDB [(none)]> create database glance;
MariaDB [(none)]> grant all on *.* to'glance'@'%' identified by "glance"; grant all on *.* to'glance'@'10.209.230.13' identified by "glance";
确认glance的数据库初始化完成,glance-manage db_sync。
# NOTE(review): the GRANTs in the transcript above use *.* (every database);
# glance.* would be the minimal privilege — confirm before reusing.

chown glance:glance /etc/glance/ -R
确认这三个服务都能启动,否则运行一遍之后去/var/glance/下看日志,修改glance的配置文件,排除日志里提示的错误。
确认以下三个服务能起来,而且日志中没有错误信息。
systemctl restart openstack-glance-scrubber.service openstack-glance-api.service openstack-glance-registry.service

# 这里我碰到了错误提示,api.log里提示:/etc/glance/glance-api-paste.ini有标签项没找到。我看了安装后本机上的 /etc/glance/glance-api-paste.ini。配置文件比kilo版下的少很多,没细看,直接复制kilo下的/etc/glance/glance-api-paste.ini过来覆盖。本错误排除。

/etc/glance/glance-registry-paste.ini也有错误。复制kilo版的这个文件/etc/glance/glance-registry-paste.ini,覆盖。本错误排除。

# 修改配置文件。
在glance-api.conf里。

[DEFAULT]
workers = 160
rpc_backend = rabbit
notification_driver = messaging
#transport_url = rabbit://
rabbit_host = 10.209.230.13
rabbit_port = 5672
rabbit_use_ssl = true
rabbit_userid = guest
rabbit_password = openstack1
rabbit_virtual_host = /

在glance-registry.conf里。

[DEFAULT]
workers = 160
rpc_backend = rabbit
notification_driver = messaging
#transport_url = rabbit://
rabbit_host = 10.209.230.13
rabbit_port = 5672
rabbit_use_ssl = true
rabbit_userid = guest
rabbit_password = openstack1
rabbit_virtual_host = /

# 重启glance的三个服务,没有报错。

systemctl restart openstack-glance-scrubber.service openstack-glance-api.service openstack-glance-registry.service
# 运行:
[root@K-13 glance]# glance image-list
+----+------+
| ID | Name |
+----+------+
+----+------+
# 表示glance运行正常,且,目前没有镜像。

# Import local qcow2 images into glance (ovf container format) with a progress bar.
glance image-create --name W8 --disk-format qcow2 --container-format ovf --progress < cn_windows_8.1_enterprise_with_update_x64_dvd_6050374.qcow2
glance image-create --name "Centos-6.5-x86_64-20G_resize.20141210.developer" --disk-format qcow2 --container-format ovf --progress < Centos-6.5-x86_64-20G_resize.20141210.developer.qcow2
glance image-create --name "WinServer2008r2_80G" --disk-format qcow2 --container-format ovf --progress < WinServer2008r2_80G.qcow2

#######################################################################################


安装swift,(可以跳过swift的安装,进行nova的安装)

安装nova
#######################################################################################

# Check that nova is running.

systemctl enable openstack-nova-api.service openstack-nova-compute.service openstack-nova-novncproxy.service openstack-nova-xvpvncproxy.service openstack-nova-cells.service openstack-nova-conductor.service openstack-nova-objectstore.service openstack-nova-cert.service openstack-nova-console.service openstack-nova-scheduler.service

systemctl restart openstack-nova-api.service openstack-nova-compute.service openstack-nova-novncproxy.service openstack-nova-xvpvncproxy.service openstack-nova-cells.service openstack-nova-conductor.service openstack-nova-objectstore.service openstack-nova-cert.service openstack-nova-console.service openstack-nova-scheduler.service openstack-nova-consoleauth.service

# Confirm every nova service starts except openstack-nova-metadata-api.service and openstack-nova-network.service.

[root@L-13 nova]# nova list  # 确认nova list的输出如下。
+----+------+--------+------------+-------------+----------+
| ID | Name | Status | Task State | Power State | Networks |
+----+------+--------+------------+-------------+----------+
+----+------+--------+------------+-------------+----------+

nova的配置参数之后,还有太多的参数要修改,连接数等等。
#######################################################################################

安装 cinder
只是简单安装过去, 以前的安装经验告诉我,neutron做scheduler的时候,会检测cinder服务的端口。

#######################################################################################
create database cinder;
-- NOTE(review): scoped the grants to cinder.* — the original used *.*,
-- i.e. ALL PRIVILEGES on every database for the cinder user.
grant all on cinder.* to 'cinder'@'%' identified by "cinder"; grant all on cinder.* to 'cinder'@'10.209.230.13' identified by "cinder"; grant all on cinder.* to 'cinder'@'localhost' identified by "cinder";
#######################################################################################

安装neutron
#######################################################################################

create database ovs_neutron;

-- NOTE(review): scoped the grants to ovs_neutron.* — the original used *.*,
-- i.e. ALL PRIVILEGES on every database for the neutron user.
grant all on ovs_neutron.* to 'neutron'@'%' identified by "neutron"; grant all on ovs_neutron.* to 'neutron'@'10.209.230.13' identified by "neutron"; grant all on ovs_neutron.* to 'neutron'@'localhost' identified by "neutron";

# Install these packages.
yum install -y openstack-neutron openstack-neutron-openvswitch python-neutronclient openstack-neutron-lbaas openstack-neutron-fwaas openstack-neutron-vpnaas
# Install complained that /usr/bin/neutron-server-setup was missing; copied it over from a kilo machine.
# Install complained that /etc/neutron/plugins/ml2/ml2_conf.ini was missing; copied it from a kilo machine.
# Install complained that /etc/neutron/plugin.ini was missing; create a symlink for now.
# NOTE(review): plugin.ini conventionally points at ml2_conf.ini rather than
# the openvswitch agent config — confirm this link target is intentional.
ln -s /etc/neutron/plugins/ml2/openvswitch_agent.ini /etc/neutron/plugin.ini
# After restarting the neutron services, only neutron-server.service fails to start — that means the install succeeded and configuration can begin.

[root@L-13 neutron]# systemctl restart neutron-dhcp-agent.service neutron-lbaas-agent.service neutron-vpn-agent.service neutron-l3-agent.service neutron-lbaasv2-agent.service neutron-openvswitch-agent.service neutron-server.service
Job for neutron-server.service failed. See 'systemctl status neutron-server.service' and 'journalctl -xn' for details.

# Confirm openvswitch itself can start.
systemctl restart openvswitch-nonetwork.service openvswitch.service

#######################################################################################

配置neutron。

#######################################################################################

# Create the bridges; confirm both br-int and br-ex exist afterwards.
# NOTE(review): this attaches em1, but the ovs-vsctl transcript below shows
# em2 on br-ex — confirm which NIC is the external uplink.
ovs-vsctl add-br br-ex && ovs-vsctl add-port br-ex em1
[root@L-13 ~]# ovs-vsctl show
0cdb8fd6-e369-4d6d-a62e-5044b1718f19
    Bridge br-ex
        Port br-ex
            Interface br-ex
                type: internal
        Port "em2"
            Interface "em2"
    Bridge br-int
        fail_mode: secure
        Port br-int
            Interface br-int
                type: internal
    ovs_version: "2.4.0"

# Restart all neutron agents, then create a shared flat provider network and
# a v4 subnet with an allocation pool and DNS server.
systemctl restart openvswitch.service neutron-dhcp-agent.service neutron-lbaas-agent.service neutron-vpn-agent.service neutron-l3-agent.service neutron-lbaasv2-agent.service neutron-openvswitch-agent.service neutron-server.service
neutron net-create --tenant-id c5568b555b7344b99484f637339bf91d sharednet1 --shared --provider:network_type flat --provider:physical_network physnet1
neutron subnet-create --tenant-id c5568b555b7344b99484f637339bf91d --ip-version 4 sharednet1 10.209.230.0/24 --allocation-pool start=10.209.230.200,end=10.209.230.249 --dns_nameservers list=true 114.114.114.114

# 小经验:
因为我没把rabbitmq的配置写在[oslo_messaging_rabbit]这个标签下面,而导致neutron-server.service启动很慢。
确认安装包openstack-neutron-ml2.noarch。
#需要确认安装这些是不是就够了。

yum install -y openstack-neutron openstack-neutron-openvswitch python-neutronclient openstack-neutron-lbaas openstack-neutron-fwaas openstack-neutron-vpnaas

# Networking uses flat + vxlan mode.
neutron net-create --tenant-id c5568b555b7344b99484f637339bf91d sharednet1 --shared --provider:network_type flat --provider:physical_network physnet1
# Subnet without an explicit gateway:
neutron subnet-create --tenant-id c5568b555b7344b99484f637339bf91d --ip-version 4 sharednet1 10.209.230.0/24 --allocation-pool start=10.209.230.200,end=10.209.230.249 --dns_nameservers list=true 114.114.114.114
# Same subnet with an explicit gateway (10.209.230.254):
neutron subnet-create --tenant-id c5568b555b7344b99484f637339bf91d --ip-version 4 sharednet1 10.209.230.0/24 --allocation-pool start=10.209.230.200,end=10.209.230.249 --gateway=10.209.230.254 --dns_nameservers list=true 114.114.114.114
#######################################################################################

Horizon简单安装过去就能用。
#######################################################################################

# NOTE(review): python-django14 packages Django 1.4; Liberty-era horizon
# expects a newer Django (1.7/1.8) — confirm this package choice.
    yum -y install python-django14
yum -y install memcached python-memcached mod_wsgi openstack-dashboard

#######################################################################################


# Compute nodes only need the following packages.
# NOTE(review): the original repeated this identical yum command eight times
# and left several bare "yum -y install" stubs (which would simply error);
# collapsed everything into a single de-duplicated invocation (openstack-utils
# was also listed twice within each line).
yum -y install openstack-utils python-neutronclient openstack-nova-compute openstack-neutron-openvswitch

# Enable and start the compute-node agents: nova-compute, openvswitch, and the neutron OVS agent.
systemctl enable openstack-nova-compute.service; systemctl restart openstack-nova-compute.service; systemctl enable openvswitch.service; systemctl restart openvswitch.service; systemctl enable neutron-openvswitch-agent.service ; systemctl restart neutron-openvswitch-agent.service;

# Restart-only variant (when the services are already enabled).
systemctl restart openstack-nova-compute.service; systemctl restart openvswitch.service; systemctl restart neutron-openvswitch-agent.service;

# Create the external bridge and attach the physical NIC em1.
ovs-vsctl add-br br-ex; ovs-vsctl add-port br-ex em1;

# Pull the neutron configuration from the controller (10.209.230.13) into the current directory.
scp 10.209.230.13:/etc/neutron/neutron.conf ./
scp 10.209.230.13:/etc/neutron/plugins/ml2/ml2_conf.ini ./
scp 10.209.230.13:/etc/neutron/plugins/ml2/openvswitch_agent.ini ./









阅读(3969) | 评论(0) | 转发(0) |
给主人留下些什么吧!~~