openstack linux bridge ovs为什么不用ovs

[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]#
[root@wb5 ~]# ovs-vsctl show =========================原始======================
bf9e88f0-f9f7-4878-81df-6faa332ab037
Bridge br-tun
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-ex
Port br-ex
Interface br-ex
type: internal
Port "ens256"
Interface "ens256"
Bridge br-int
fail_mode: secure
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port br-int
Interface br-int
type: internal
ovs_version: "2.3.0"
[root@wb5 ~]# create admin-int=====================增加br-int的一个tap的port=========================
-bash: create: command not found
[root@wb5 ~]# ovs-vsctl show
bf9e88f0-f9f7-4878-81df-6faa332ab037
Bridge br-tun
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-ex
Port br-ex
Interface br-ex
type: internal
Port "ens256"
Interface "ens256"
Bridge br-int
fail_mode: secure
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port br-int
Interface br-int
type: internal
Port "tapb"-----------------------------------------------------------------
Interface "tapb"
type: internal
ovs_version: "2.3.0"
[root@wb5 ~]# add admin-router=============================没有变化================
-bash: add: command not found
[root@wb5 ~]# ovs-vsctl show
bf9e88f0-f9f7-4878-81df-6faa332ab037
Bridge br-tun
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-ex
Port br-ex
Interface br-ex
type: internal
Port "ens256"
Interface "ens256"
Bridge br-int
fail_mode: secure
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port br-int
Interface br-int
type: internal
Port "tapb"
Interface "tapb"
type: internal
ovs_version: "2.3.0"
[root@wb5 ~]# add ext-net======================================没有变化=========
-bash: add: command not found
[root@wb5 ~]# ovs-vsctl show
bf9e88f0-f9f7-4878-81df-6faa332ab037
Bridge br-tun
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-ex
Port br-ex
Interface br-ex
type: internal
Port "ens256"
Interface "ens256"
Bridge br-int
fail_mode: secure
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port br-int
Interface br-int
type: internal
Port "tapb"
Interface "tapb"
type: internal
ovs_version: "2.3.0"
[root@wb5 ~]# add ext-subnet
101~105 108~110=====================增加br-int的一个tap的port====================
-bash: add: command not found
[root@wb5 ~]# ovs-vsctl show
bf9e88f0-f9f7-4878-81df-6faa332ab037
Bridge br-tun
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-ex
Port br-ex
Interface br-ex
type: internal
Port "ens256"
Interface "ens256"
Bridge br-int
fail_mode: secure
Port "tap7d3cee3e-2b"
Interface "tap7d3cee3e-2b"----------------------------------------------------------------------
type: internal
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port br-int
Interface br-int
type: internal
Port "tapb"
Interface "tapb"
type: internal
ovs_version: "2.3.0"
[root@wb5 ~]# add interface to extnet in admin-router==========================没有变化========
-bash: add: command not found
[root@wb5 ~]# ovs-vsctl show
bf9e88f0-f9f7-4878-81df-6faa332ab037
Bridge br-tun
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-ex
Port br-ex
Interface br-ex
type: internal
Port "ens256"
Interface "ens256"
Bridge br-int
fail_mode: secure
Port "tap7d3cee3e-2b"
Interface "tap7d3cee3e-2b"
type: internal
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port br-int
Interface br-int
type: internal
Port "tapb"
Interface "tapb"
type: internal
ovs_version: "2.3.0"
[root@wb5 ~]# add interface to admin-int in admin-router===========================没有变化==================
-bash: add: command not found
[root@wb5 ~]# ovs-vsctl show
bf9e88f0-f9f7-4878-81df-6faa332ab037
Bridge br-tun
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-ex
Port br-ex
Interface br-ex
type: internal
Port "ens256"
Interface "ens256"
Bridge br-int
fail_mode: secure
Port "tap7d3cee3e-2b"
Interface "tap7d3cee3e-2b"
type: internal
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port br-int
Interface br-int
type: internal
Port "tapb"
Interface "tapb"
type: internal
ovs_version: "2.3.0"
[root@wb5 ~]# add a vm 192.168.1.6=====================================增加了br-int的一个qvo的port=======
-bash: add: command not found
[root@wb5 ~]# ovs-vsctl show
bf9e88f0-f9f7-4878-81df-6faa332ab037
Bridge br-tun
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-ex
Port br-ex
Interface br-ex
type: internal
Port "ens256"
Interface "ens256"
Bridge br-int
fail_mode: secure
Port "tap7d3cee3e-2b"
Interface "tap7d3cee3e-2b"
type: internal
Port "qvo1a7bcce3-c9"-------------------------------------------------------------
Interface "qvo1a7bcce3-c9"
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port br-int
Interface br-int
type: internal
Port "tapb"
Interface "tapb"
type: internal
ovs_version: "2.3.0"
[root@wb5 ~]# add a vm2 192.168.1.7=========================================增加了br-int的一个qvo的port=============
-bash: add: command not found
[root@wb5 ~]# ovs-vsctl show
bf9e88f0-f9f7-4878-81df-6faa332ab037
Bridge br-tun
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-ex
Port br-ex
Interface br-ex
type: internal
Port "ens256"
Interface "ens256"
Bridge br-int
fail_mode: secure
Port "tap7d3cee3e-2b"
Interface "tap7d3cee3e-2b"
type: internal
Port "qvo1a7bcce3-c9"
Interface "qvo1a7bcce3-c9"
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port "qvo824b8f9d-c5"----------------------------------------------------------------------
Interface "qvo824b8f9d-c5"
Port br-int
Interface br-int
type: internal
Port "tapb"
Interface "tapb"
type: internal
ovs_version: "2.3.0"
[root@wb5 ~]#
在增加“虚机”和“有子网的网络”的时候,才会在br-int这个网桥增加一个port(虚机是qvo, 网络是tap)
阅读(...) 评论()下次自动登录
现在的位置:
& 综合 & 正文
理解OpenStack中的OpenvSwitch的几个要点
OpenvSwitch是实现虚拟化网络的重要基础组件,在OpenStack中利用OpenvSwitch作为底层部件来完成虚拟网络提供和租户网络管理。
在部署和应用OpenStack的过程中,可能会碰到网络相关的一些问题,能够准确的理解OpenStack中OpenvSwitch的角色和网络的理念,会有助于解决问题和快速部署。
OpenvSwitch可以认为是一种Linux Bridge的实现,只不过功能更多一些。因此,它完全可以替代Bridge。原先Bridge可以实现的功能在OpenvSwitch中都可以实现。
OpenvSwitch不等于OpenFlow Switch,恰恰相反,支持OpenFlow只是它的一个feature而已,可能还没有支持OvsDB协议更重要。因此,它在实现上自定义了很多地方,只是兼容OpenFlow协议。比如在OpenvSwitch中用户态的vswitchd和内核态的datapath。vswitchd会总管流表,而datapath中有精确匹配的部分流表,类似cache架构。
正因为OpenvSwitch不仅仅是一个OpenFlow Switch,它的流表组成除了of流表外,还有其他一些(隐藏)流表。这些隐藏流表是由于默认交换机和控制器在同一网络中(in-band),因此要保证两者互通。要关闭默认的inband可以通过“ovs-vsctl set controller br0 connection-mode=out-of-band”。
检查所有流表信息可以通过ovs-appctl bridge/dump-flows br0,这往往要比ovs-ofctl dump-flows br0打印出更多的信息来。ovs-dpctl dump-flows [dp]可以打印出datapath的流表信息。ovs-appctl dpif/dump-flows <bridge>可以打印出指定bridge上的datapath流。
OpenvSwitch可以实现访问控制功能,通过转发规则,可以实现简单的安全行为,包括通过、禁止等。现在OpenStack中的安全组是由iptables实现的,造成在节点上多了好几级的Bridge。理论上,以后都可以统一由OpenvSwitch来管理。
支持VLan(eth0为trunk口,创建vlan 1的tap0口):
ovs-vsctl add-br br0
ovs-vsctl add-port br0 eth0
ovs-vsctl add-port br0 tap0 tag=1
OpenvSwitch 在无法连接到控制器时候(fail mode)可以选择两种fail状态,一种是standalone,一种是secure状态。如果是配置了standalone(或者未设置fail mode)mode,在三次探测控制器连接不成功后,此时ovs-vswitchd将会接管转发逻辑(后台仍然尝试连接到控制器,一旦连接则退出fail状态),OpenvSwitch将作为一个正常的mac 学习的二层交换机。如果是配置了secure mode,则ovs-vswitchd将不会自动配置新的转发流表,OpenvSwitch将按照原先有的流表转发。可以通过下面命令进行管理。
ovs-vsctl get-fail-mode bridge
ovs-vsctl del-fail-mode bridge
ovs-vsctl set-fail-mode bridge standalone|secure
&&&&推荐文章:
【上篇】【下篇】初探OpenStack Mitaka - OpenStack Mitaka安装部署_服务器应用_Linux公社-Linux系统门户网站
你好,游客
初探OpenStack Mitaka
OpenStack Mitaka安装部署
来源:Linux社区&
作者:徐超
北京时间4月8日,OpenStack社区如期发布了OpenStack的第13个版本&&Mitaka,新版本更多聚焦于可管理性、可扩展性和终端用户体验方面。OpenStack作为一个IaaS基础设施架构的集大成者,通过支持、集成众多插件的方式,向用户提供更多可选择的功能和需求,比如SDN、NFV、Container等。OpenStack已经成为众多企业和服务器提供商的云平台选择之一。
由于,笔者更多从事的是OpenStack研发测试体系相关的CI-CT-CD工作。因此,在笔者看来,OpenStack另一个非常成功的一方面是其面向所有人的参与、贡献和交流等平台。为了协调这些,OpenStack社区构建和维护了一套非常完整的研发测试系统。这些基础设施,无疑是奠定OpenStack取得成功的重要基石之一。
鉴于,OpenStack的开发和体验,一般是使用Devstack的方式自动化部署。因此,这里基于自己的实践过程,总结出一份本日Release的Mitaka完整安装过程。
与其他资料所不同的是,本次安装较详细的给出了安装后的相关网络配置,相信能给初学或体验者带来更好的体验。
一台 7.2 VM;
一张NAT网卡;
ip地址10.10.10.129;
1、关闭iptables防火墙和selinux
# vim /etc/selinux/config
#SELINUX=enforcing        # 注释掉原有配置
SELINUX=disabled          # 增加此行
# yum -y install iptables-services
# systemctl disable iptables
2、安装epel源和git
# yum -y install epel-release git
# yum -y install net-tools
3、准备Devstack&
下载代码:
# cd /home
# git clone https://github.com/openstack-dev/devstack.git -b stable/mitaka    //指定clone mitaka版本
4、需要创建stack用户运行
# cd /home/devstack/tools/
# bash ./create-stack-user.sh
5、修改devstack目录权限,让stack用户可以运行
# chown -R stack:stack /home/devstack
# chmod 777 /opt/stack -R
6、切换到stack用户下
# su stack
$ cd /home/devstack
7、编辑创建localrc文件,添加以下内容:
ADMIN_PASSWORD=admin
DATABASE_PASSWORD=$ADMIN_PASSWORD
RABBIT_PASSWORD=$ADMIN_PASSWORD
SERVICE_PASSWORD=$ADMIN_PASSWORD
SERVICE_TOKEN=$ADMIN_PASSWORD
# Target Path
DEST=/opt/stack
# Enable Logging
LOGFILE=$DEST/logs/stack.sh.log
VERBOSE=True
LOG_COLOR=True
SCREEN_LOGDIR=$DEST/logs
KEYSTONE_TOKEN_FORMAT=UUID
enable_service n-novnc n-cauth
disable_service n-net
ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron
ENABLED_SERVICES+=,q-lbaas,q-vpn,q-fwaas
#enable_service s-proxy s-object s-container s-accounts
#SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
VOLUME_GROUP="cinder-volumes"
ENABLED_SERVICES+=,cinder,c-api,c-vol,c-sch,c-bak
# Ceilometer
#enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api
#enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator
enable_service heat h-api h-api-cfn h-api-cw h-eng
enable_service tempest
enable_service trove tr-api tr-tmgr tr-cond
enable_service sahara
enable_plugin murano git://git.openstack.org/openstack/murano
enable_service murano-cfapi
enable_service g-glare
MURANO_APPS=io.murano.apps.apache.Tomcat,io.murano.apps.Guacamole
enable_service murano murano-api murano-engine
HOST_IP=10.10.10.129 //本机IP地址&
FIXED_RANGE=10.0.0.0/24 //私有IP网段&
NETWORK_GATEWAY=10.0.0.1 //私有网段网关
注意:若需要安装其他服务,请参考其他资料。
8、运行Devstack,执行安装
$ ./stack.sh
注意:使用的是stack用户运行。
9、默认Devstack创建 admin和demo两个用户,通过设置环境变量可以进行相关操作。进入到/home/devstack目录下。&
admin 用户:
$ source openrc admin admin
demo 用户:
$ source openrc demo demo
安装失败时,可以再次执行安装命令。
$ ./unstack.sh && ./stack.sh
11、配置网络&
由于在Devstac安装过程中,将br-ex的地址设置成了其他ip,因此需要将br-ex地址清除掉,重新配置。
$ sudo ip addr flush br-ex
$ sudo ovs-vsctl del-br br-ex
之后将物理网卡eno(即eth0)作为br-ex的port,之后创建的虚拟机就可以通过eth0访问网络,也能登陆Dashboard了,Host也可以通过floating ip访问虚拟机。&
这里,我给出自己的配置内容
# cat ifcfg-eno
TYPE=OVSPort
DEVICE=eno
DEVICETYPE=ovs
OVS_BRIDGE=br-ex
ONBOOT=yes
# cat ifcfg-br-ex&
TYPE=OVSBridge
DEVICE=br-ex
DEVICETYPE=ovs
BOOTPROTO=static
IPADDR=10.0.0.9
NETMASK=255.255.255.0
GATEWAY=10.0.0.2
# ovs-vsctl add-port br-ex eno ;systemctl restart network
PS:因为,一般VM是通过br-ex外部网桥的ip出外网的,所以,必须保证能ping通。
关闭Selinux、iptables;
如果部署的是GRE、VXLAN网络,使用ip a命令,查看br-ex的ip地址,是否能ping通;
使用ovs-vsctl show命令,查看网桥br-ex的设置情况。这里的eno是我的实际网卡,即eth0,如下图所示:
3)ip a查看br-ex ip地址
# ip a | grep br-ex
14: br-ex: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN
inet 10.10.10.129/24 brd 10.10.10.255 scope global br-ex
最后,我们使用这个ip地址登陆Dashboard!
三.Horizon有哪些变化
1.登录Dashboard&
http://10.10.10.129/dashboard&
用户名:admin&
密码:admin&
2. Horizon有哪些变化
1)Keystone&&身份管理变化&
Mitaka版本界面中,新增加了组、域和角色的操作功能。原来只能通过命令行操作的,现在也能通过界面来实现了。&
组:就是把用户放在一个组里,一个用户可以属于多个组,这样可以方便进行权限管理。&
角色:也就是给创建项目的用户,分配member、admin的roles。&
域:将用户和其他组使用的基础设施相隔离,具体管理的有组和组下的用户。
如下图所示
2)Cinder&&云硬盘一致性组
3)Horizon&&添加开发者(Bootstrap主题)面板&
Mitaka版本新增加的Bootstrap主题预览面板功能,可以让用户在Default和Bootstrap两种主题中自由切换,并且可以在Bootstrap主题中进行自由配置。
4)Nova&&主机集合&
主机集合,就是把计算节点分组,一个计算节点只能属于一个组。这样我们在资源调度上可以更加灵活,根据需求把创建的虚拟机放到相应的主机集合里。主机集合,用户是看不到的,是管理员通过设置,调度生效。通常,适用于大规模的环境。
5)Nova&&元数据定义&
可以让用户导入自定义的元数据和删除等。
此外,云主机类型中,也新增加了许多字段,比如RX/TX 因子、Swap磁盘等。
3.一些建议&
Devstack安装trove组件时,会默认下载mysql.qcow2镜像。为了提高安装进度,可以事先自己下载,保存到该目录下/home/devstack/files/,下载地址:http://tarballs.openstack.org/trove/images//mysql.qcow2。
如果你安装和配置成功了,请立即对该虚拟机平台做个快照,方便以后做各种操作后出现问题时,利于恢复;并避免因宿主系统关机,导致的不必要问题。
四.Screen的使用
既然,我们一般使用DevStack部署OpenStack环境来进行开发,那么掌握Screen的使用就是必须的了。
Screen是一个窗口命令,当你需要离开电脑一阵,然后回来再继续在这个窗口工作,可以启动一个screen,离开时通过screen -d xxx退出这个窗口,窗口中的任务会在后台执行。回来后可以通过screen -x xxx再次打开窗口继续工作。
比如scp一个大文件的时候,如果你不是在screen环境,退出终端或者ssh连接的话,这个任务将终止。使用screen后,screen -d这个任务就在后台运行,回来后再ssh连接到服务器,然后screen -x xxx再打开刚才的那个窗口,可以看到任务继续再执行。
Devstack的所有进程都在 stack用户的screen中运行,可以通过screen -x stack重新打开窗口,打开后最下边会有一行工具栏显示所有在这个窗口中运行的任务,可以通过:
ctrl+a+n切换下一个任务,
ctrl+a+p切换前一个任务,
crtl+a在最近两次任务之间切换;
Ctrl+a+d退出当前的screen;
切换到某一个任务时,比如需要重启这个服务,可以ctrl+c先停掉,通过向上箭头查阅命令历史记录,第一条应该是启动这个服务的命令,再次执行,这个任务开始运行。
调试python代码特别有用,import pdb;pdb.set_trace()到某一个文件后,然后再次运行这个任务,执行到断点是,任务窗口中就可以调试这个任务了。至于pdb的用法这里就不介绍了。
1)列出screens:
$ screen -list
There is a screen on:
51432.stack (Detached)
1 Socket in /var/run/screen/S-stack.
2)进入screen:
$ screen -x 51432    # 51432 为 screen 的 ID 号
3)中断screen session:
$ screen -S 51432 -X quit
其他资料:&
Devstack docs:http://docs.openstack.org/developer/devstack/
下面是小编为你精选的Openstack相关知识,看看是否有你喜欢的:
在Ubuntu 12.10 上安装部署Openstack
Ubuntu 12.04 OpenStack Swift单节点部署手册
OpenStack云计算快速入门教程
企业部署OpenStack:该做与不该做的事
CentOS 6.5 x64bit 快速安装OpenStack
徐超,专注于工作和研究基于OpenStack产品研发测试过程的持续集成、持续测试和持续部署/交付领域,曾参与创建国内一OpenStack服务提供商从0到1+的测试体系。
本文永久更新链接地址:
相关资讯 & & &
& (05月03日)
   同意评论声明
   发表
尊重网上道德,遵守中华人民共和国的各项有关法律法规
承担一切因您的行为而直接或间接导致的民事或刑事法律责任
本站管理人员有权保留或删除其管辖留言中的任意内容
本站有权在网站内转载或引用您的评论
参与本评论即表明您已经阅读并接受上述条款博客分类:
参考L版本的linuxbridge的安装方式
和k版本的ovs方式结合修改
systemctl stop NetworkManager&
否则各种坑
[url]https://review.openstack.org/#/c//neutron/agent/linux/ip_lib.py [/url]
☆★★★★★★★★★★★★controller node begin★★★★★★★★★★★★
■■■■■■■■安装controller节点nova begin■■■■■■■■■■■■■■■■■■■
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'vcontroller' IDENTIFIED BY 'haoning';
openstack user create --password haoning nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region wuhan compute public http://vcontroller:8774/v2/%\(tenant_id\)s
openstack endpoint create --region wuhan compute internal http://vcontroller:8774/v2/%\(tenant_id\)s
openstack endpoint create --region wuhan compute admin http://vcontroller:8774/v2/%\(tenant_id\)s
yum install openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient -y
-----------------
/etc/nova/nova.conf
[database]
connection = mysql://nova:haoning@vcontroller/nova
[DEFAULT]
rpc_backend = rabbit
[oslo_messaging_rabbit]
rabbit_host = vcontroller
rabbit_userid = openstack
rabbit_password = haoning
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://vcontroller:5000
auth_url = http://vcontroller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = haoning
[DEFAULT]
my_ip = 192.168.139.74
[DEFAULT]
#network_api_class = nova.network.neutronv2.api.API
#security_group_api = neutron
#linuxnet_interface_driver = nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
#firewall_driver = nova.virt.firewall.NoopFirewallDriver
[vnc]
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
[glance]
host = vcontroller
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[DEFAULT]
enabled_apis=osapi_compute,metadata
[DEFAULT]
verbose = True
su -s /bin/sh -c "nova-manage db sync" nova
systemctl enable openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
■■■■■■■■安装controller节点nova end■■■■■■■■■■■■■■■■■■■
■■■■■■■■安装compute节点nova begin■■■■■■■■■■■■■■■■■■■
yum install openstack-nova-compute sysfsutils -y
----------------
/etc/nova/nova.conf
[DEFAULT]
rpc_backend = rabbit
[oslo_messaging_rabbit]
rabbit_host = vcontroller
rabbit_userid = openstack
rabbit_password = haoning
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://vcontroller:5000
auth_url = http://vcontroller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = haoning
[DEFAULT]
my_ip = 192.168.139.72
[DEFAULT]
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 192.168.139.72
novncproxy_base_url = http://vcontroller:6080/vnc_auto.html
[glance]
host = vcontroller
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[DEFAULT]
verbose = True
[libvirt]
#virt_type = qemu
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
nova service-list
nova endpoints
nova image-list
■■■■■■■■安装compute节点nova end■■■■■■■■■■■■■■■■■■■■■■■
------------------------------------------------neutron------------------------------------
■■■■■■■■安装controller节点neutron begin■■■■■■■■■■■■■■■■■■■
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'vcontroller' IDENTIFIED BY 'haoning';
openstack user create --password haoning neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region wuhan network public http://vcontroller:9696
openstack endpoint create --region wuhan network internal http://vcontroller:9696
openstack endpoint create --region wuhan network admin http://vcontroller:9696
yum install openstack-neutron openstack-neutron-ml2 python-neutronclient which -y
--------------------
/etc/neutron/neutron.conf
[database]
connection = mysql://neutron:haoning@vcontroller/neutron
[DEFAULT]
rpc_backend = rabbit
[oslo_messaging_rabbit]
rabbit_host = vcontroller
rabbit_userid = openstack
rabbit_password = haoning
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://vcontroller:5000
auth_url = http://vcontroller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = haoning
#################
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
[DEFAULT]
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://vcontroller:8774/v2
[nova]
auth_url = http://vcontroller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = wuhan
project_name = service
username = nova
password = haoning
[DEFAULT]
verbose = True
-------------------------
/etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-------------------
/etc/nova/nova.conf
[DEFAULT]
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[neutron]
url = http://vcontroller:9696
auth_url = http://vcontroller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = wuhan
project_name = service
username = neutron
password = haoning
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
systemctl restart openstack-nova-api.service
systemctl restart openstack-nova-scheduler.service
systemctl restart openstack-nova-conductor.service
####一步一步执行,否则会有错误assert cur is not self.greenlet, 'Cannot switch to MAINLOOP from MAINLOOP
systemctl enable neutron-server.service
systemctl start neutron-server.service
neutron ext-list
-----network node---
--------------
/etc/sysctl.conf
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
------------------
/etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = rabbit
[oslo_messaging_rabbit]
rabbit_host = vcontroller
rabbit_userid = openstack
rabbit_password = haoning
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://vcontroller:5000
auth_url = http://vcontroller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = haoning
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
[DEFAULT]
verbose = True
/etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = gre
mechanism_drivers = openvswitch
#In the [ml2_type_flat] section, configure the external flat provider network:
[ml2_type_flat]
flat_networks = external
[ml2_type_gre]
tunnel_id_ranges = 1:1000
#In the [securitygroup] section, enable security groups, enable ipset, and configure the OVS iptables firewall driver:
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
#In the [ovs] section, enable tunnels, configure the local tunnel endpoint, and map the external flat provider network to the br-ex external network bridge:
[ovs]
local_ip = 192.168.139.74
bridge_mappings = external:br-ex
#Replace INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS with the IP address of the instance tunnels network interface on your network node.
#In the [agent] section, enable GRE tunnels:
[agent]
tunnel_types = gre
---------------
/etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
external_network_bridge =
router_delete_namespaces = True
verbose = True
---------------
/etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
dhcp_delete_namespaces = True
verbose = True
[DEFAULT]
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
---------------------
/etc/neutron/dnsmasq-neutron.conf
dhcp-option-force=26,1454
pkill dnsmasq
-----------------------
/etc/neutron/metadata_agent.ini
[DEFAULT]
auth_uri = http://vcontroller:5000
auth_url = http://vcontroller:35357
auth_region = wuhan
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = haoning
[DEFAULT]
nova_metadata_ip = vcontroller
[DEFAULT]
metadata_proxy_shared_secret = METADATA_SECRET
[DEFAULT]
verbose = True
----------
/etc/nova/nova.conf
[neutron]
service_metadata_proxy = True
metadata_proxy_shared_secret = METADATA_SECRET
systemctl restart openstack-nova-api.service
systemctl enable openvswitch.service
systemctl start openvswitch.service
ovs-vsctl add-br br-ex
####ovs-vsctl add-port br-ex eth0
cp /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-br-ex
[root@vcontroller network-scripts]# cat ifcfg-br-ex
TYPE=Ethernet
BOOTPROTO=static
DEFROUTE=yes
PEERDNS=no
PEERROUTES=no
IPV4_FAILURE_FATAL=no
IPV6INIT=no
NAME=br-ex
ONBOOT=yes
DEVICE=br-ex
IPADDR=192.168.139.74
NETMASK=255.255.240.0
GATEWAY=192.168.128.1
DEVICETYPE=ovs
TYPE=OVSBridge
[root@vcontroller network-scripts]# cat ifcfg-eth0
TYPE=Ethernet
BOOTPROTO=static
DEFROUTE=yes
PEERDNS=no
PEERROUTES=no
IPV4_FAILURE_FATAL=no
IPV6INIT=no
NAME=eth0
ONBOOT=yes
DEVICE=eth0
DEVICETYPE=ovs
OVS_BRIDGE=br-ex
TYPE=OVSPort
ip link
如果br-ex没有up起来就
ip link set br-ex up
ip link set ovs-system up
★★★★★★★★★★★★★★★★★★★
systemctl stop NetworkManager
这个很重要
systemctl disable NetworkManager
systemctl restart network
ethtool -K eth0 gro off
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
# sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-ovs-cleanup.service
systemctl start neutron-openvswitch-agent.service
systemctl start neutron-l3-agent.service
systemctl start neutron-dhcp-agent.service
systemctl start neutron-metadata-agent.service
一个一个启动,否则可能会报greenlet的协程的问题
neutron agent-list
■■■■■■■■安装controller节点neutron end■■■■■■■■■■■■■■■■■■
■■■■■■■■安装compute节点neutron begin■■■■■■■■■■■■■■■■■■
---------------------
/etc/sysctl.conf
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch -y
------------------------
/etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = rabbit
[oslo_messaging_rabbit]
rabbit_host = vcontroller
rabbit_userid = openstack
rabbit_password = haoning
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://vcontroller:5000
auth_url = http://vcontroller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = haoning
################################
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
[DEFAULT]
verbose = True
------------------------------------
/etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[ovs]
local_ip = 192.168.139.72
[agent]
tunnel_types = gre
systemctl enable openvswitch.service
systemctl start openvswitch.service
--------------
/etc/nova/nova.conf
[DEFAULT]
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[neutron]
url = http://vcontroller:9696
auth_url = http://vcontroller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = wuhan
project_name = service
username = neutron
password = haoning
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
# sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
systemctl restart openstack-nova-compute.service
systemctl enable neutron-openvswitch-agent.service
systemctl start neutron-openvswitch-agent.service
neutron agent-list
■■■■■■■■安装compute节点neutron end■■■■■■■■■■■■■■■■■■■
-----例子----
neutron net-create ext-net --router:external --provider:physical_network external --provider:network_type flat
neutron net-list
#neutron subnet-create ext-net EXTERNAL_NETWORK_CIDR --name ext-subnet --allocation-pool start=FLOATING_IP_START,end=FLOATING_IP_END --disable-dhcp --gateway EXTERNAL_NETWORK_GATEWAY
#neutron subnet-create ext-net 192.168.142.0/20 --name ext-subnet --allocation-pool start=192.168.142.170,end=192.168.142.179 --disable-dhcp --gateway 192.168.128.1
neutron subnet-create ext-net 192.168.142.0/20 --name ext-subnet --allocation-pool start=192.168.142.170,end=192.168.142.179 --dns-nameserver 8.8.4.4 --gateway 192.168.128.1
ovs-vsctl show
[root@vcontroller ~(keystone_admin_v3)]# ovs-vsctl show
1df8c1b3-def6-4b23-851f-d94ed709100e
&&& Bridge br-ex
&&&&&&& Port phy-br-ex
&&&&&&&&&&& Interface phy-br-ex
&&&&&&&&&&&&&&& type: patch
&&&&&&&&&&&&&&& options: {peer=int-br-ex}
&&&&&&& Port "eth0"
&&&&&&&&&&& Interface "eth0"
&&&&&&& Port br-ex
&&&&&&&&&&& Interface br-ex
&&&&&&&&&&&&&&& type: internal
&&& Bridge br-int
&&&&&&& fail_mode: secure
&&&&&&& Port br-int
&&&&&&&&&&& Interface br-int
&&&&&&&&&&&&&&& type: internal
&&&&&&& Port patch-tun
&&&&&&&&&&& Interface patch-tun
&&&&&&&&&&&&&&& type: patch
&&&&&&&&&&&&&&& options: {peer=patch-int}
&&&&&&& Port int-br-ex
&&&&&&&&&&& Interface int-br-ex
&&&&&&&&&&&&&&& type: patch
&&&&&&&&&&&&&&& options: {peer=phy-br-ex}
&&&&&&& Port "tap1f149beb-f7"
&&&&&&&&&&& tag: 1
&&&&&&&&&&& Interface "tap1f149beb-f7"
&&&&&&&&&&&&&&& type: internal
&&& Bridge br-tun
&&&&&&& fail_mode: secure
&&&&&&& Port patch-int
&&&&&&&&&&& Interface patch-int
&&&&&&&&&&&&&&& type: patch
&&&&&&&&&&&&&&& options: {peer=patch-tun}
&&&&&&& Port br-tun
&&&&&&&&&&& Interface br-tun
&&&&&&&&&&&&&&& type: internal
&&&&&&& Port "gre-c0a88b48"
&&&&&&&&&&& Interface "gre-c0a88b48"
&&&&&&&&&&&&&&& type: gre
&&&&&&&&&&&&&&& options: {df_default="true", in_key=flow, local_ip="192.168.139.74", out_key=flow, remote_ip="192.168.139.72"}
&&& ovs_version: "2.4.0"
[root@vcontroller ~(keystone_admin_v3)]#
-----租户网络---
neutron net-create demo-net
#neutron subnet-create demo-net TENANT_NETWORK_CIDR --name demo-subnet --dns-nameserver DNS_RESOLVER --gateway TENANT_NETWORK_GATEWAY
neutron subnet-create demo-net 192.168.1.0/24 --name demo-subnet --dns-nameserver 8.8.4.4 --gateway 192.168.1.1
neutron net-list
neutron subnet-list
neutron router-create demo-router
neutron router-list
neutron router-interface-add demo-router demo-subnet
neutron router-port-list& demo-router
neutron router-gateway-set demo-router ext-net
neutron router-port-list& demo-router
ping -c 4 192.168.142.171
ip netns
-----create& a& vm----------
ssh-keygen -q -N ""
nova keypair-add --pub-key ~/.ssh/id_rsa.pub mykey
nova keypair-list
nova secgroup-list
nova& secgroup-list-rules default
nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
nova flavor-list
nova image-list
neutron net-list
nova secgroup-list
#nova boot --flavor m1.tiny --image cirros --nic net-id=PRIVATE_NET_ID --security-group default --key-name mykey private-instance
#nova boot --flavor m1.tiny --image cirros --nic net-id=425e9cbb-f74f--8e97e59c2be4 --security-group default --key-name mykey private-instance
nova list
#Add security group rules
nova boot --flavor m1.tiny --image cirros --nic net-id=425e9cbb-f74f--8e97e59c2be4 --security-group default --key-name mykey private-instance
nova list
nova get-vnc-console private-instance novnc
neutron net-list
ip netns
ip netns exec qdhcp-425e9cbb-f74f--8e97e59c2be4 ssh cirros@192.168.1.3
neutron floatingip-create& ext-net
nova floating-ip-associate private-instance& 192.168.142.172
nova list
ssh cirros@192.168.142.172
haoningabc
浏览: 819040 次
来自: 北京
我喜欢代码简洁易读,服务稳定的推送服务,前段时间研究了一下go ...
http://www./WebSoc ...
感谢您的分享,给我提供了很大的帮助,在使用过程中发现了一个问题 ...
leebyte 写道太NB了,期待早日用上Killinux!么 ...
太NB了,期待早日用上Killinux!
(window.slotbydup=window.slotbydup || []).push({
id: '4773203',
container: s,
size: '200,200',
display: 'inlay-fix'

我要回帖

更多关于 openstack ovs vxlan 的文章

 

随机推荐