<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>TheKenApp Tracker</title>
    <link>https://communities.vmware.com/wbsdv95928/tracker</link>
    <description>TheKenApp Tracker</description>
    <pubDate>Wed, 15 Nov 2023 12:16:25 GMT</pubDate>
    <dc:date>2023-11-15T12:16:25Z</dc:date>
    <item>
      <title>Re: Need to upgrade DVSwitch from 6.5.0 to 7.0.3 - ESXi 7.0.3 and later</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-to-upgrade-DVSwitch-from-6-5-0-to-7-0-3-ESXi-7-0-3-and/m-p/2982377#M14753</link>
      <description>&lt;P&gt;OK, I will look into that as an option. Thanks!&lt;/P&gt;</description>
      <pubDate>Tue, 15 Aug 2023 19:57:45 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-to-upgrade-DVSwitch-from-6-5-0-to-7-0-3-ESXi-7-0-3-and/m-p/2982377#M14753</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2023-08-15T19:57:45Z</dc:date>
    </item>
    <item>
      <title>Re: Need to upgrade DVSwitch from 6.5.0 to 7.0.3 - ESXi 7.0.3 and later</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-to-upgrade-DVSwitch-from-6-5-0-to-7-0-3-ESXi-7-0-3-and/m-p/2982375#M14751</link>
      <description>&lt;P&gt;Thanks for the advice. We have spare NIC ports on our hosts, but not on the switch that services them, so I will not be able to pursue this option.&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Tue, 15 Aug 2023 19:52:14 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-to-upgrade-DVSwitch-from-6-5-0-to-7-0-3-ESXi-7-0-3-and/m-p/2982375#M14751</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2023-08-15T19:52:14Z</dc:date>
    </item>
    <item>
      <title>Need to upgrade DVSwitch from 6.5.0 to 7.0.3 - ESXi 7.0.3 and later</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-to-upgrade-DVSwitch-from-6-5-0-to-7-0-3-ESXi-7-0-3-and/m-p/2982320#M14749</link>
      <description>&lt;P&gt;Hi,&lt;/P&gt;&lt;P&gt;I did create a support request for this issue, but I also want to reach out to the community regarding this as well.&amp;nbsp;&lt;/P&gt;&lt;P&gt;We have multiple version 6.5.0 distributed virtual switches, Which we want to upgrade to "7.0.3 - ESXi 7.0.3 and later".&lt;BR /&gt;My concern is the outage this may cause for VMs.&lt;/P&gt;&lt;P&gt;I figure there are two options available to me:&lt;BR /&gt;1) Upgrade the existing distributed switches, or&lt;BR /&gt;2) Create new Distributed switches, and migrate the network for each VM.&lt;/P&gt;&lt;P&gt;I would like to know if upgrading a distributed switch would cause a disruption of network traffic to/from our VMs, and if so, what kind of outage would this cause (milliseconds, seconds, minutes ...).&lt;/P&gt;&lt;P&gt;Option #2 would be very time consuming, but if this causes less of an outage for each VM, I would create new switches, and change the network adapter for each VM.&lt;/P&gt;&lt;P&gt;Any advice regarding what would be the best process for upgrading our distributed switches to 7.0.3 would be appreciated.&lt;/P&gt;&lt;P&gt;Thanks,&lt;/P&gt;&lt;P&gt;Ken&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Tue, 15 Aug 2023 14:54:49 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-to-upgrade-DVSwitch-from-6-5-0-to-7-0-3-ESXi-7-0-3-and/m-p/2982320#M14749</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2023-08-15T14:54:49Z</dc:date>
    </item>
    <item>
      <title>Re: Skyline advisor not collecting logs, and issue with VC_Events endpoint</title>
      <link>https://communities.vmware.com/t5/Skyline-Community-Discussions/Skyline-advisor-not-collecting-logs-and-issue-with-VC-Events/m-p/2291676#M2392</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;I am not sure why, but I was able to re-validate the collector for the necessary privileges required for Log Assist today.&lt;/P&gt;&lt;P&gt;Nothing changed in our vSphere environment and Skyline appliance, but it works now. &lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Thu, 02 Apr 2020 19:12:36 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/Skyline-Community-Discussions/Skyline-advisor-not-collecting-logs-and-issue-with-VC-Events/m-p/2291676#M2392</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2020-04-02T19:12:36Z</dc:date>
    </item>
    <item>
      <title>Skyline advisor not collecting logs, and issue with VC_Events endpoint</title>
      <link>https://communities.vmware.com/t5/Skyline-Community-Discussions/Skyline-advisor-not-collecting-logs-and-issue-with-VC-Events/m-p/2291674#M2390</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P dir="ltr"&gt;&lt;SPAN style="font-size: 11pt; font-family: Arial; color: #000000;"&gt;Last week we upgraded our vCenter appliance from 6.5 to 6.7U3b&lt;/SPAN&gt;&lt;/P&gt;&lt;P dir="ltr"&gt;&lt;SPAN style="font-size: 11pt; font-family: Arial; color: #000000;"&gt;Upon checking the Skyline appliance, the Skyline collector was no longer working.&lt;/SPAN&gt;&lt;/P&gt;&lt;P dir="ltr"&gt;&lt;SPAN style="font-size: 11pt; font-family: Arial; color: #000000;"&gt;I removed the vCenter server from Skyline, and re-added it again.&lt;/SPAN&gt;&lt;/P&gt;&lt;P dir="ltr"&gt;&lt;SPAN style="font-size: 11pt; font-family: Arial; color: #000000;"&gt;I needed to update the vsphere.local\skyline account password, as well as the admin and root passwords of the skyline appliance. &lt;/SPAN&gt;&lt;/P&gt;&lt;P dir="ltr"&gt;&lt;/P&gt;&lt;P&gt;&lt;SPAN&gt;&lt;SPAN&gt; &lt;/SPAN&gt;&lt;/SPAN&gt;&lt;/P&gt;&lt;P dir="ltr"&gt;&lt;SPAN style="font-size: 11pt; font-family: Arial; color: #000000;"&gt;There are a couple of issues regarding functionality of Skyline:&lt;/SPAN&gt;&lt;/P&gt;&lt;OL&gt;&lt;LI&gt;&lt;SPAN style="font-size: 11pt;"&gt;Endpoint VC_EVENTS reports: “This endpoint has been disabled by VMware”. Is this normal? Any information I find regarding this appears to have been resolved in an earlier version of Skyline.&lt;/SPAN&gt;&lt;/LI&gt;&lt;LI&gt;&lt;SPAN dir="ltr"&gt;&lt;SPAN style="font-size: 11pt;"&gt;The Skyline Advisor reports that our environment is “Healthy”. The inventory of VSphere objects is listed in the Skyline Advisor. However, Log Assist reports: “The necessary privileges required for Log Assist to work are not granted to the Skyline service account. Please revalidate after making necessary changes. 
I verified that the skyline collector read-only role that was created when skyline was first deployed, and assigned to the vsphere.local\skyline account, is correct as per &lt;/SPAN&gt;&lt;A href="https://kb.vmware.com/s/article/59661"&gt;&lt;SPAN style="font-size: 11pt; color: #1155cc;"&gt;&lt;/SPAN&gt;&lt;/A&gt;&lt;A href="https://kb.vmware.com/s/article/59661" target="_blank"&gt;https://kb.vmware.com/s/article/59661&lt;/A&gt;&lt;/SPAN&gt;&lt;SPAN style="font-size: 11pt;"&gt;. So I revalidated, but still have the same issue that logs cannot be collected. &lt;/SPAN&gt;&lt;/LI&gt;&lt;/OL&gt;&lt;P dir="ltr"&gt;&lt;SPAN style="font-size: 11pt; font-family: Arial; color: #000000;"&gt;This morning, I upgraded the Skyline Appliance from 2.2.0.0 Build 14617436 to 2.3.0.2 Build 15347811, hoping this would resolve these issues, but it didn't.&lt;/SPAN&gt;&lt;/P&gt;&lt;P dir="ltr"&gt;&lt;/P&gt;&lt;P dir="ltr"&gt;&lt;SPAN style="font-size: 11pt; font-family: Arial; color: #000000;"&gt;I am not sure how to resolve this issue, and would appreciate assistance. Thanks.&lt;/SPAN&gt;&lt;/P&gt;&lt;P&gt;&lt;SPAN style="font-size: 11pt; font-family: Arial; color: #000000;"&gt; &lt;/SPAN&gt;&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Tue, 31 Mar 2020 13:45:28 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/Skyline-Community-Discussions/Skyline-advisor-not-collecting-logs-and-issue-with-VC-Events/m-p/2291674#M2390</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2020-03-31T13:45:28Z</dc:date>
    </item>
    <item>
      <title>Re: Need clarification of LBT options for dvSwitch</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506759#M1890</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;&lt;SPAN style="color: #666666; font-family: proxima-nova, Arial, sans-serif; font-size: 14px; font-style: normal; font-weight: 400; text-align: left; text-indent: 0px;"&gt;Thanks for taking the time to respond to all my questions &lt;/SPAN&gt;&lt;A _jive_internal="true" data-containerid="-1" data-containertype="-1" data-objectid="1194314" data-objecttype="3" href="https://communities.vmware.com/people/bayupw" name="&amp;amp;amp;lpos=apps_scodevmw : 189" style="padding: 1px 0 1px calc(12px + 0.35ex); font-weight: 400; font-style: normal; font-size: 14px; font-family: proxima-nova, Arial, sans-serif; color: #3399cc; background-position: 0px 50%; text-align: left; text-indent: 0px;"&gt;bayupw&lt;/A&gt;&lt;SPAN style="color: #666666; font-family: proxima-nova, Arial, sans-serif; font-size: 14px; font-style: normal; font-weight: 400; text-align: left; text-indent: 0px;"&gt;, I appreciate it.&lt;/SPAN&gt;&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Tue, 27 Feb 2018 12:48:45 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506759#M1890</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2018-02-27T12:48:45Z</dc:date>
    </item>
    <item>
      <title>Re: Need clarification of LBT options for dvSwitch</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506757#M1888</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;The more I read about LBT, Route Based on Physical NIC Load, from what I understand, if the physical NIC load exceeds 75% utilization, it is the VM load that is taken into account for balancing. I am having a hard time determining if other loads, like you mention vMotion, or more importantly in my case the NFS VM storage, is also monitored and balanced if a physical NIC becomes more than 75% saturated. &lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;In our case, I am considering both links being active, which is how our current VM infrastructure is designed (using Route based on originating VM port).&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;You said that you use LBT/Physical NIC load only for non VMkernel PortGroups such as VM networks. If you use NFS (or ISCSI for that matter), how do you handle load balancing/NIC teaming?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;The blog by Chris Wahl uses LBT/Physical NIC load with NFS. He states "&lt;STRONG style="color: #555555; font-family: 'Open Sans'; font-size: 15px; font-style: normal; text-align: left; text-indent: 0px;"&gt;any portgroup will proactively monitor the vmnic utilization in their team and shift workloads around." &lt;/STRONG&gt;This would seem to indicate that it is not only VM workloads that are monitored and balanced.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Our NFS arrays are on the same subnet. NFS traffic is not routed, and the hosts, and storage are connected to the same physical switch. There is a 4x 10GB lag on the physical switch to the NFS array, so the bottle neck will be on the host. 
If I am understanding LBT correctly, the NFS portgroup/vmk in the DvSwitch will use one of the physical NICs in the host (Without LACP on the switch for these ports, not sure how that traffic would be shared between the ports), but other loads, such as ESX management, and vMotion may be moved if the NFS link is over 75% utilization.&lt;/P&gt;&lt;P&gt;Am I understanding how LBT will work with these non-VM loads correctly?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Thanks again.&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Mon, 26 Feb 2018 15:27:07 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506757#M1888</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2018-02-26T15:27:07Z</dc:date>
    </item>
    <item>
      <title>Re: Need clarification of LBT options for dvSwitch</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506755#M1886</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;Bayu,&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;These will be the network loads for each set of vmnics:&lt;/P&gt;&lt;TABLE border="1" class="jiveBorder" jive-data-cell="{&amp;quot;color&amp;quot;:&amp;quot;#3D3D3D&amp;quot;,&amp;quot;textAlign&amp;quot;:&amp;quot;left&amp;quot;,&amp;quot;padding&amp;quot;:&amp;quot;6&amp;quot;,&amp;quot;backgroundColor&amp;quot;:&amp;quot;transparent&amp;quot;,&amp;quot;fontFamily&amp;quot;:&amp;quot;Helvetica Neue,Helvetica,Arial,Lucida Grande,sans-serif&amp;quot;,&amp;quot;verticalAlign&amp;quot;:&amp;quot;baseline&amp;quot;}" jive-data-header="{&amp;quot;color&amp;quot;:&amp;quot;#505050&amp;quot;,&amp;quot;backgroundColor&amp;quot;:&amp;quot;#F2F2F2&amp;quot;,&amp;quot;textAlign&amp;quot;:&amp;quot;left&amp;quot;,&amp;quot;padding&amp;quot;:&amp;quot;6&amp;quot;,&amp;quot;fontFamily&amp;quot;:&amp;quot;Helvetica Neue,Helvetica,Arial,Lucida Grande,sans-serif&amp;quot;,&amp;quot;verticalAlign&amp;quot;:&amp;quot;baseline&amp;quot;}" style="border: 1px solid #c6c6c6; width: 100%;"&gt;&lt;TBODY&gt;&lt;TR&gt;&lt;TH style="background-color: #f2f2f2; color: #505050; text-align: left; padding: 6px; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline; width: 15%;" valign="middle"&gt;&lt;STRONG&gt;Port Type and Port #&lt;/STRONG&gt;&lt;/TH&gt;&lt;TH style="background-color: #f2f2f2; color: #505050; text-align: left; padding: 6px; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline; width: 8%;" valign="middle"&gt;&lt;STRONG&gt;vmnic#&lt;/STRONG&gt;&lt;/TH&gt;&lt;TH style="background-color: #f2f2f2; color: #505050; text-align: left; padding: 6px; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline; width: 11%;" valign="middle"&gt;&lt;STRONG&gt;Physical switch&lt;/STRONG&gt;&lt;/TH&gt;&lt;TH style="background-color: #f2f2f2; 
color: #505050; text-align: left; padding: 6px; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;" valign="middle"&gt;&lt;STRONG&gt;Networks&lt;/STRONG&gt;&lt;/TH&gt;&lt;/TR&gt;&lt;TR&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;Copper Port#1&lt;/TD&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;vmnic2&lt;/TD&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;Enterasys 7100&lt;/TD&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;vMotion, Fault Tolerance, vSAN and &lt;STRONG&gt;NFS storage&lt;/STRONG&gt;&lt;/TD&gt;&lt;/TR&gt;&lt;TR&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;Copper Port#2&lt;/TD&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;vmnic4&lt;/TD&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;Enterasys 7100&lt;/TD&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, 
Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;vMotion, Fault Tolerance, vSAN and &lt;STRONG&gt;NFS storage&lt;/STRONG&gt;&lt;/TD&gt;&lt;/TR&gt;&lt;TR&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;Fibre port#1&lt;/TD&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;vmnic0&lt;/TD&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;Extreme 670&lt;/TD&gt;&lt;TD style="color: #3d3d3d; text-align: left; padding: 6px; background-color: transparent; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;5 different VM networks, ESX management, backup storage&lt;/TD&gt;&lt;/TR&gt;&lt;TR&gt;&lt;TH style="color: #505050; background-color: #f2f2f2; text-align: left; padding: 6px; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;Fibre port#2&lt;/TH&gt;&lt;TH style="color: #505050; background-color: #f2f2f2; text-align: left; padding: 6px; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;vmnic1&lt;/TH&gt;&lt;TH style="color: #505050; background-color: #f2f2f2; text-align: left; padding: 6px; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;Extreme 670&lt;/TH&gt;&lt;TH style="color: #505050; background-color: #f2f2f2; text-align: left; padding: 6px; font-family: Helvetica\ Neue, Helvetica, Arial, Lucida\ Grande, sans-serif; vertical-align: baseline;"&gt;5 different VM networks, ESX 
management, backup storage&lt;/TH&gt;&lt;/TR&gt;&lt;/TBODY&gt;&lt;/TABLE&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Separate port groups will be made for each network, with vmkernel ports created for vMotion, FT, ESX management, vSAN, and NFS storage (not tied to a specific service).&lt;/P&gt;&lt;P&gt;All ports are 10GB. I am not planning any link aggregation on the physical ports, only VLAN trunking.&lt;/P&gt;&lt;P&gt;Right now, my concern is the NFS traffic. I read through the information you pointed me towards, as well as several other sources. I found a series of whitepapers on &lt;A href="http://wahlnetwork.com/2012/04/23/nfs-on-vsphere-technical-deep-dive-on-same-subnet-storage-traffic/" title="http://wahlnetwork.com/2012/04/23/nfs-on-vsphere-technical-deep-dive-on-same-subnet-storage-traffic/"&gt;NFS on vSphere Part 2 - Technical Deep Dive on Same Subnet Storage Traffic - Wahl Network&lt;/A&gt; &lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;If I am to stick strictly with LBT (route based on physical NIC load) and no link aggregation, I realize that NFS traffic will not be shared among the 2 physical host ports to the 7100 switch stack. Considering we have two 10GB copper ports on each of the 4 new hosts, and a 4x10GB lag from the 7100 to the storage, I am not sure that this would be an issue with 20GB bandwidth on each host to the 7100 stack. I am assuming that the distributed vSwitch will move workloads other than NFS to the other vmnic, if one is saturated. Is this a correct understanding?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Is there any reason I should consider route based on originating virtual port, rather than on physical NIC load, in our opinion?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;I have yet to dig into vSAN, but have some time since this will be a future use case. Though I hope that this architecture would be sufficient.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;What are your thoughts?&lt;/P&gt;&lt;P&gt;Thanks,&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Fri, 23 Feb 2018 16:50:34 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506755#M1886</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2018-02-23T16:50:34Z</dc:date>
    </item>
    <item>
      <title>Re: Need clarification of LBT options for dvSwitch</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506754#M1885</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;vSAN will not be implemented at this point, but we want to have the network defined and available for when we get to that point. Right now, all our VMs are on NFS.&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Thu, 22 Feb 2018 18:50:05 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506754#M1885</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2018-02-22T18:50:05Z</dc:date>
    </item>
    <item>
      <title>Re: Need clarification of LBT options for dvSwitch</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506752#M1883</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;Thanks for the reply Bayu.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;We want to avoid using LACP on the switch stacks, which is why I am evaluating the load based teaming options.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Yes, it does seem to make sense to create 2 different distributed vSwitches, since different traffic is routing to different physical switches. All the port groups on each switch can then be separate, which seems like it would be easier to set up, and for potential trouble shooting in the future.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;For specificity's sake, on all hosts:&lt;/P&gt;&lt;P&gt;vmnic0 &amp;amp; vmnic1 are the fiber connections to the Extreme 670's&lt;/P&gt;&lt;P&gt;vmnic2 &amp;amp; vmnic4 are the copper connections to the Enterasys 7100's&lt;/P&gt;&lt;P&gt;vmnic3 &amp;amp; vmnic5 will not be physically connected.&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Thu, 22 Feb 2018 18:12:14 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506752#M1883</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2018-02-22T18:12:14Z</dc:date>
    </item>
    <item>
      <title>Need clarification of LBT options for dvSwitch</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506750#M1881</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;Hi,&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;I am tasked with creating a new 6.5 cluster, and it will utilize a dvSwitch.&lt;/P&gt;&lt;P&gt;I am trying, with not much success, to determine which load based teaming&amp;nbsp; option will best meet our needs:&lt;/P&gt;&lt;UL&gt;&lt;LI&gt;Route based on originating virtual port&lt;/LI&gt;&lt;LI&gt;Route based on source MAC hash, or&lt;/LI&gt;&lt;LI&gt;Rout based on physical NIC load.&lt;/LI&gt;&lt;/UL&gt;&lt;P&gt;We have four Dell R730 ESXi 6.5 hosts.&lt;/P&gt;&lt;P&gt;Each host has two 10GB 2-port copper cards, and one 10GB 2-port fiber card.&lt;/P&gt;&lt;P&gt;We will use one port on each of the two copper cards for all back-end traffic (vMotion, Fault Tolerance, vSAN and NFS storage). Each host will have 2 physical connections to a 2 member 10GB physical switch stack (Enterasys 7100) for this traffic.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;We will use both fiber ports for all front end traffic (five virtual machine networks, backup network for Veeam backups of VMs, and ESX management).&lt;/P&gt;&lt;P&gt;Each fiber port will connect to one of our two core switches, which are stacked as well (Extreme 670's that have a&amp;nbsp; MLAG connection).&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Since the traffic will be physically separated, I am not sure if I should create one or two distributed switches. This is a new architecture for us, but I am assuming that one switch would be fine, and I define which vmnics each port group would use, correct?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;My current thought is that route based on source MAC hash may not be the best option, basically due to the higher resource consumption. &lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Are there any real advantages over the other two load based teaming options? 
I have been poring over documentation, this forum and other resources in an effort to better understand this technology, but I am still kind of fuzzy. Our goal would be to have redundancy between the two fiber ports, and the two copper ports, as well as a certain degree of load balancing for each pair. It is the load balancing that is confusing me. &lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;From what I am reading, route based on physical NIC load will load balance as needed so that neither link becomes saturated, is this correct? &lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;From what I understand, when using route based on originating virtual port, load balancing occurs as VMs power on, selecting the best path to use. However, they will use the same port until either they are powered off, are vMotioned to another host, or a physical port fails. There is no active load balancing based on the traffic that is being sent by the running VMs, correct?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;If my understanding is correct, our best bet would be to utilize route based on physical NIC load. Would you concur? If not, I am very open to any suggestions or insight you could provide.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Thanks,&lt;/P&gt;&lt;P&gt;Ken&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Thu, 22 Feb 2018 17:14:48 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Need-clarification-of-LBT-options-for-dvSwitch/m-p/506750#M1881</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2018-02-22T17:14:48Z</dc:date>
    </item>
    <item>
      <title>esxtop and client performance chart discrepancy for receive packets dropped</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/esxtop-and-client-performance-chart-discrepancy-for-receive/m-p/439263#M1682</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;I am having difficulty analyzing the various data I receive at the host level, versus within the vSphere client. I need this to determine if a change I made to a VM is fixing a specific problem, as well as to find out if there are other issues with the networking.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;The VM in question is on an ESXi 5.1 host, and it is a Windows server 2008 R2.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;What I did was upgrade the VM hardware from version 7 to version 9, deleted the E1000 NIC and installed the VMXNET3 NIC. This was done because there was a number of receive packets dropped for this VM, and we had been having issues with the DB that runs on it.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;After I made the above changes, this VM is showing a rather large number of receive packets dropped while monitoring via the vSphere client in real-time. Compared to packets received, it would appear that there is a 43% loss (average packets received summation = 763, average receive packets dropped summation = 565).&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;However, when I monitor the host this VM is on with esxtop, and viewing the network statistics after a 10 minute run, all values for %DRPRX = 0.00.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;I am trying to evaluate the fix I made to this VM, because prior to that fix, I was seeing an average %DRPRX of about 2.65% as well as the dropped packets in the client.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Can someone explain to me why esxtop would show 0.00% receive packets dropped, while at the same time the performance charts in the vSphere client show what I believe to be about 43% loss?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Lastly, within the VMXNET3 device on the VM, the values for Rx Ring #1 Size, and Rx Ring #2 Size are blank. Should I be stting this to the default of 1024?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Any help would be appreciated. 
Thanks,&lt;/P&gt;&lt;P&gt;Ken&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Wed, 01 May 2013 18:57:52 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/esxtop-and-client-performance-chart-discrepancy-for-receive/m-p/439263#M1682</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2013-05-01T18:57:52Z</dc:date>
    </item>
    <item>
      <title>vMotion traffic isolation with VDswitch best practice question (ESXi 5.1)</title>
      <link>https://communities.vmware.com/t5/vMotion-Resource-Management/vMotion-traffic-isolation-with-VDswitch-best-practive-question/m-p/865615#M1307</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P class="MsoNormal"&gt;I have a question regarding what is the best practice for vMotion traffic utilizing a VDswitch in vSphere 5.1.&lt;/P&gt;&lt;P class="MsoNormal"&gt;&lt;/P&gt;&lt;P class="MsoNormal"&gt;Our&amp;nbsp; VDS is shared between two (soon to be 3) ESXi 5.1 hosts. They all have&amp;nbsp; twelve 1GB physical NIC ports. I have created 11 port groups as follows:&lt;/P&gt;&lt;UL&gt;&lt;LI&gt;VLAN trunking is utilized.&lt;/LI&gt;&lt;LI&gt;Four&amp;nbsp; iSCSI port groups. They all physically lead to a switch that is&amp;nbsp; isolated from other switches in our infrastructure so we isolate all&amp;nbsp; iSCSI traffic. This is our VLAN 1060. These port groups each have a&amp;nbsp; dedicated physical NIC assigned to them.&lt;/LI&gt;&lt;LI&gt;All other port groups are physically connected to the same switch.&lt;/LI&gt;&lt;LI&gt;One port group is for Fault Tolerance, VLAN 1066.&lt;/LI&gt;&lt;LI&gt;One port group is for vMotion, VLAN 1065.&lt;/LI&gt;&lt;LI&gt;One port group is for Management, VLAN 1100.&lt;/LI&gt;&lt;LI&gt;We&amp;nbsp; have four other port groups, each with their own VLAN ID. These are for&amp;nbsp; VMs that will reside on various VLANs within our infrastructure (VLAN&amp;nbsp; IDs 20, 30, 1100, 2003).&lt;/LI&gt;&lt;LI&gt;All non-iSCSI port groups share the&amp;nbsp; same active uplinks within the Teaming and Failover settings of the port&amp;nbsp; group. Route is based on physical NIC load.&lt;/LI&gt;&lt;LI&gt;We are using shares defined in the Network Resource Pool to prioritize traffic:&lt;BR /&gt; NFS=50, Management=5, vMotion =10, vSphere SAN=50, vSphere&amp;nbsp; replication=50, iSCSI=50, VM=20, FT=10. 
I believe these are default&amp;nbsp; values of the network resource pool.&lt;/LI&gt;&lt;/UL&gt;&lt;P class="MsoNormal"&gt;&lt;/P&gt;&lt;P class="MsoNormal"&gt;Here&amp;nbsp; is my question: Should we be isolating vMotion traffic by dedicating&amp;nbsp; physical NICs that are exclusively dedicated for vMotion, rather than&amp;nbsp; isolating via VLAN as I have described above? Will vMotion traffic&amp;nbsp; degrade performance in the way I have it configured above?&lt;/P&gt;&lt;P class="MsoNormal"&gt;&lt;/P&gt;&lt;P class="MsoNormal"&gt;From&amp;nbsp; the various best practice white papers put out by VMware, I understand&amp;nbsp; that vMotion traffic should be isolated from other traffic. I have done&amp;nbsp; this by utilizing different VLANs for different traffic types. However, I&amp;nbsp; am wondering if vMotion traffic should be isolated by using NIC ports&amp;nbsp; dedicated exclusively for vMotion.&lt;/P&gt;&lt;P class="MsoNormal"&gt;&lt;/P&gt;&lt;P class="MsoNormal"&gt;Any help with determining which design is best to use would be greatly appreciated.&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Tue, 26 Feb 2013 19:58:51 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vMotion-Resource-Management/vMotion-traffic-isolation-with-VDswitch-best-practive-question/m-p/865615#M1307</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2013-02-26T19:58:51Z</dc:date>
    </item>
    <item>
      <title>vMotion traffic isolation with VDswitch best practive question (ESXi 5.1)..</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/vMotion-traffic-isolation-with-VDswitch-best-practive-question/m-p/2156559#M10546</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P class="MsoNormal"&gt;I have a question regarding what is the best practice for vMotion traffic utilizing a VDswitch in vSphere 5.1.&lt;/P&gt;&lt;P class="MsoNormal"&gt;&lt;/P&gt;&lt;P class="MsoNormal"&gt;Our VDS is shared between two (soon to be 3) ESXi 5.1 hosts. They all have twelve 1GB physical NIC ports. I have created 11 port groups as follows:&lt;/P&gt;&lt;UL&gt;&lt;LI&gt;VLAN trunking is utilized.&lt;/LI&gt;&lt;LI&gt;Four iSCSI port groups. They all physically lead to a switch that is isolated from other switches in our infrastructure so we isolate all iSCSI traffic. This is our VLAN 1060. These port groups each have a dedicated physical NIC assigned to them.&lt;/LI&gt;&lt;LI&gt;All other port groups are physically connected to the same switch.&lt;/LI&gt;&lt;LI&gt;One port group is for Fault Tolerance, VLAN 1066.&lt;/LI&gt;&lt;LI&gt;One port group is for vMotion, VLAN 1065.&lt;/LI&gt;&lt;LI&gt;One port group is for Management, VLAN 1100.&lt;/LI&gt;&lt;LI&gt;We have four other port groups, each with their own VLAN ID. These are for VMs that will reside on various VLANs within our infrastructure (VLAN IDs 20, 30, 1100, 2003).&lt;/LI&gt;&lt;LI&gt;All non-iSCSI port groups share the same active uplinks within the Teaming and Failover settings of the port group. Route is based on physical NIC load.&lt;/LI&gt;&lt;LI&gt;We are using shares defined in the Network Resource Pool to prioritize traffic:&lt;BR /&gt; NFS=50, Management=5, vMotion =10, vSphere SAN=50, vSphere replication=50, iSCSI=50, VM=20, FT=10. I believe these are default values of the network resource pool.&lt;/LI&gt;&lt;/UL&gt;&lt;P class="MsoNormal"&gt;&lt;/P&gt;&lt;P class="MsoNormal"&gt;Here is my question: Should we be isolating vMotion traffic by dedicating physical NICs that are exclusively dedicated for vMotion, rather than isolating via VLAN as I have described above? 
Will vMotion traffic degrade performance in the way I have it configured above?&lt;/P&gt;&lt;P class="MsoNormal"&gt;&lt;/P&gt;&lt;P class="MsoNormal"&gt;From the various best practice white papers put out by VMware, I understand that vMotion traffic should be isolated from other traffic. I have done this by utilizing different VLANs for different traffic types. However, I am wondering if vMotion traffic should be isolated by using NIC ports dedicated exclusively for vMotion.&lt;/P&gt;&lt;P class="MsoNormal"&gt;&lt;/P&gt;&lt;P class="MsoNormal"&gt;Any help with determining which design is best to use would be greatly appreciated.&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Fri, 22 Feb 2013 16:42:09 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/vMotion-traffic-isolation-with-VDswitch-best-practive-question/m-p/2156559#M10546</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2013-02-22T16:42:09Z</dc:date>
    </item>
    <item>
      <title>Re: Creating an iSCSI vmkernel port in dvswitch: vSphere 5.1</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Creating-an-iSCSI-vmkernel-port-in-dvswitch-vSphere-5-1/m-p/841358#M3027</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;Thank you so much MKguy!&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Wed, 06 Feb 2013 15:23:46 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Creating-an-iSCSI-vmkernel-port-in-dvswitch-vSphere-5-1/m-p/841358#M3027</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2013-02-06T15:23:46Z</dc:date>
    </item>
    <item>
      <title>Creating an iSCSI vmkernel port in dvswitch: vSphere 5.1</title>
      <link>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Creating-an-iSCSI-vmkernel-port-in-dvswitch-vSphere-5-1/m-p/841356#M3025</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;Hi. The documentation is not all that clear on this point, so posting this here. I have a vSphere 5.1 implementation consisting of ESXi hosts with 12 physical NICs. I dedicated 4 NICs for iSCSI, and the rest are used for FT, management, virtual-machines, and vMotion.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;I created one dVswitch. I have already created vmkernel port for management, vMotion and FT traffic on the appropriate adapters for that traffic, but there does not appear to be iSCSI services available for vmkernels in 5.1. My question is this: Do I just create an iSCSI vmkernel adapter with no services associated with it, or do I need to create a standard switch for iSCSI traffic?&lt;BR /&gt;Thanks.&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Wed, 06 Feb 2013 14:37:05 GMT</pubDate>
      <guid>https://communities.vmware.com/t5/vSphere-vNetwork-Discussions/Creating-an-iSCSI-vmkernel-port-in-dvswitch-vSphere-5-1/m-p/841356#M3025</guid>
      <dc:creator>TheKenApp</dc:creator>
      <dc:date>2013-02-06T14:37:05Z</dc:date>
    </item>
  </channel>
</rss>

