[dpdk-dev] eal: make resource initialization more robust

Message ID 1454066522-80045-1-git-send-email-jianfeng.tan@intel.com (mailing list archive)
State Changes Requested, archived

Commit Message

Jianfeng Tan Jan. 29, 2016, 11:22 a.m. UTC
  Current issue: DPDK is not very friendly to container environments,
because it pre-allocates resources such as cores and hugepages, while a
container may be subject to various resource limitations, for example
cgroup, rlimit, and cpuset.

For cores, this patch makes use of pthread_getaffinity_np() to further
narrow down the detected cores before parsing the coremask (-c),
corelist (-l), and coremap (--lcores) options.
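
A minimal standalone sketch of that idea (illustration only, not the
patch code itself; the patch applies the equivalent check inside
rte_eal_cpu_init()):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t cpuset;
        int cpu;

        /* Ask for the affinity mask the environment (e.g. a cgroup
         * cpuset) actually grants this thread. */
        if (pthread_getaffinity_np(pthread_self(),
                                   sizeof(cpuset), &cpuset) != 0)
            return 1;

        /* Only cores present in the mask should be treated as usable. */
        for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
            if (CPU_ISSET(cpu, &cpuset))
                printf("core %d usable\n", cpu);
        return 0;
    }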

For hugepages, this patch adds a recovery mechanism for the case where
fewer hugepages can be used than requested. It relies on a memory access
to fault in each hugepage; if the access fails with SIGBUS, it recovers
to the previously saved stack environment with siglongjmp().
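
The recovery pattern in isolation (a minimal sketch; the try_fault_in()
helper name is hypothetical, and the patch integrates the same logic
into map_all_hugepages()):

    #include <setjmp.h>
    #include <signal.h>
    #include <string.h>

    static sigjmp_buf jmpenv;

    static void sigbus_handler(int signo)
    {
        (void)signo;
        siglongjmp(jmpenv, 1);  /* jump back to the saved context */
    }

    /* Returns 0 if the page could be faulted in, -1 if touching it
     * raised SIGBUS (e.g. a hugetlb cgroup limit was hit). */
    static int try_fault_in(void *addr, size_t len)
    {
        if (sigsetjmp(jmpenv, 1))  /* 1: also save/restore signal mask */
            return -1;
        memset(addr, 0, len);      /* the access that faults in the page */
        return 0;
    }

    void map_pages_safely(void)
    {
        struct sigaction action, old;

        memset(&action, 0, sizeof(action));
        action.sa_handler = sigbus_handler;
        sigemptyset(&action.sa_mask);
        sigaction(SIGBUS, &action, &old);  /* save the previous handler */

        /* ... mmap() each hugepage and call try_fault_in() on it,
         * stopping at the first failure ... */

        sigaction(SIGBUS, &old, NULL);     /* restore it afterwards */
    }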

Test example:
    a. cgcreate -g cpuset,hugetlb:/test-subgroup
    b. cgset -r cpuset.cpus=2-3 test-subgroup
    c. cgset -r hugetlb.1GB.limit_in_bytes=2147483648 test-subgroup
    d. cgexec -g cpuset,hugetlb:test-subgroup \
	    ./examples/l2fwd/build/l2fwd -n 4 -- -p 3

Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
---
 lib/librte_eal/common/eal_common_lcore.c | 10 +++-
 lib/librte_eal/linuxapp/eal/eal_memory.c | 78 ++++++++++++++++++++++++++++----
 2 files changed, 79 insertions(+), 9 deletions(-)
  

Comments

Neil Horman Feb. 1, 2016, 6:08 p.m. UTC | #1
On Fri, Jan 29, 2016 at 07:22:02PM +0800, Jianfeng Tan wrote:
> Current issue: DPDK is not very friendly to container environments,
> because it pre-allocates resources such as cores and hugepages, while a
> container may be subject to various resource limitations, for example
> cgroup, rlimit, and cpuset.
> 
> For cores, this patch makes use of pthread_getaffinity_np() to further
> narrow down the detected cores before parsing the coremask (-c),
> corelist (-l), and coremap (--lcores) options.
> 
> For hugepages, this patch adds a recovery mechanism for the case where
> fewer hugepages can be used than requested. It relies on a memory access
> to fault in each hugepage; if the access fails with SIGBUS, it recovers
> to the previously saved stack environment with siglongjmp().
> 
> Test example:
>     a. cgcreate -g cpuset,hugetlb:/test-subgroup
>     b. cgset -r cpuset.cpus=2-3 test-subgroup
>     c. cgset -r hugetlb.1GB.limit_in_bytes=2147483648 test-subgroup
>     d. cgexec -g cpuset,hugetlb:test-subgroup \
> 	    ./examples/l2fwd/build/l2fwd -n 4 -- -p 3
> 
> Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
> ---
>  lib/librte_eal/common/eal_common_lcore.c | 10 +++-
>  lib/librte_eal/linuxapp/eal/eal_memory.c | 78 ++++++++++++++++++++++++++++----
>  2 files changed, 79 insertions(+), 9 deletions(-)
> 

This looks a lot better.  One minor comment on the sigbus handler: you
should probably store the previous handler and restore it after you map all
the hugepages you want (lest you overwrite something the application is
doing with sigbus).


Other than that, nice work.
Neil
 
  
Jianfeng Tan Feb. 22, 2016, 6:08 a.m. UTC | #2
Hi Neil,

Sorry, due to a misconfiguration of my email client, I missed this
email.

> This looks a lot better.  One minor comment on the sigbus handler: you
> should probably store the previous handler and restore it after you map
> all the hugepages you want (lest you overwrite something the application
> is doing with sigbus).
>

I did not catch your point. I did store it in "static struct sigaction
action_old" and restore it after mapping all the hugepages. Can you give
more details on this?

Thanks,
Jianfeng
  
Neil Horman Feb. 22, 2016, 1:18 p.m. UTC | #3
On Mon, Feb 22, 2016 at 02:08:51PM +0800, Tan, Jianfeng wrote:
> Hi Neil,
> 
> Sorry, due to a misconfiguration of my email client, I missed this
> email.
> 
> >This looks a lot better.  One minor comment on the sigbus handler: you
> >should probably store the previous handler and restore it after you map
> >all the hugepages you want (lest you overwrite something the application
> >is doing with sigbus).
> >
> 
> I did not catch your point. I did store it in "static struct sigaction
> action_old" and restore it after mapping all the hugepages. Can you give
> more details on this?
> 
Nope, I can't because I missed the fact you had done that.  Apologies, it looks
good.

Acked-by: Neil Horman <nhorman@tuxdriver.com>

  
Thomas Monjalon Feb. 28, 2016, 9:12 p.m. UTC | #4
Hi,

2016-01-29 19:22, Jianfeng Tan:
> Current issue: DPDK is not very friendly to container environments,
> because it pre-allocates resources such as cores and hugepages, while a
> container may be subject to various resource limitations, for example
> cgroup, rlimit, and cpuset.
> 
> For cores, this patch makes use of pthread_getaffinity_np() to further
> narrow down the detected cores before parsing the coremask (-c),
> corelist (-l), and coremap (--lcores) options.
> 
> For hugepages, this patch adds a recovery mechanism for the case where
> fewer hugepages can be used than requested. It relies on a memory access
> to fault in each hugepage; if the access fails with SIGBUS, it recovers
> to the previously saved stack environment with siglongjmp().

These are some interesting ideas.
However, I am not sure a library should try to be so smart silently.
It needs more feedback to decide whether this can be the default
behaviour or an option.

Please send the coremask and hugepage mapping changes as separate
patches, as they are totally different and may be integrated separately.

Thanks
  
Jianfeng Tan Feb. 29, 2016, 1:50 a.m. UTC | #5
Hi Thomas,

On 2/29/2016 5:12 AM, Thomas Monjalon wrote:
> Hi,
>
> 2016-01-29 19:22, Jianfeng Tan:
>> Current issue: DPDK is not very friendly to container environments,
>> because it pre-allocates resources such as cores and hugepages, while a
>> container may be subject to various resource limitations, for example
>> cgroup, rlimit, and cpuset.
>>
>> For cores, this patch makes use of pthread_getaffinity_np() to further
>> narrow down the detected cores before parsing the coremask (-c),
>> corelist (-l), and coremap (--lcores) options.
>>
>> For hugepages, this patch adds a recovery mechanism for the case where
>> fewer hugepages can be used than requested. It relies on a memory access
>> to fault in each hugepage; if the access fails with SIGBUS, it recovers
>> to the previously saved stack environment with siglongjmp().
> These are some interesting ideas.
> However, I am not sure a library should try to be so smart silently.
> It needs more feedback to decide whether this can be the default
> behaviour or an option.
>
> Please send the coremask and hugepage mapping changes as separate
> patches, as they are totally different and may be integrated separately.

Good advice, thanks! I'll do it.

And one more thing FYI: core detection using pthread_getaffinity_np() may
have an issue on some Linux versions or distros, because it excludes cores
isolated with isolcpus. This was reported by Sergio Gonzalez Monroy
<sergio.gonzalez.monroy@intel.com>, and I'm still working it out.
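
For reference, a rough standalone check (hypothetical, not part of the
patch) that shows the discrepancy: on a kernel booted with isolcpus=,
the default affinity mask is smaller than the set of online CPUs even
without any cgroup restriction.

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        cpu_set_t mask;
        long online = sysconf(_SC_NPROCESSORS_ONLN);

        if (pthread_getaffinity_np(pthread_self(),
                                   sizeof(mask), &mask) != 0)
            return 1;

        /* With isolcpus, CPU_COUNT() < online although no cgroup
         * limits the process. */
        printf("online: %ld, in affinity mask: %d\n",
               online, CPU_COUNT(&mask));
        return 0;
    }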

Thanks,
Jianfeng

  

Patch

diff --git a/lib/librte_eal/common/eal_common_lcore.c b/lib/librte_eal/common/eal_common_lcore.c
index a4263ba..8e9c675 100644
--- a/lib/librte_eal/common/eal_common_lcore.c
+++ b/lib/librte_eal/common/eal_common_lcore.c
@@ -57,6 +57,13 @@  rte_eal_cpu_init(void)
 	struct rte_config *config = rte_eal_get_configuration();
 	unsigned lcore_id;
 	unsigned count = 0;
+	rte_cpuset_t cpuset;
+	pthread_t tid;
+
+	tid = pthread_self();
+	if (pthread_getaffinity_np(tid, sizeof(rte_cpuset_t), &cpuset) != 0)
+		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
+			CPU_SET(lcore_id, &cpuset);
 
 	/*
 	 * Parse the maximum set of logical cores, detect the subset of running
@@ -70,7 +77,8 @@  rte_eal_cpu_init(void)
 
 		/* in 1:1 mapping, record related cpu detected state */
 		lcore_config[lcore_id].detected = eal_cpu_detected(lcore_id);
-		if (lcore_config[lcore_id].detected == 0) {
+		if (lcore_config[lcore_id].detected == 0 ||
+		    !CPU_ISSET(lcore_id, &cpuset)) {
 			config->lcore_role[lcore_id] = ROLE_OFF;
 			lcore_config[lcore_id].core_index = -1;
 			continue;
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 846fd31..837fd9e 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -80,6 +80,8 @@ 
 #include <errno.h>
 #include <sys/ioctl.h>
 #include <sys/time.h>
+#include <signal.h>
+#include <setjmp.h>
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -309,6 +311,12 @@  get_virtual_area(size_t *size, size_t hugepage_sz)
 	return addr;
 }
 
+static sigjmp_buf jmpenv;
+
+static void sigbus_handler(int signo __rte_unused)
+{
+	siglongjmp(jmpenv, 1);
+}
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -396,7 +404,7 @@  map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		if (fd < 0) {
 			RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__,
 					strerror(errno));
-			return -1;
+			return i;
 		}
 
 		virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
@@ -405,11 +413,26 @@  map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__,
 					strerror(errno));
 			close(fd);
-			return -1;
+			return i;
 		}
 
 		if (orig) {
 			hugepg_tbl[i].orig_va = virtaddr;
+			/* In linux, hugetlb limitations, like cgroup, are
+			 * enforced at fault time instead of mmap(), even
+			 * with the option of MAP_POPULATE. Kernel will send
+			 * a SIGBUS signal. To avoid to be killed, save stack
+			 * environment here, if SIGBUS happens, we can jump
+			 * back here.
+			 */
+			if (sigsetjmp(jmpenv, 0)) {
+				RTE_LOG(ERR, EAL, "SIGBUS: Cannot mmap more "
+					"hugepages of size %u MB\n",
+					(unsigned)(hugepage_sz / 0x100000));
+				munmap(virtaddr, hugepage_sz);
+				close(fd);
+				return i;
+			}
 			memset(virtaddr, 0, hugepage_sz);
 		}
 		else {
@@ -421,7 +444,7 @@  map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			RTE_LOG(ERR, EAL, "%s(): Locking file failed:%s \n",
 				__func__, strerror(errno));
 			close(fd);
-			return -1;
+			return i;
 		}
 
 		close(fd);
@@ -429,7 +452,7 @@  map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_addr = (char *)vma_addr + hugepage_sz;
 		vma_len -= hugepage_sz;
 	}
-	return 0;
+	return i;
 }
 
 #ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
@@ -1075,6 +1098,31 @@  calc_num_pages_per_socket(uint64_t * memory,
 	return total_num_pages;
 }
 
+static struct sigaction action_old;
+static int need_recover = 0;
+
+static void
+register_sigbus(void)
+{
+	sigset_t mask;
+	struct sigaction action;
+
+	sigemptyset(&mask);
+	sigaddset(&mask, SIGBUS);
+	action.sa_flags = 0;
+	action.sa_mask = mask;
+	action.sa_handler = sigbus_handler;
+
+	need_recover = !sigaction(SIGBUS, &action, &action_old);
+}
+
+static void
+recover_sigbus(void)
+{
+	if (need_recover)
+		sigaction(SIGBUS, &action_old, NULL);
+}
+
 /*
  * Prepare physical memory mapping: fill configuration structure with
  * these infos, return 0 on success.
@@ -1161,8 +1209,11 @@  rte_eal_hugepage_init(void)
 
 	hp_offset = 0; /* where we start the current page size entries */
 
+	register_sigbus();
+
 	/* map all hugepages and sort them */
 	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
+		int pages_old, pages_new;
 		struct hugepage_info *hpi;
 
 		/*
@@ -1176,10 +1227,19 @@  rte_eal_hugepage_init(void)
 			continue;
 
 		/* map all hugepages available */
-		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 1) < 0){
-			RTE_LOG(DEBUG, EAL, "Failed to mmap %u MB hugepages\n",
-					(unsigned)(hpi->hugepage_sz / 0x100000));
-			goto fail;
+		pages_old = hpi->num_pages[0];
+		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+		if (pages_new < pages_old) {
+			RTE_LOG(DEBUG, EAL,
+				"%d not %d hugepages of size %u MB allocated\n",
+				pages_new, pages_old,
+				(unsigned)(hpi->hugepage_sz / 0x100000));
+			internal_config.memory -=
+				hpi->hugepage_sz * (pages_old - pages_new);
+			nr_hugepages -= (pages_old - pages_new);
+			hpi->num_pages[0] = pages_new;
+			if (pages_new == 0)
+				continue;
 		}
 
 		/* find physical addresses and sockets for each hugepage */
@@ -1226,6 +1286,8 @@  rte_eal_hugepage_init(void)
 #endif
 	}
 
+	recover_sigbus();
+
 #ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
 	nr_hugefiles = 0;
 	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {