AMD-GFX Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/amdkfd: Wait vm update fence after retry fault recovered
@ 2023-09-22 21:37 Philip Yang
  2023-09-26 20:43 ` Chen, Xiaogang
  2023-09-28 21:50 ` Felix Kuehling
  0 siblings, 2 replies; 5+ messages in thread
From: Philip Yang @ 2023-09-22 21:37 UTC (permalink / raw)
  To: amd-gfx; +Cc: Philip Yang, Felix.Kuehling

Otherwise kfd flush tlb does nothing if vm update fence callback doesn't
update vm->tlb_seq. H/W will generate retry fault again.

This works now because retry faults keep coming; recovery will update the
page table again after the AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING timeout and
flush the tlb.

Remove wait parameter in svm_range_validate_and_map because it is
always called with true.

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 70aa882636ab..61f4de1633a8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1447,7 +1447,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
 static int
 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 		      unsigned long npages, bool readonly,
-		      unsigned long *bitmap, bool wait, bool flush_tlb)
+		      unsigned long *bitmap, bool flush_tlb)
 {
 	struct kfd_process_device *pdd;
 	struct amdgpu_device *bo_adev = NULL;
@@ -1480,8 +1480,7 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 
 		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
 					 prange->dma_addr[gpuidx],
-					 bo_adev, wait ? &fence : NULL,
-					 flush_tlb);
+					 bo_adev, &fence, flush_tlb);
 		if (r)
 			break;
 
@@ -1605,7 +1604,7 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
  */
 static int svm_range_validate_and_map(struct mm_struct *mm,
 				      struct svm_range *prange, int32_t gpuidx,
-				      bool intr, bool wait, bool flush_tlb)
+				      bool intr, bool flush_tlb)
 {
 	struct svm_validate_context *ctx;
 	unsigned long start, end, addr;
@@ -1729,7 +1728,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 
 		if (!r)
 			r = svm_range_map_to_gpus(prange, offset, npages, readonly,
-						  ctx->bitmap, wait, flush_tlb);
+						  ctx->bitmap, flush_tlb);
 
 		if (!r && next == end)
 			prange->mapped_to_gpu = true;
@@ -1823,7 +1822,7 @@ static void svm_range_restore_work(struct work_struct *work)
 		mutex_lock(&prange->migrate_mutex);
 
 		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-					       false, true, false);
+					       false, false);
 		if (r)
 			pr_debug("failed %d to map 0x%lx to gpus\n", r,
 				 prange->start);
@@ -3064,7 +3063,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 		}
 	}
 
-	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
+	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
 	if (r)
 		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
 			 r, svms, prange->start, prange->last);
@@ -3603,7 +3602,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
 
 		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-					       true, true, flush_tlb);
+					       true, flush_tlb);
 		if (r)
 			pr_debug("failed %d to map svm range\n", r);
 
-- 
2.35.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] drm/amdkfd: Wait vm update fence after retry fault recovered
  2023-09-22 21:37 [PATCH] drm/amdkfd: Wait vm update fence after retry fault recovered Philip Yang
@ 2023-09-26 20:43 ` Chen, Xiaogang
  2023-09-27 14:29   ` Philip Yang
  2023-09-28 21:50 ` Felix Kuehling
  1 sibling, 1 reply; 5+ messages in thread
From: Chen, Xiaogang @ 2023-09-26 20:43 UTC (permalink / raw)
  To: Philip Yang, amd-gfx; +Cc: Felix.Kuehling


On 9/22/2023 4:37 PM, Philip Yang wrote:
> Caution: This message originated from an External Source. Use proper caution when opening attachments, clicking links, or responding.
>
>
> Otherwise kfd flush tlb does nothing if vm update fence callback doesn't
> update vm->tlb_seq. H/W will generate retry fault again.
>
> This works now because retry fault keep coming, recover will update page
> table again after AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING timeout and flush
> tlb.

I think what this patch does is wait for the vm->last_update fence in the 
gpu page fault retry handler. I do not know what bug it tries to fix. h/w 
will keep generating retry faults as long as the vm page table is not set 
up correctly, no matter whether the kfd driver waits for the fence or not. 
The vm page table will eventually be set up.

There is a consequence I saw: if we wait for the vm page table update fence 
it will delay the gpu page fault handler's exit. Then more h/w interrupt 
vectors will be sent to the sw ring, potentially causing the ring to overflow.

Regards

Xiaogang

> Remove wait parameter in svm_range_validate_and_map because it is
> always called with true.
>
> Signed-off-by: Philip Yang <Philip.Yang@amd.com>
> ---
>   drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 15 +++++++--------
>   1 file changed, 7 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 70aa882636ab..61f4de1633a8 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -1447,7 +1447,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
>   static int
>   svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
>                        unsigned long npages, bool readonly,
> -                     unsigned long *bitmap, bool wait, bool flush_tlb)
> +                     unsigned long *bitmap, bool flush_tlb)
>   {
>          struct kfd_process_device *pdd;
>          struct amdgpu_device *bo_adev = NULL;
> @@ -1480,8 +1480,7 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
>
>                  r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
>                                           prange->dma_addr[gpuidx],
> -                                        bo_adev, wait ? &fence : NULL,
> -                                        flush_tlb);
> +                                        bo_adev, &fence, flush_tlb);
>                  if (r)
>                          break;
>
> @@ -1605,7 +1604,7 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
>    */
>   static int svm_range_validate_and_map(struct mm_struct *mm,
>                                        struct svm_range *prange, int32_t gpuidx,
> -                                     bool intr, bool wait, bool flush_tlb)
> +                                     bool intr, bool flush_tlb)
>   {
>          struct svm_validate_context *ctx;
>          unsigned long start, end, addr;
> @@ -1729,7 +1728,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
>
>                  if (!r)
>                          r = svm_range_map_to_gpus(prange, offset, npages, readonly,
> -                                                 ctx->bitmap, wait, flush_tlb);
> +                                                 ctx->bitmap, flush_tlb);
>
>                  if (!r && next == end)
>                          prange->mapped_to_gpu = true;
> @@ -1823,7 +1822,7 @@ static void svm_range_restore_work(struct work_struct *work)
>                  mutex_lock(&prange->migrate_mutex);
>
>                  r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
> -                                              false, true, false);
> +                                              false, false);
>                  if (r)
>                          pr_debug("failed %d to map 0x%lx to gpus\n", r,
>                                   prange->start);
> @@ -3064,7 +3063,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>                  }
>          }
>
> -       r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
> +       r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
>          if (r)
>                  pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
>                           r, svms, prange->start, prange->last);
> @@ -3603,7 +3602,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
>                  flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
>
>                  r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
> -                                              true, true, flush_tlb);
> +                                              true, flush_tlb);
>                  if (r)
>                          pr_debug("failed %d to map svm range\n", r);
>
> --
> 2.35.1
>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] drm/amdkfd: Wait vm update fence after retry fault recovered
  2023-09-26 20:43 ` Chen, Xiaogang
@ 2023-09-27 14:29   ` Philip Yang
  2023-09-27 18:22     ` Chen, Xiaogang
  0 siblings, 1 reply; 5+ messages in thread
From: Philip Yang @ 2023-09-27 14:29 UTC (permalink / raw)
  To: Chen, Xiaogang, Philip Yang, amd-gfx; +Cc: Felix.Kuehling

[-- Attachment #1: Type: text/html, Size: 14229 bytes --]

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] drm/amdkfd: Wait vm update fence after retry fault recovered
  2023-09-27 14:29   ` Philip Yang
@ 2023-09-27 18:22     ` Chen, Xiaogang
  0 siblings, 0 replies; 5+ messages in thread
From: Chen, Xiaogang @ 2023-09-27 18:22 UTC (permalink / raw)
  To: Philip Yang, Philip Yang, amd-gfx; +Cc: Felix.Kuehling


On 9/27/2023 9:29 AM, Philip Yang wrote:
>
>
> On 2023-09-26 16:43, Chen, Xiaogang wrote:
>>
>> On 9/22/2023 4:37 PM, Philip Yang wrote:
>>> Caution: This message originated from an External Source. Use proper 
>>> caution when opening attachments, clicking links, or responding.
>>>
>>>
>>> Otherwise kfd flush tlb does nothing if vm update fence callback 
>>> doesn't
>>> update vm->tlb_seq. H/W will generate retry fault again.
>>>
>>> This works now because retry fault keep coming, recover will update 
>>> page
>>> table again after AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING timeout and 
>>> flush
>>> tlb.
>>
>> I think what this patch does is waiting vm->last_update fence at gpu 
>> page fault retry handler. I do not know what bug it tries to fix. h/w 
>> will keep generating retry fault as long as vm page table is not 
>> setup correctly, no matter kfd driver waits the fence or not. vm page 
>> table eventually will be setup.
>
> This issue was there, I notice it when implementing the granularity 
> bitmap_mapped flag for mGPUs, to skip the retry fault if prange mapped 
> on the GPU. The retry fault keep coming after updating GPU page table, 
> because restore_pages -> svm_range_validate_and_map doesn't wait for 
> vm update fence before kfd_flush_tlb.
>
Now I understand your change: it is a general issue for gpu retry 
fault handling that the tlb flush did not actually happen, since vm->tlb_seq 
was not updated because we did not give a fence to amdgpu_vm_update_range.
>
> It is working now because we handle the same retry fault again after 
> timeout AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING, and kfd_flush_tlb does 
> flush on the second time
>
> The issue only exist if using sdma update GPU page table, as no fence 
> if cpu update GPU page table.
>
> There are several todo items to optimize this further:
>
> A. After updating GPU page table, we only wait for fence and flush tlb 
> if updating existing mapping, or vm params.table_freed (this needs 
> amdgpu vm interface change).
>
> B. Use sync to wait mGPUs update fences.
>
> C. Use multiple workers to handle restore_pages.
>
>>
>> There is a consequence I saw: if we wait vm page table update fence 
>> it will delay gpu page fault handler exit. Then more h/w interrupt 
>> vectors will be sent to sw ring, potentially cause the ring overflow.
>
> retry CAM filter, or sw filter drop the duplicate retry fault, to 
> prevent sw ring overflow.
>
Inside the sw ring there is not only the gpu retry fault type, but also other 
interrupt types, right? These filters just drop interrupts; they do not make 
sure all interrupts that need handling get handled. I mean delaying the gpu 
retry handler's exit may drop more interrupts, some of which never get handled.  
But I think this risk already existed, and we may need to increase the sw ring.

Regards

Xiaogang

> Regards,
>
> Philip
>
>>
>> Regards
>>
>> Xiaogang
>>
>>> Remove wait parameter in svm_range_validate_and_map because it is
>>> always called with true.
>>>
>>> Signed-off-by: Philip Yang <Philip.Yang@amd.com>
>>> ---
>>>   drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 15 +++++++--------
>>>   1 file changed, 7 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c 
>>> b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>>> index 70aa882636ab..61f4de1633a8 100644
>>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>>> @@ -1447,7 +1447,7 @@ svm_range_map_to_gpu(struct kfd_process_device 
>>> *pdd, struct svm_range *prange,
>>>   static int
>>>   svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
>>>                        unsigned long npages, bool readonly,
>>> -                     unsigned long *bitmap, bool wait, bool flush_tlb)
>>> +                     unsigned long *bitmap, bool flush_tlb)
>>>   {
>>>          struct kfd_process_device *pdd;
>>>          struct amdgpu_device *bo_adev = NULL;
>>> @@ -1480,8 +1480,7 @@ svm_range_map_to_gpus(struct svm_range 
>>> *prange, unsigned long offset,
>>>
>>>                  r = svm_range_map_to_gpu(pdd, prange, offset, 
>>> npages, readonly,
>>> prange->dma_addr[gpuidx],
>>> -                                        bo_adev, wait ? &fence : NULL,
>>> -                                        flush_tlb);
>>> +                                        bo_adev, &fence, flush_tlb);
>>>                  if (r)
>>>                          break;
>>>
>>> @@ -1605,7 +1604,7 @@ static void *kfd_svm_page_owner(struct 
>>> kfd_process *p, int32_t gpuidx)
>>>    */
>>>   static int svm_range_validate_and_map(struct mm_struct *mm,
>>>                                        struct svm_range *prange, 
>>> int32_t gpuidx,
>>> -                                     bool intr, bool wait, bool 
>>> flush_tlb)
>>> +                                     bool intr, bool flush_tlb)
>>>   {
>>>          struct svm_validate_context *ctx;
>>>          unsigned long start, end, addr;
>>> @@ -1729,7 +1728,7 @@ static int svm_range_validate_and_map(struct 
>>> mm_struct *mm,
>>>
>>>                  if (!r)
>>>                          r = svm_range_map_to_gpus(prange, offset, 
>>> npages, readonly,
>>> - ctx->bitmap, wait, flush_tlb);
>>> + ctx->bitmap, flush_tlb);
>>>
>>>                  if (!r && next == end)
>>>                          prange->mapped_to_gpu = true;
>>> @@ -1823,7 +1822,7 @@ static void svm_range_restore_work(struct 
>>> work_struct *work)
>>>                  mutex_lock(&prange->migrate_mutex);
>>>
>>>                  r = svm_range_validate_and_map(mm, prange, 
>>> MAX_GPU_INSTANCE,
>>> -                                              false, true, false);
>>> +                                              false, false);
>>>                  if (r)
>>>                          pr_debug("failed %d to map 0x%lx to 
>>> gpus\n", r,
>>>                                   prange->start);
>>> @@ -3064,7 +3063,7 @@ svm_range_restore_pages(struct amdgpu_device 
>>> *adev, unsigned int pasid,
>>>                  }
>>>          }
>>>
>>> -       r = svm_range_validate_and_map(mm, prange, gpuidx, false, 
>>> false, false);
>>> +       r = svm_range_validate_and_map(mm, prange, gpuidx, false, 
>>> false);
>>>          if (r)
>>>                  pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] 
>>> to gpus\n",
>>>                           r, svms, prange->start, prange->last);
>>> @@ -3603,7 +3602,7 @@ svm_range_set_attr(struct kfd_process *p, 
>>> struct mm_struct *mm,
>>>                  flush_tlb = !migrated && update_mapping && 
>>> prange->mapped_to_gpu;
>>>
>>>                  r = svm_range_validate_and_map(mm, prange, 
>>> MAX_GPU_INSTANCE,
>>> -                                              true, true, flush_tlb);
>>> +                                              true, flush_tlb);
>>>                  if (r)
>>>                          pr_debug("failed %d to map svm range\n", r);
>>>
>>> -- 
>>> 2.35.1
>>>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] drm/amdkfd: Wait vm update fence after retry fault recovered
  2023-09-22 21:37 [PATCH] drm/amdkfd: Wait vm update fence after retry fault recovered Philip Yang
  2023-09-26 20:43 ` Chen, Xiaogang
@ 2023-09-28 21:50 ` Felix Kuehling
  1 sibling, 0 replies; 5+ messages in thread
From: Felix Kuehling @ 2023-09-28 21:50 UTC (permalink / raw)
  To: Philip Yang, amd-gfx

On 2023-09-22 17:37, Philip Yang wrote:
> Otherwise kfd flush tlb does nothing if vm update fence callback doesn't
> update vm->tlb_seq. H/W will generate retry fault again.
>
> This works now because retry fault keep coming, recover will update page
> table again after AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING timeout and flush
> tlb.

I think I'm OK with this change. But as I understand it, this is really 
part of another patch series that depends on this fix. It's not needed 
with the way we currently handle retry faults. Am I misunderstanding it?

This is not an optimal solution, but I think it's only meant to be 
temporary. I think we want to get to a solution that allows us to 
schedule TLB flushes asynchronously using the fences. For now, the 
impact is limited to small-BAR GPUs that use SDMA for page table 
updates, so I'm OK with that.

Regards,
   Felix


> Remove wait parameter in svm_range_validate_and_map because it is
> always called with true.
>
> Signed-off-by: Philip Yang <Philip.Yang@amd.com>
> ---
>   drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 15 +++++++--------
>   1 file changed, 7 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 70aa882636ab..61f4de1633a8 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -1447,7 +1447,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
>   static int
>   svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
>   		      unsigned long npages, bool readonly,
> -		      unsigned long *bitmap, bool wait, bool flush_tlb)
> +		      unsigned long *bitmap, bool flush_tlb)
>   {
>   	struct kfd_process_device *pdd;
>   	struct amdgpu_device *bo_adev = NULL;
> @@ -1480,8 +1480,7 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
>   
>   		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
>   					 prange->dma_addr[gpuidx],
> -					 bo_adev, wait ? &fence : NULL,
> -					 flush_tlb);
> +					 bo_adev, &fence, flush_tlb);
>   		if (r)
>   			break;
>   
> @@ -1605,7 +1604,7 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
>    */
>   static int svm_range_validate_and_map(struct mm_struct *mm,
>   				      struct svm_range *prange, int32_t gpuidx,
> -				      bool intr, bool wait, bool flush_tlb)
> +				      bool intr, bool flush_tlb)
>   {
>   	struct svm_validate_context *ctx;
>   	unsigned long start, end, addr;
> @@ -1729,7 +1728,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
>   
>   		if (!r)
>   			r = svm_range_map_to_gpus(prange, offset, npages, readonly,
> -						  ctx->bitmap, wait, flush_tlb);
> +						  ctx->bitmap, flush_tlb);
>   
>   		if (!r && next == end)
>   			prange->mapped_to_gpu = true;
> @@ -1823,7 +1822,7 @@ static void svm_range_restore_work(struct work_struct *work)
>   		mutex_lock(&prange->migrate_mutex);
>   
>   		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
> -					       false, true, false);
> +					       false, false);
>   		if (r)
>   			pr_debug("failed %d to map 0x%lx to gpus\n", r,
>   				 prange->start);
> @@ -3064,7 +3063,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>   		}
>   	}
>   
> -	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
> +	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
>   	if (r)
>   		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
>   			 r, svms, prange->start, prange->last);
> @@ -3603,7 +3602,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
>   		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
>   
>   		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
> -					       true, true, flush_tlb);
> +					       true, flush_tlb);
>   		if (r)
>   			pr_debug("failed %d to map svm range\n", r);
>   

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2023-09-28 21:50 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-09-22 21:37 [PATCH] drm/amdkfd: Wait vm update fence after retry fault recovered Philip Yang
2023-09-26 20:43 ` Chen, Xiaogang
2023-09-27 14:29   ` Philip Yang
2023-09-27 18:22     ` Chen, Xiaogang
2023-09-28 21:50 ` Felix Kuehling

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox