[PATCH 07/13] drm/radeon: use simpler remove_conflicting_pci_framebuffers()

2017-11-24 Thread Michał Mirosław
Signed-off-by: Michał Mirosław 
---
 drivers/gpu/drm/radeon/radeon_drv.c | 23 +--
 1 file changed, 1 insertion(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 31dd04f6baa1..49f51b17ae81 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -311,27 +311,6 @@ static struct drm_driver kms_driver;
 
 bool radeon_device_is_virtual(void);
 
-static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
-{
-   struct apertures_struct *ap;
-   bool primary = false;
-
-   ap = alloc_apertures(1);
-   if (!ap)
-   return -ENOMEM;
-
-   ap->ranges[0].base = pci_resource_start(pdev, 0);
-   ap->ranges[0].size = pci_resource_len(pdev, 0);
-
-#ifdef CONFIG_X86
-   primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
-   drm_fb_helper_remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
-   kfree(ap);
-
-   return 0;
-}
-
 static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
 {
@@ -341,7 +320,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return -EPROBE_DEFER;
 
/* Get rid of things like offb */
-   ret = radeon_kick_out_firmware_fb(pdev);
+   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "radeondrmfb");
if (ret)
return ret;
 
-- 
2.11.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 02/13] fbdev: add remove_conflicting_pci_framebuffers()

2017-11-24 Thread Michał Mirosław
Almost all drivers using remove_conflicting_framebuffers() wrap it with
the same code. Extract the common part from the PCI drivers into a separate
remove_conflicting_pci_framebuffers().
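
A minimal usage sketch from a driver's point of view (hypothetical "foo" driver;
only drm_fb_helper_remove_conflicting_pci_framebuffers() is the helper added by
this series, the rest is ordinary PCI probe boilerplate):

	static int foo_pci_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
	{
		int ret;

		/* Kick out firmware framebuffers (offb, efifb, ...) on BAR 0 */
		ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0,
									"foodrmfb");
		if (ret)
			return ret;

		/* ... continue with normal device initialization ... */
		return 0;
	}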

Signed-off-by: Michał Mirosław 
---
 drivers/video/fbdev/core/fbmem.c | 22 ++
 include/drm/drm_fb_helper.h  | 12 
 include/linux/fb.h   |  2 ++
 3 files changed, 36 insertions(+)

diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 30a18d4c9de4..5ea980e5d3b7 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -34,6 +34,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 
@@ -1788,6 +1789,27 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
 }
 EXPORT_SYMBOL(remove_conflicting_framebuffers);
 
+int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const char *name)
+{
+   struct apertures_struct *ap;
+   bool primary = false;
+
+   ap = alloc_apertures(1);
+   if (!ap)
+   return -ENOMEM;
+
+   ap->ranges[0].base = pci_resource_start(pdev, res_id);
+   ap->ranges[0].size = pci_resource_len(pdev, res_id);
+#ifdef CONFIG_X86
+   primary = pdev->resource[PCI_ROM_RESOURCE].flags &
+   IORESOURCE_ROM_SHADOW;
+#endif
+   remove_conflicting_framebuffers(ap, name, primary);
+   kfree(ap);
+   return 0;
+}
+EXPORT_SYMBOL(remove_conflicting_pci_framebuffers);
+
 /**
  * register_framebuffer - registers a frame buffer device
  * @fb_info: frame buffer info structure
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 33fe95927742..ac3412290289 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -520,4 +520,16 @@ drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
 #endif
 }
 
+static inline int
+drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
+ int resource_id,
+ const char *name)
+{
+#if IS_REACHABLE(CONFIG_FB)
+   return remove_conflicting_pci_framebuffers(pdev, resource_id, name);
+#else
+   return 0;
+#endif
+}
+
 #endif
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f4386b0ccf40..4196cb09e58e 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -624,6 +624,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
 extern int register_framebuffer(struct fb_info *fb_info);
 extern int unregister_framebuffer(struct fb_info *fb_info);
 extern int unlink_framebuffer(struct fb_info *fb_info);
+extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id,
+  const char *name);
 extern int remove_conflicting_framebuffers(struct apertures_struct *a,
   const char *name, bool primary);
 extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
-- 
2.11.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdkfd: Delete a useless parameter from create_queue function pointer

2017-11-24 Thread Yong Zhao
Signed-off-by: Yong Zhao 
Reviewed-by: Felix Kuehling 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  | 13 +++--
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h  |  3 +--
 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c |  6 ++
 3 files changed, 6 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 8447810..81ec7bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -149,8 +149,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
 
 static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
-   struct qcm_process_device *qpd,
-   int *allocated_vmid)
+   struct qcm_process_device *qpd)
 {
int retval;
 
@@ -170,7 +169,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
if (retval)
goto out_unlock;
}
-   *allocated_vmid = qpd->vmid;
q->properties.vmid = qpd->vmid;
 
q->properties.tba_addr = qpd->tba_addr;
@@ -184,10 +182,8 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
retval = -EINVAL;
 
if (retval) {
-   if (list_empty(&qpd->queues_list)) {
+   if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
-   *allocated_vmid = 0;
-   }
goto out_unlock;
}
 
@@ -812,16 +808,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 }
 
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
-   struct qcm_process_device *qpd, int *allocate_vmid)
+   struct qcm_process_device *qpd)
 {
int retval;
struct mqd_manager *mqd;
 
retval = 0;
 
-   if (allocate_vmid)
-   *allocate_vmid = 0;
-
mutex_lock(&dqm->lock);
 
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 8752edf..c61b693 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -84,8 +84,7 @@ struct device_process_node {
 struct device_queue_manager_ops {
int (*create_queue)(struct device_queue_manager *dqm,
struct queue *q,
-   struct qcm_process_device *qpd,
-   int *allocate_vmid);
+   struct qcm_process_device *qpd);
 
int (*destroy_queue)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index eeb7726..fbfa274 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -201,8 +201,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
-   retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
-   &q->properties.vmid);
+   retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
@@ -222,8 +221,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
-   retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
-   &q->properties.vmid);
+   retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdkfd: Delete a useless parameter from create_queue function pointer

2017-11-24 Thread Zhao, Yong
I based it on amd-staging-drm-next. I will pull from amdkfd-next, remove the
Change-Id from the commit message, and add your Reviewed-by.


Regards,

Yong


From: Kuehling, Felix
Sent: Friday, November 24, 2017 4:44:48 PM
To: Zhao, Yong; oded.gab...@gmail.com; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdkfd: Delete a useless parameter from create_queue 
function pointer

Hi Oded,

Yong made this patch against our internal KFD branch. I asked him to
send it to upstream as well, since it applies to code I have already
upstreamed. I'm going to do this for more patches in the future as more
KFD code is upstream and more of our internal changes are applicable
upstream directly.

Is this OK with you, or do you prefer getting all patches through me, in
roughly bi-weekly batches as part of my on-going upstreaming effort?

Hi Yong,

Which branch is this patch against? For upstream KFD commits, they
should be applied against amdkfd-next on
git://people.freedesktop.org/~gabbayo/linux. Oded will apply the patch
if he accepts it, so you only need read access to this repository.

Also, please remove the Change-Id from the commit message. It is
meaningless for upstream submissions outside of our Gerrit server.

Other than that, this patch is Reviewed-by: Felix Kuehling 

Regards,
  Felix


On 2017-11-24 03:57 PM, Yong Zhao wrote:
> Change-Id: Ia5c74ad567c30e206ed804b204fdf8a0f8a75a19
> Signed-off-by: Yong Zhao 
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  | 14 --
>  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h  |  3 +--
>  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c |  3 +--
>  3 files changed, 6 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index 53a66e8..1df1123 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -144,8 +144,7 @@ static void deallocate_vmid(struct device_queue_manager 
> *dqm,
>
>  static int create_queue_nocpsch(struct device_queue_manager *dqm,
>struct queue *q,
> - struct qcm_process_device *qpd,
> - int *allocated_vmid)
> + struct qcm_process_device *qpd)
>  {
>int retval;
>
> @@ -165,7 +164,6 @@ static int create_queue_nocpsch(struct 
> device_queue_manager *dqm,
>if (retval)
>goto out_unlock;
>}
> - *allocated_vmid = qpd->vmid;
>q->properties.vmid = qpd->vmid;
>
>if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
> @@ -176,10 +174,9 @@ static int create_queue_nocpsch(struct 
> device_queue_manager *dqm,
>retval = -EINVAL;
>
>if (retval) {
> - if (list_empty(&qpd->queues_list)) {
> + if (list_empty(&qpd->queues_list))
>deallocate_vmid(dqm, qpd, q);
> - *allocated_vmid = 0;
> - }
> +
>goto out_unlock;
>}
>
> @@ -788,16 +785,13 @@ static void select_sdma_engine_id(struct queue *q)
>  }
>
>  static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue 
> *q,
> - struct qcm_process_device *qpd, int *allocate_vmid)
> + struct qcm_process_device *qpd)
>  {
>int retval;
>struct mqd_manager *mqd;
>
>retval = 0;
>
> - if (allocate_vmid)
> - *allocate_vmid = 0;
> -
>   mutex_lock(&dqm->lock);
>
>if (dqm->total_queue_count >= max_num_of_queues_per_device) {
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 
> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> index faf820a..449407a 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> @@ -84,8 +84,7 @@ struct device_process_node {
>  struct device_queue_manager_ops {
>int (*create_queue)(struct device_queue_manager *dqm,
>struct queue *q,
> - struct qcm_process_device *qpd,
> - int *allocate_vmid);
> + struct qcm_process_device *qpd);
>
>int (*destroy_queue)(struct device_queue_manager *dqm,
>struct qcm_process_device *qpd,
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> index 03bec76..1e7bcae 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> @@ -199,8 +199,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
>goto err_create_queue;
>pqn->q = q;
>pqn->kq = 

[PATCH] drm/amdkfd: Delete a useless parameter from create_queue function pointer

2017-11-24 Thread Yong Zhao
Change-Id: Ia5c74ad567c30e206ed804b204fdf8a0f8a75a19
Signed-off-by: Yong Zhao 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  | 14 --
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h  |  3 +--
 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c |  3 +--
 3 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 53a66e8..1df1123 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -144,8 +144,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
 
 static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
-   struct qcm_process_device *qpd,
-   int *allocated_vmid)
+   struct qcm_process_device *qpd)
 {
int retval;
 
@@ -165,7 +164,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
if (retval)
goto out_unlock;
}
-   *allocated_vmid = qpd->vmid;
q->properties.vmid = qpd->vmid;
 
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
@@ -176,10 +174,9 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
retval = -EINVAL;
 
if (retval) {
-   if (list_empty(&qpd->queues_list)) {
+   if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
-   *allocated_vmid = 0;
-   }
+
goto out_unlock;
}
 
@@ -788,16 +785,13 @@ static void select_sdma_engine_id(struct queue *q)
 }
 
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
-   struct qcm_process_device *qpd, int *allocate_vmid)
+   struct qcm_process_device *qpd)
 {
int retval;
struct mqd_manager *mqd;
 
retval = 0;
 
-   if (allocate_vmid)
-   *allocate_vmid = 0;
-
mutex_lock(&dqm->lock);
 
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index faf820a..449407a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -84,8 +84,7 @@ struct device_process_node {
 struct device_queue_manager_ops {
int (*create_queue)(struct device_queue_manager *dqm,
struct queue *q,
-   struct qcm_process_device *qpd,
-   int *allocate_vmid);
+   struct qcm_process_device *qpd);
 
int (*destroy_queue)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 03bec76..1e7bcae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -199,8 +199,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
-   retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
-   &q->properties.vmid);
+   retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/display: check plane state before validating fbc

2017-11-24 Thread Andrey Grodzovsky
The patch is good, but why is validate_fbc being called from
dce110_apply_ctx_to_hw (atomic_commit)? It should be called from
atomic_check.


Thanks,
Andrey

On 2017-11-22 02:13 AM, S, Shirish wrote:

From: Shirish S 

While validating fbc, the pipe's array_mode is accessed without checking
that a plane_state exists for it.
This causes a null pointer dereference, followed by a reboot, when a crtc
associated with an external display (not connected) is page flipped.

This patch adds a check for plane_state before using it to validate fbc.

Signed-off-by: Shirish S 
Reviewed-by: Roman Li 
---
  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4 
  1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index ee3b944..a6cd63a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1724,6 +1724,10 @@ static enum dc_status validate_fbc(struct dc *dc,
if (pipe_ctx->stream->sink->link->psr_enabled)
return DC_ERROR_UNEXPECTED;
  
+	/* Nothing to compress */
+	if (!pipe_ctx->plane_state)
+		return DC_ERROR_UNEXPECTED;
+
	/* Only for non-linear tiling */
	if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
		return DC_ERROR_UNEXPECTED;
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/display: remove usage of legacy_cursor_update

2017-11-24 Thread Andrey Grodzovsky



On 2017-11-23 01:16 AM, S, Shirish wrote:

From: Shirish S 

Currently the atomic check code uses legacy_cursor_update to differentiate
whether the cursor plane is being requested by the user, which is not required,
as we shall be updating the plane only if a modeset is requested/required.

Tested that the cursor plane and underlay get updated seamlessly, without any
lag or frame drops.

Signed-off-by: Shirish S 
Reviewed-by: Harry Wentland 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 41 +++
  1 file changed, 12 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8638f1c..2df2e32 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4752,8 +4752,6 @@ static int dm_update_planes_state(struct dc *dc,
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
   struct drm_atomic_state *state)
 {
-   int i;
-   int ret;
struct amdgpu_device *adev = dev->dev_private;
struct dc *dc = adev->dm.dc;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
@@ -4761,6 +4759,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_connector_state *old_con_state, *new_con_state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+   int ret, i;
  
	/*
	 * This bool will be set for true for any modeset/reset
@@ -4772,36 +4771,20 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
  
-	/*

-* legacy_cursor_update should be made false for SoC's having
-* a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
-* otherwise for software cursor plane,
-* we should not add it to list of affected planes.
-*/
-   if (state->legacy_cursor_update) {
-   for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-   if (new_crtc_state->color_mgmt_changed) {
-   ret = drm_atomic_add_affected_planes(state, crtc);


I don't even understand this, why even bundle together cursor updates 
and gamma settings...


Change is Reviewed-by: Andrey Grodzovsky 

Thanks,
Andrey


-   if (ret)
-   goto fail;
-   }
-   }
-   } else {
-   for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-   if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
-   continue;
+   for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+   if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+   continue;
  
-			if (!new_crtc_state->enable)

-   continue;
+   if (!new_crtc_state->enable)
+   continue;
  
-			ret = drm_atomic_add_affected_connectors(state, crtc);

-   if (ret)
-   return ret;
+   ret = drm_atomic_add_affected_connectors(state, crtc);
+   if (ret)
+   return ret;
  
-			ret = drm_atomic_add_affected_planes(state, crtc);

-   if (ret)
-   goto fail;
-   }
+   ret = drm_atomic_add_affected_planes(state, crtc);
+   if (ret)
+   goto fail;
}
  
  	dm_state->context = dc_create_state();

--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdkfd: Delete a useless parameter from create_queue function pointer

2017-11-24 Thread Felix Kuehling
Hi Oded,

Yong made this patch against our internal KFD branch. I asked him to
send it to upstream as well, since it applies to code I have already
upstreamed. I'm going to do this for more patches in the future as more
KFD code is upstream and more of our internal changes are applicable
upstream directly.

Is this OK with you, or do you prefer getting all patches through me, in
roughly bi-weekly batches as part of my on-going upstreaming effort?

Hi Yong,

Which branch is this patch against? For upstream KFD commits, they
should be applied against amdkfd-next on
git://people.freedesktop.org/~gabbayo/linux. Oded will apply the patch
if he accepts it, so you only need read access to this repository.

Also, please remove the Change-Id from the commit message. It is
meaningless for upstream submissions outside of our Gerrit server.

Other than that, this patch is Reviewed-by: Felix Kuehling 

Regards,
  Felix


On 2017-11-24 03:57 PM, Yong Zhao wrote:
> Change-Id: Ia5c74ad567c30e206ed804b204fdf8a0f8a75a19
> Signed-off-by: Yong Zhao 
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  | 14 --
>  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h  |  3 +--
>  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c |  3 +--
>  3 files changed, 6 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index 53a66e8..1df1123 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -144,8 +144,7 @@ static void deallocate_vmid(struct device_queue_manager 
> *dqm,
>  
>  static int create_queue_nocpsch(struct device_queue_manager *dqm,
>   struct queue *q,
> - struct qcm_process_device *qpd,
> - int *allocated_vmid)
> + struct qcm_process_device *qpd)
>  {
>   int retval;
>  
> @@ -165,7 +164,6 @@ static int create_queue_nocpsch(struct 
> device_queue_manager *dqm,
>   if (retval)
>   goto out_unlock;
>   }
> - *allocated_vmid = qpd->vmid;
>   q->properties.vmid = qpd->vmid;
>  
>   if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
> @@ -176,10 +174,9 @@ static int create_queue_nocpsch(struct 
> device_queue_manager *dqm,
>   retval = -EINVAL;
>  
>   if (retval) {
> - if (list_empty(&qpd->queues_list)) {
> + if (list_empty(&qpd->queues_list))
>   deallocate_vmid(dqm, qpd, q);
> - *allocated_vmid = 0;
> - }
> +
>   goto out_unlock;
>   }
>  
> @@ -788,16 +785,13 @@ static void select_sdma_engine_id(struct queue *q)
>  }
>  
>  static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue 
> *q,
> - struct qcm_process_device *qpd, int *allocate_vmid)
> + struct qcm_process_device *qpd)
>  {
>   int retval;
>   struct mqd_manager *mqd;
>  
>   retval = 0;
>  
> - if (allocate_vmid)
> - *allocate_vmid = 0;
> -
>   mutex_lock(&dqm->lock);
>  
>   if (dqm->total_queue_count >= max_num_of_queues_per_device) {
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 
> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> index faf820a..449407a 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> @@ -84,8 +84,7 @@ struct device_process_node {
>  struct device_queue_manager_ops {
>   int (*create_queue)(struct device_queue_manager *dqm,
>   struct queue *q,
> - struct qcm_process_device *qpd,
> - int *allocate_vmid);
> + struct qcm_process_device *qpd);
>  
>   int (*destroy_queue)(struct device_queue_manager *dqm,
>   struct qcm_process_device *qpd,
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> index 03bec76..1e7bcae 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> @@ -199,8 +199,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
>   goto err_create_queue;
>   pqn->q = q;
>   pqn->kq = NULL;
> - retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
> - &q->properties.vmid);
> + retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
>   pr_debug("DQM returned %d for create_queue\n", retval);
>   print_queue(q);
>   break;

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: don't try to move pinned BOs

2017-11-24 Thread Michel Dänzer
On 2017-11-24 11:46 AM, Christian König wrote:
> Never try to move pinned BOs during CS.
> 
> Signed-off-by: Christian König 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 
>  1 file changed, 4 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index dbae7411e754..d15836b4826a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -414,6 +414,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
>   if (candidate->robj == validated)
>   break;
>  
> + /* We can't move pinned BOs here */
> + if (bo->pin_count)
> + continue;
> +
>   other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
>  
>   /* Check if this BO is in one of the domains we need space for */
> 

Reviewed-by: Michel Dänzer 


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/display: Print type if we get wrong ObjectID from bios

2017-11-24 Thread Harry Wentland
We've seen a bunch of issues where we can't get the connector from vbios
for what we think should be a valid connector id. Print some more info
when this happens.

Change-Id: I96aee657ed4632fbc0ab313671da356dc225c8c4
Signed-off-by: Harry Wentland 
---

Hi Shawn,

mind trying this patch? It won't fix anything but should give us a bit
more info on why you're getting this invalid connector ObjectId.

Harry

 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 13 ++---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  5 +++--
 2 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index aaaebd06d7ee..065f855fd39c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -190,6 +190,7 @@ static struct graphics_object_id bios_parser_get_connector_id(
struct bios_parser *bp = BP_FROM_DCB(dcb);
struct graphics_object_id object_id = dal_graphics_object_id_init(
0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+   uint16_t id;
 
uint32_t connector_table_offset = bp->object_info_tbl_offset
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
@@ -197,12 +198,18 @@ static struct graphics_object_id bios_parser_get_connector_id(
ATOM_OBJECT_TABLE *tbl =
GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
 
-   if (tbl && tbl->ucNumberOfObjects > i) {
-   const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+   if (!tbl) {
+   dm_error("Can't get connector table from atom bios.\n");
+   return object_id;
+   }
 
-   object_id = object_id_from_bios_object_id(id);
+   if (tbl->ucNumberOfObjects > i) {
+   dm_error("Can't find connector id %d in connector table of size %d.\n",
+i, tbl->ucNumberOfObjects);
}
 
+   id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+   object_id = object_id_from_bios_object_id(id);
return object_id;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 7b0e43c0685c..80fc02671e0e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -938,8 +938,9 @@ static bool construct(
link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
 
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
-   dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
-   __func__, init_params->connector_index);
+   dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
+   __func__, init_params->connector_index,
+   link->link_id.type, OBJECT_TYPE_CONNECTOR);
goto create_fail;
}
 
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: fix amdgpu_sync_resv v2

2017-11-24 Thread Christian König
Fixes a bug introduced by AMDGPU_GEM_CREATE_EXPLICIT_SYNC. We still need
to wait for pipelined moves in the shared fences list.

v2: fix typo

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a4bf21f8f1c1..bbbc40d630a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -191,9 +191,6 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
f = reservation_object_get_excl(resv);
r = amdgpu_sync_fence(adev, sync, f);
 
-   if (explicit_sync)
-   return r;
-
flist = reservation_object_get_list(resv);
if (!flist || r)
return r;
@@ -212,11 +209,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 (fence_owner == AMDGPU_FENCE_OWNER_VM)))
continue;
 
-   /* Ignore fence from the same owner as
+   /* Ignore fence from the same owner and explicit one as
 * long as it isn't undefined.
 */
if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-   fence_owner == owner)
+   (fence_owner == owner || explicit_sync))
continue;
}
 
-- 
2.11.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: don't try to move pinned BOs

2017-11-24 Thread Christian König
Never try to move pinned BOs during CS.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dbae7411e754..d15836b4826a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -414,6 +414,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
if (candidate->robj == validated)
break;
 
+   /* We can't move pinned BOs here */
+   if (bo->pin_count)
+   continue;
+
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 
/* Check if this BO is in one of the domains we need space for */
-- 
2.11.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: fix amdgpu_sync_resv

2017-11-24 Thread Christian König
Fixes a bug introduced by AMDGPU_GEM_CREATE_EXPLICIT_SYNC. We still need
to wait for pipelined moves in the shared fences list.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a4bf21f8f1c1..73f72e4e45f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -191,9 +191,6 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
f = reservation_object_get_excl(resv);
r = amdgpu_sync_fence(adev, sync, f);
 
-   if (explicit_sync)
-   return r;
-
flist = reservation_object_get_list(resv);
if (!flist || r)
return r;
@@ -212,11 +209,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 (fence_owner == AMDGPU_FENCE_OWNER_VM)))
continue;
 
-   /* Ignore fence from the same owner as
+   /* Ignore fence from the same owner and explicit one as
 * long as it isn't undefined.
 */
if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-   fence_owner == owner)
+   (fence_owner == owner || explicit))
continue;
}
 
-- 
2.11.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 28/43] drm/amd/display: Switch to drm_atomic_helper_wait_for_flip_done

2017-11-24 Thread Michel Dänzer
On 2017-11-23 08:53 PM, Harry Wentland wrote:
> From: Andrey Grodzovsky 
> 
> This new helper function is advised to be used for drivers that
> use the nonblocking commit tracking support instead of
> drm_atomic_helper_wait_for_vblanks.
> 
> Signed-off-by: Andrey Grodzovsky 
> Reviewed-by: Harry Wentland 

I gave

Reviewed-and-Tested-by: Michel Dänzer 

for this patch.
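
(For context: with nonblocking commit tracking, a driver's commit tail typically
uses the new helper roughly as in the sketch below. This is generic DRM
atomic-helper usage, not the amdgpu_dm code itself.)

	static void example_commit_tail(struct drm_atomic_state *old_state)
	{
		struct drm_device *dev = old_state->dev;

		drm_atomic_helper_commit_modeset_disables(dev, old_state);
		drm_atomic_helper_commit_planes(dev, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(dev, old_state);

		drm_atomic_helper_commit_hw_done(old_state);

		/* instead of drm_atomic_helper_wait_for_vblanks(dev, old_state) */
		drm_atomic_helper_wait_for_flip_done(dev, old_state);

		drm_atomic_helper_cleanup_planes(dev, old_state);
	}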


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v9 4/5] x86/PCI: Enable a 64bit BAR on AMD Family 15h (Models 30h-3fh) Processors v5

2017-11-24 Thread Boris Ostrovsky



On 11/23/2017 03:11 AM, Christian König wrote:

On 22.11.2017 at 18:27, Boris Ostrovsky wrote:

On 11/22/2017 11:54 AM, Christian König wrote:

On 22.11.2017 at 17:24, Boris Ostrovsky wrote:

On 11/22/2017 05:09 AM, Christian König wrote:

On 21.11.2017 at 23:26, Boris Ostrovsky wrote:

On 11/21/2017 08:34 AM, Christian König wrote:

Hi Boris,

attached are two patches.

The first one is a trivial fix for the infinite loop issue, it now
correctly aborts the fixup when it can't find address space for the
root window.

The second is a workaround for your board. It simply checks if there
is exactly one Processor Function to apply this fix on.

Both are based on Linus' current master branch. Please test if they fix
your issue.

Yes, they do fix it but that's because the feature is disabled.

Do you know what the actual problem was (on Xen)?

I still haven't understood what you actually did with Xen.

When you used PCI pass through with those devices then you have made a
major configuration error.

When the problem happened on dom0 then the explanation is most likely
that some PCI device ended up in the configured space, but the routing
was only setup correctly on one CPU socket.

The problem is that dom0 can be (and was in my case) booted with less
than full physical memory, and so the "rest" of the host memory is not
necessarily reflected in iomem. Your patch then tried to configure that
memory for MMIO and the system hung.

And so my guess is that this patch will break dom0 on a single-socket
system as well.

Oh, thanks!

I've thought about that possibility before, but wasn't able to find a
system which actually does that.

May I ask why the rest of the memory isn't reported to the OS?
That memory doesn't belong to the OS (dom0), it is owned by the 
hypervisor.



Sounds like I can't trust Linux resource management and probably need
to read the DRAM config to figure things out after all.


My question is whether what you are trying to do should ever be done for
a guest at all (any guest, not necessarily Xen).


The issue is probably that I don't know enough about Xen: What exactly 
is dom0? My understanding was that dom0 is the hypervisor, but that 
seems to be incorrect.


The issue is that under no circumstances *EVER* should a virtualized guest
have access to the PCI devices marked as "Processor Function" on
AMD platforms. Otherwise it is trivial to break out of the virtualization.


When dom0 is something like the system domain with all hardware access 
then the approach seems legitimate, but then the hypervisor should 
report the stolen memory to the OS using the e820 table.


When the hypervisor doesn't do that and the Linux kernel isn't aware
that there is memory at a given location, mapping PCI space there will
obviously crash the hypervisor.


Possible solutions as far as I can see are either disabling this feature 
when we detect that we are a Xen dom0, scanning the DRAM settings to 
update Linux resource handling or fixing Xen to report stolen memory to 
the dom0 OS as reserved.


Opinions?


You are right, these functions are not exposed to a regular guest.

I think for dom0 (which is a special Xen guest, with additional 
privileges) we may be able to add a reserved e820 region for host memory 
that is not assigned to dom0. Let me try it on Monday (I am out until then).
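
A rough sketch of that idea, purely hypothetical and assuming the post-4.12 x86
e820 API (the actual dom0 change may end up looking quite different):

	/*
	 * While building dom0's e820 map, report host RAM that is not
	 * assigned to dom0 as reserved instead of omitting it, so the
	 * kernel's resource tree never places an MMIO window on top of it.
	 */
	static void __init xen_reserve_unassigned_host_ram(u64 start, u64 size)
	{
		e820__range_add(start, size, E820_TYPE_RESERVED);
	}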


-boris
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 00/16] Cleanup vega10 header files.

2017-11-24 Thread Christian König

Thanks, finally somebody who wants to take care of this.

Whole series is Acked-by: Christian König .

Regards,
Christian.

On 24.11.2017 at 07:26, Feifei Xu wrote:

To avoid duplication of header files, amd/include/asic_reg/vega10
will be removed.
Header files under this folder will be moved to the corresponding
ip folders within asic_reg/.

Also removed some unused header files of vega10.

https://lists.freedesktop.org/archives/amd-gfx/2017-November/016191.html
Included above thread in this patch-set as they are all cleaning
up vega10 header files.

Patches are formatted with the flags --find-renames and --irreversible-delete.
This will omit the preimage for deletes and renames.
But the resulting patches are just for reviewing and are not meant to be
applied with git apply.

Feifei Xu (16):
   drm/amd/include:cleanup vega10 sdma0/1 header files.
   drm/amd/include:cleanup vega10 hdp header files.
   drm/amd/include:cleanup vega10 mp header files.
   drm/amd/include:cleanup vega10 athub header files.
   drm/amd/include:cleanup vega10 thm header files.
   drm/amd/include: cleanup vega10 umc header files.
   drm/amd/include:cleanup vega10 dce header files.
   drm/amd/include:cleanup vega10 uvd header files.
   drm/amd/include:cleanup vega10 vce header files.
   drm/amd/include:cleanup vega10 gc header files.
   drm/amd/include:cleanup vega10 mmhub header files.
   drm/amd/include:cleanup vega10 nbio header files.
   drm/amd/include:cleanup vega10 nbif header files.
   drm/amd/include:cleanup vega10 smuio header files.
   drm/amd/include:cleanup vega10 osssys header files.
   drm/amd/include:cleanup vega10 header files.

  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c|2 +-
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  |   10 +-
  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c   |   10 +-
  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  |   20 +-
  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c|   15 +-
  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c  |   10 +-
  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c |   10 +-
  drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c |4 +-
  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c |2 +-
  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c  |   12 +-
  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |   16 +-
  drivers/gpu/drm/amd/amdgpu/soc15.c |   24 +-
  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c  |   20 +-
  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c  |   12 +-
  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c  |4 +-
  drivers/gpu/drm/amd/amdgpu/vega10_ih.c |6 +-
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |2 +-
  .../amd/display/dc/dce120/dce120_hw_sequencer.c|6 +-
  .../drm/amd/display/dc/dce120/dce120_resource.c|8 +-
  .../display/dc/dce120/dce120_timing_generator.c|6 +-
  .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  |2 +-
  .../amd/display/dc/gpio/dce120/hw_factory_dce120.c |6 +-
  .../display/dc/gpio/dce120/hw_translate_dce120.c   |6 +-
  .../amd/display/dc/gpio/dcn10/hw_factory_dcn10.c   |2 +-
  .../amd/display/dc/gpio/dcn10/hw_translate_dcn10.c |2 +-
  .../amd/display/dc/i2caux/dce120/i2caux_dce120.c   |6 +-
  .../drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c |2 +-
  .../amd/display/dc/irq/dce120/irq_service_dce120.c |6 +-
  .../amd/display/dc/irq/dcn10/irq_service_dcn10.c   |2 +-
  .../amd/include/asic_reg/athub/athub_1_0_offset.h  |  453 +
  .../amd/include/asic_reg/athub/athub_1_0_sh_mask.h | 2045 
  .../asic_reg/{vega10/DC => dce}/dce_12_0_offset.h  |0
  .../asic_reg/{vega10/DC => dce}/dce_12_0_sh_mask.h |0
  .../asic_reg/{vega10/GC => gc}/gc_9_0_default.h|0
  .../asic_reg/{vega10/GC => gc}/gc_9_0_offset.h |0
  .../asic_reg/{vega10/GC => gc}/gc_9_0_sh_mask.h|0
  .../drm/amd/include/asic_reg/hdp/hdp_4_0_offset.h  |  209 +
  .../drm/amd/include/asic_reg/hdp/hdp_4_0_sh_mask.h |  601 ++
  .../{vega10/MMHUB => mmhub}/mmhub_1_0_default.h|0
  .../{vega10/MMHUB => mmhub}/mmhub_1_0_offset.h |0
  .../{vega10/MMHUB => mmhub}/mmhub_1_0_sh_mask.h|0
  .../drm/amd/include/asic_reg/mp/mp_9_0_offset.h|  375 +
  .../drm/amd/include/asic_reg/mp/mp_9_0_sh_mask.h   | 1463 +++
  .../{vega10/NBIF => nbif}/nbif_6_1_offset.h|0
  .../{vega10/NBIF => nbif}/nbif_6_1_sh_mask.h   |0
  .../{vega10/NBIO => nbio}/nbio_6_1_default.h   |0
  .../{vega10/NBIO => nbio}/nbio_6_1_offset.h|0
  .../{vega10/NBIO => nbio}/nbio_6_1_sh_mask.h   |0
  .../{vega10/OSSSYS => oss}/osssys_4_0_offset.h |0
  .../{vega10/OSSSYS => oss}/osssys_4_0_sh_mask.h|0
  .../amd/include/asic_reg/sdma0/sdma0_4_0_default.h |  286 +
  .../amd/include/asic_reg/sdma0/sdma0_4_0_offset.h  |  547 ++
  .../amd/include/asic_reg/sdma0/sdma0_4_0_sh_mask.h | 1852