@@ -193,6 +193,13 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 	intel_gtt_chipset_flush();
 }
 
+static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+				enum i915_cache_level level,
+				u32 flags)
+{
+	return addr | _PAGE_PRESENT;
+}
+
 #ifdef __NetBSD__
 static inline void
 gen8_set_pte(bus_space_tag_t bst, bus_space_handle_t bsh, unsigned i,
@@ -228,9 +235,9 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 
 #ifdef __NetBSD__
 	gen8_set_pte(ggtt->gsmt, ggtt->gsmh, offset / I915_GTT_PAGE_SIZE,
-	    gen8_pte_encode(addr, level, 0));
+	    gen8_ggtt_pte_encode(addr, level, 0));
 #else
-	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
 #endif
 
 	ggtt->invalidate(ggtt);
@@ -250,7 +257,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	struct sgt_iter sgt_iter;
 	gen8_pte_t __iomem *gtt_entries;
 #endif
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+	gen8_pte_t __iomem *gte;
+	gen8_pte_t __iomem *end;
 	dma_addr_t addr;
 
 	/*
@@ -274,11 +282,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 		KASSERT(len == 0);
 	}
 #else
-	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
-	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
-	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
-		gen8_set_pte(gtt_entries++, pte_encode | addr);
+	gte = (gen8_pte_t __iomem *)ggtt->gsm;
+	gte += vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
+	for_each_sgt_daddr(addr, iter, vma->pages)
+		gen8_set_pte(gte++, pte_encode | addr);
+	GEM_BUG_ON(gte > end);
 #endif
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (gte < end)
+		gen8_set_pte(gte++, vm->scratch[0].encode);
 
 	/*
 	 * We want to flush the TLBs only after we're certain all the PTE
@@ -329,8 +343,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	unsigned seg;
 	unsigned pgno;
 #else
-	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
-	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+	gen6_pte_t __iomem *gte;
+	gen6_pte_t __iomem *end;
 	struct sgt_iter iter;
 #endif
 	dma_addr_t addr;
@@ -355,8 +369,17 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 		/* XXX KASSERT(pgno <= ...)? */
 	}
 #else
+	gte = (gen6_pte_t __iomem *)ggtt->gsm;
+	gte += vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
 	for_each_sgt_daddr(addr, iter, vma->pages)
-		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+		iowrite32(vm->pte_encode(addr, level, flags), gte++);
+	GEM_BUG_ON(gte > end);
+
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (gte < end)
+		iowrite32(vm->scratch[0].encode, gte++);
 #endif
 
 	/*
@@ -1064,7 +1087,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
 	ggtt->vm.vma_ops.clear_pages = clear_pages;
 
-	ggtt->vm.pte_encode = gen8_pte_encode;
+	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
 
 	setup_private_pat(ggtt->vm.gt->uncore);
 