spark-issues mailing list archives

From "Michael Armbrust (JIRA)" <j...@apache.org>
Subject [jira] [Updated] (SPARK-9831) TPCDS Q73 Fails
Date Tue, 11 Aug 2015 18:44:47 GMT

     [ https://issues.apache.org/jira/browse/SPARK-9831?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Michael Armbrust updated SPARK-9831:
------------------------------------
    Description: 
{code}
select
  c_last_name,
  c_first_name,
  c_salutation,
  c_preferred_cust_flag,
  ss_ticket_number,
  cnt
from
  (select
    ss_ticket_number,
    ss_customer_sk,
    count(*) cnt
  from
    store_sales
    join household_demographics on (store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk)
    join store on (store_sales.ss_store_sk = store.s_store_sk)
    -- join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
  where
    store.s_county in ('Saginaw County', 'Sumner County', 'Appanoose County', 'Daviess County')
    -- and date_dim.d_dom between 1 and 2
    -- and date_dim.d_year in(1998, 1998 + 1, 1998 + 2)
    -- and ss_date between '1999-01-01' and '2001-12-02'
    -- and dayofmonth(ss_date) in (1,2)
    -- partition key filter
    -- and ss_sold_date_sk in (2450816, 2450846, 2450847, 2450874, 2450875, 2450905, 2450906, 2450935, 2450936, 2450966, 2450967,
    --                         2450996, 2450997, 2451027, 2451028, 2451058, 2451059, 2451088, 2451089, 2451119, 2451120, 2451149,
    --                         2451150, 2451180, 2451181, 2451211, 2451212, 2451239, 2451240, 2451270, 2451271, 2451300, 2451301,
    --                         2451331, 2451332, 2451361, 2451362, 2451392, 2451393, 2451423, 2451424, 2451453, 2451454, 2451484,
    --                         2451485, 2451514, 2451515, 2451545, 2451546, 2451576, 2451577, 2451605, 2451606, 2451636, 2451637,
    --                         2451666, 2451667, 2451697, 2451698, 2451727, 2451728, 2451758, 2451759, 2451789, 2451790, 2451819,
    --                         2451820, 2451850, 2451851, 2451880, 2451881)
    and (household_demographics.hd_buy_potential = '>10000'
      or household_demographics.hd_buy_potential = 'unknown')
    and household_demographics.hd_vehicle_count > 0
    and case when household_demographics.hd_vehicle_count > 0 then household_demographics.hd_dep_count / household_demographics.hd_vehicle_count else null end > 1
    and ss_sold_date_sk between 2451180 and 2451269 -- partition key filter (3 months)
  group by
    ss_ticket_number,
    ss_customer_sk
  ) dj
  join customer on (dj.ss_customer_sk = customer.c_customer_sk)
where
  cnt between 1 and 5
order by
  cnt desc
limit 1000
{code}

{code}
Job aborted due to stage failure: Task 63 in stage 57.0 failed 4 times, most recent failure: Lost task 63.3 in stage 57.0 (TID 5103, 10.0.197.102): java.io.IOException: java.lang.IllegalArgumentException: Initial capacity must be greater than 0
	at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1264)
	at org.apache.spark.sql.execution.joins.UnsafeHashedRelation.readExternal(HashedRelation.scala:280)
	at java.io.ObjectInputStream.readExternalData(ObjectInputStream.java:1837)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1796)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1350)
	at java.io.ObjectInputStream.readObject(ObjectInputStream.java:370)
	at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:72)
	at org.apache.spark.broadcast.TorrentBroadcast$.unBlockifyObject(TorrentBroadcast.scala:217)
	at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$readBroadcastBlock$1.apply(TorrentBroadcast.scala:178)
	at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1276)
	at org.apache.spark.broadcast.TorrentBroadcast.readBroadcastBlock(TorrentBroadcast.scala:165)
	at org.apache.spark.broadcast.TorrentBroadcast._value$lzycompute(TorrentBroadcast.scala:64)
	at org.apache.spark.broadcast.TorrentBroadcast._value(TorrentBroadcast.scala:64)
	at org.apache.spark.broadcast.TorrentBroadcast.getValue(TorrentBroadcast.scala:88)
	at org.apache.spark.broadcast.Broadcast.value(Broadcast.scala:70)
	at org.apache.spark.sql.execution.joins.BroadcastHashJoin$$anonfun$2.apply(BroadcastHashJoin.scala:91)
	at org.apache.spark.sql.execution.joins.BroadcastHashJoin$$anonfun$2.apply(BroadcastHashJoin.scala:90)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:706)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:706)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
	at org.apache.spark.scheduler.Task.run(Task.scala:88)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.IllegalArgumentException: Initial capacity must be greater than 0
	at org.apache.spark.unsafe.map.BytesToBytesMap.<init>(BytesToBytesMap.java:185)
	at org.apache.spark.unsafe.map.BytesToBytesMap.<init>(BytesToBytesMap.java:203)
	at org.apache.spark.sql.execution.joins.UnsafeHashedRelation$$anonfun$readExternal$1.apply$mcV$sp(HashedRelation.scala:295)
	at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1261)
	... 40 more
{code}
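
The exception is thrown while deserializing the broadcast side of a join: UnsafeHashedRelation.readExternal rebuilds a BytesToBytesMap, whose constructor rejects an initial capacity of 0. That suggests the relation being broadcast deserializes to zero rows. A minimal Scala sketch of the invariant, assuming (hypothetically, not Spark source) that the map capacity is derived from the serialized key count:

{code}
// Illustrative only, not Spark source: mirrors the check that fires in the
// trace above when the broadcast hash relation deserializes empty.
object EmptyRelationRepro {
  // Hypothetical sizing: capacity taken from the number of deserialized keys.
  def mapCapacity(nKeys: Int): Int = {
    if (nKeys <= 0) {
      // Same message as the BytesToBytesMap constructor check in the trace.
      throw new IllegalArgumentException("Initial capacity must be greater than 0")
    }
    nKeys
  }

  def main(args: Array[String]): Unit = {
    mapCapacity(0) // an empty broadcast side reproduces the failure
  }
}
{code}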

  was:
{code}
select c_last_name
      ,c_first_name
      ,c_salutation
      ,c_preferred_cust_flag
      ,ss_ticket_number
      ,cnt from
        (select ss_ticket_number
          ,ss_customer_sk
          ,count(*) cnt
          from store_sales
          JOIN date_dim ON store_sales.ss_sold_date_sk = date_dim.d_date_sk
        JOIN store ON store_sales.ss_store_sk = store.s_store_sk
        JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
        where
        date_dim.d_dom between 1 and 2
        and (household_demographics.hd_buy_potential = '>10000' or
    household_demographics.hd_buy_potential = 'unknown')
    and household_demographics.hd_vehicle_count > 0
    and case when household_demographics.hd_vehicle_count > 0 then
      household_demographics.hd_dep_count / household_demographics.hd_vehicle_count else null end > 1
    and date_dim.d_year in (1998,1998+1,1998+2)
    and store.s_county in ('Williamson County','Williamson County','Williamson County','Williamson County')
    group by ss_ticket_number,ss_customer_sk) dj
    JOIN customer ON dj.ss_customer_sk = customer.c_customer_sk
    where
    cnt between 5 and 10
    order by cnt desc
{code}



> TPCDS Q73 Fails
> ---------------
>
>                 Key: SPARK-9831
>                 URL: https://issues.apache.org/jira/browse/SPARK-9831
>             Project: Spark
>          Issue Type: Sub-task
>          Components: SQL
>            Reporter: Michael Armbrust
>            Assignee: Davies Liu
>            Priority: Blocker
>



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)

