Zeppelin SQL error: Table not found

川长思鸟来 2022-07-13 20:40

Running a SQL query in Zeppelin fails with a "Table not found" error:

  org.apache.spark.sql.AnalysisException: Table not found: emobilelog; line 1 pos 21
    at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.getTable(Analyzer.scala:305)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$9.applyOrElse(Analyzer.scala:314)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$9.applyOrElse(Analyzer.scala:309)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:57)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:57)
    at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:69)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:56)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:281)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
    at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
    at scala.collection.AbstractIterator.to(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
    at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
    at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
    at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildren(TreeNode.scala:321)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:309)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:299)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:83)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:80)
    at scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:111)
    at scala.collection.immutable.List.foldLeft(List.scala:84)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:80)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:72)
    at scala.collection.immutable.List.foreach(List.scala:318)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:72)
    at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:36)
    at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:36)
    at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:34)
    at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:133)
    at org.apache.spark.sql.DataFrame$.apply(DataFrame.scala:52)
    at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:817)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.zeppelin.spark.SparkSqlInterpreter.interpret(SparkSqlInterpreter.java:115)
    at org.apache.zeppelin.interpreter.LazyOpenInterpreter.interpret(LazyOpenInterpreter.java:94)
    at org.apache.zeppelin.interpreter.remote.RemoteInterpreterServer$InterpretJob.jobRun(RemoteInterpreterServer.java:341)
    at org.apache.zeppelin.scheduler.Job.run(Job.java:176)
    at org.apache.zeppelin.scheduler.FIFOScheduler$1.run(FIFOScheduler.java:139)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
    at java.util.concurrent.FutureTask.run(FutureTask.java:262)
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:178)
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:292)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)

The usual cause is that the code creates its own new SQLContext(). In Spark 1.x each SQLContext keeps its own temp-table catalog, so tables registered on a DataFrame built with that private SQLContext cannot be found by Zeppelin's built-in SQLContext, which is what %sql paragraphs use.
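A minimal sketch of the failing pattern (Scala, Spark 1.x; mySqlContext and the sample data are hypothetical, and sqlc is assumed to be Zeppelin's binding for the built-in SQLContext):

  import org.apache.spark.sql.SQLContext

  // ANTI-PATTERN: a privately created SQLContext with its own temp-table catalog
  val mySqlContext = new SQLContext(sc)
  import mySqlContext.implicits._

  // Hypothetical sample data standing in for the real Elasticsearch DataFrame
  val df = sc.parallelize(Seq(("u1", "login"))).toDF("oid", "action")
  df.registerTempTable("emobilelog")

  mySqlContext.sql("select * from emobilelog").show() // works: same catalog
  sqlc.sql("select * from emobilelog").show()         // AnalysisException: Table not found
  // A %sql paragraph runs against the built-in context, so it fails the same way.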

Solution:

Always use Zeppelin's built-in SQLContext to create the DataFrame:

  import org.apache.spark.SparkContext
  import org.elasticsearch.spark._
  import org.apache.spark.sql._
  import org.elasticsearch.spark.sql._

  val options13 = Map("es.nodes" -> "168.7.1.67,168.7.1.68,168.7.1.69", "es.port" -> "9200")
  // val sqlContext = new SQLContext(sc)
  // Do NOT create a new SQLContext, or later %sql queries will not find the table.
  // Use Zeppelin's built-in SQLContext here instead, i.e. sqlc:
  val df: DataFrame = sqlc.esDF("log4j-emobilelog/emobilelog", "?q=_exists_:oid", options13)
  df.registerTempTable("emobilelog")
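
With the table registered on the built-in context, a %sql paragraph in the same notebook can now resolve it:

  %sql
  select * from emobilelog limit 10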
