I set up a Spark cluster with 1 master and 3 slaves.
Spark-1.2.0, Hadoop-2.4.0
After launching the Scala REPL with /bin/spark-shell from the Spark distribution, I entered the commands below. data.txt is a file in the local working directory.
scala> val inFile = sc.textFile("data.txt")
15/01/23 12:43:09 INFO MemoryStore: ensureFreeSpace(73391) called with curMem=369072, maxMem=278019440
15/01/23 12:43:09 INFO MemoryStore: Block broadcast_2 stored as values in memory (estimated size 71.7 KB, free 264.7 MB)
15/01/23 12:43:09 INFO MemoryStore: ensureFreeSpace(31711) called with curMem=442463, maxMem=278019440
15/01/23 12:43:09 INFO MemoryStore: Block broadcast_2_piece0 stored as bytes in memory (estimated size 31.0 KB, free 264.7 MB)
15/01/23 12:43:09 INFO BlockManagerInfo: Added broadcast_2_piece0 in memory on localhost:55952 (size: 31.0 KB, free: 265.1 MB)
15/01/23 12:43:09 INFO BlockManagerMaster: Updated info of block broadcast_2_piece0
15/01/23 12:43:09 INFO SparkContext: Created broadcast 2 from textFile at <console>:12
inFile: org.apache.spark.rdd.RDD[String] = data.txt MappedRDD[5] at textFile at <console>:12
scala> inFile.first()
org.apache.hadoop.mapred.InvalidInputException: Input path does not exist: hdfs://D1:9000/user/hadoop/data.txt
at org.apache.hadoop.mapred.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:285)
at org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:228)
at org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:304)
at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:201)
at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:205)
at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:203)
at scala.Option.getOrElse(Option.scala:120)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:203)
at org.apache.spark.rdd.MappedRDD.getPartitions(MappedRDD.scala:28)
at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:205)
at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:203)
at scala.Option.getOrElse(Option.scala:120)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:203)
at org.apache.spark.rdd.RDD.take(RDD.scala:1060)
at org.apache.spark.rdd.RDD.first(RDD.scala:1093)
at $iwC$$iwC$$iwC$$iwC.<init>(<console>:15)
at $iwC$$iwC$$iwC.<init>(<console>:20)
at $iwC$$iwC.<init>(<console>:22)
at $iwC.<init>(<console>:24)
at <init>(<console>:26)
at .<init>(<console>:30)
at .<clinit>(<console>)
at .<init>(<console>:7)
at .<clinit>(<console>)
at $print(<console>)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:483)
at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:852)
at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1125)
at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:674)
at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:705)
at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:669)
at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:828)
at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:873)
at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:785)
at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:628)
at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:636)
at org.apache.spark.repl.SparkILoop.loop(SparkILoop.scala:641)
at org.apache.spark.repl.SparkILoop$$anonfun$process$1.apply$mcZ$sp(SparkILoop.scala:968)
at org.apache.spark.repl.SparkILoop$$anonfun$process$1.apply(SparkILoop.scala:916)
at org.apache.spark.repl.SparkILoop$$anonfun$process$1.apply(SparkILoop.scala:916)
at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:916)
at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1011)
at org.apache.spark.repl.Main$.main(Main.scala:31)
at org.apache.spark.repl.Main.main(Main.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:483)
at org.apache.spark.deploy.SparkSubmit$.launch(SparkSubmit.scala:358)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:75)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
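Judging from the message, the bare path data.txt gets resolved against my HDFS home directory (hdfs://D1:9000/user/hadoop/), so I assume one workaround would be to upload the file to HDFS first, roughly:

hadoop fs -put data.txt /user/hadoop/data.txt

and then rerun the same two REPL lines (paths taken from the error message above). But that doesn't explain how to read a file that is only on the local disk.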
When I instead run Spark on a single machine, with slave and master on the same host, and enter the same commands, calling inFile.first() fails with the following error:
java.net.ConnectException: Call From D1/192.168.150.82 to D1:9000 failed on connection exception: java.net.ConnectException:
Isn't Spark supposed to be able to load local files, not just files from HDFS? Why do the errors above occur?
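For what it's worth, my understanding is that a path with no scheme is resolved against fs.defaultFS (here hdfs://D1:9000), which would explain the HDFS path in the first stack trace. Below is a minimal sketch of what I expected to work for a local file; the /home/hadoop path is just a placeholder for wherever data.txt actually lives:

// Hypothetical absolute path -- adjust to the real location of data.txt.
// With an explicit file:// scheme the path should be read from the local
// filesystem instead of being resolved against fs.defaultFS; in cluster
// mode the file would have to exist at the same path on every worker node.
val localFile = sc.textFile("file:///home/hadoop/data.txt")
localFile.first()

Is this the right way to do it, or is something else going on?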
One more question: at the
scala>
prompt, when I mistype a command I can't delete characters with Backspace, which is frustrating. Why is that? No matter how I search online, I can't find which key deletes a character in this shell.
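My guess is that the Backspace problem is a terminal erase-key mismatch (e.g., the terminal sending ^H while the line reader expects ^?) rather than anything in Spark itself. If so, running something like

stty erase ^H

in the shell before launching spark-shell might fix it, but I haven't confirmed this; it's only a hunch.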