Github user tgravescs commented on a diff in the pull request:

    https://github.com/apache/spark/pull/94#discussion_r10384191
  
    --- Diff: core/src/test/scala/org/apache/spark/PipedRDDSuite.scala ---
    @@ -89,4 +97,37 @@ class PipedRDDSuite extends FunSuite with SharedSparkContext {
         }
       }
     
    +  test("test pipe exports map_input_file") {
    +    testExportInputFile("map_input_file")
    +  }
    +
    +  test("test pipe exports mapreduce_map_input_file") {
    +    testExportInputFile("mapreduce_map_input_file")
    +  }
    +
    +  def testExportInputFile(varName:String) {
     +    val nums = new HadoopRDD(sc, new JobConf(), classOf[TextInputFormat], classOf[LongWritable],
     +        classOf[Text], 2) {
     +      override def getPartitions: Array[Partition] = Array(generateFakeHadoopPartition())
    +      override val getDependencies = List[Dependency[_]]()
    +      override def compute(theSplit: Partition, context: TaskContext) = {
     +        new InterruptibleIterator[(LongWritable, Text)](context, Iterator((new LongWritable(1),
     +          new Text("b"))))
    +      }
    +    }
    +    val hadoopPart1 = generateFakeHadoopPartition()
    +    val pipedRdd = new PipedRDD(nums, "printenv " + varName)
    --- End diff --
    
    I'm definitely fine with fixing it; I was just wondering whether we have some generic utilities that already handle this for various platforms, or perhaps a class that handles calling the correct command depending on the OS.
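
    For illustration, here is a minimal sketch of the kind of OS-aware helper I have in mind. The object `PipeTestCommands` and method `printEnvCommand` are hypothetical, not existing Spark utilities:

        // Hypothetical helper: pick an OS-appropriate command for printing an
        // environment variable, so tests don't hard-code "printenv".
        object PipeTestCommands {
          def printEnvCommand(varName: String): Seq[String] = {
            val isWindows =
              System.getProperty("os.name").toLowerCase.contains("windows")
            if (isWindows) {
              // cmd.exe has no printenv; echo the variable instead.
              Seq("cmd", "/C", "echo %" + varName + "%")
            } else {
              Seq("printenv", varName)
            }
          }
        }

        // e.g. something like: new PipedRDD(nums, PipeTestCommands.printEnvCommand(varName))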

