Github user JoshRosen commented on a diff in the pull request:

    https://github.com/apache/spark/pull/10835#discussion_r50781539
  
    --- Diff: core/src/main/scala/org/apache/spark/Accumulator.scala ---
    @@ -75,43 +84,65 @@ private[spark] object Accumulators extends Logging {
        * This global map holds the original accumulator objects that are 
created on the driver.
        * It keeps weak references to these objects so that accumulators can be 
garbage-collected
        * once the RDDs and user-code that reference them are cleaned up.
    +   * TODO: Don't use a global map; these should be tied to a SparkContext 
at the very least.
        */
    +  @GuardedBy("Accumulators")
       val originals = mutable.Map[Long, WeakReference[Accumulable[_, _]]]()
     
    -  private var lastId: Long = 0
    +  private val nextId = new AtomicLong(0L)
     
    -  def newId(): Long = synchronized {
    -    lastId += 1
    -    lastId
    -  }
    +  /**
    +   * Return a globally unique ID for a new [[Accumulable]].
    +   * Note: Once you copy the [[Accumulable]] the ID is no longer unique.
    +   */
    +  def newId(): Long = nextId.getAndIncrement
     
    +  /**
    +   * Register an [[Accumulable]] created on the driver such that it can be 
used on the executors.
    +   *
    +   * All accumulators registered here can later be used as a container for 
accumulating partial
    +   * values across multiple tasks. This is what 
[[org.apache.spark.scheduler.DAGScheduler]] does.
    +   * Note: if an accumulator is registered here, it should also be 
registered with the active
    +   * context cleaner for cleanup so as to avoid memory leaks.
    +   *
    +   * If an [[Accumulable]] with the same ID was already registered, do 
nothing instead of
    +   * overwriting it. This happens when we copy accumulators, e.g. when we 
reconstruct
    +   * [[org.apache.spark.executor.TaskMetrics]] from accumulator updates.
    +   */
       def register(a: Accumulable[_, _]): Unit = synchronized {
    -    originals(a.id) = new WeakReference[Accumulable[_, _]](a)
    +    if (!originals.contains(a.id)) {
    +      originals(a.id) = new WeakReference[Accumulable[_, _]](a)
    +    }
       }
     
    -  def remove(accId: Long) {
    -    synchronized {
    -      originals.remove(accId)
    -    }
    +  /**
    +   * Unregister the [[Accumulable]] with the given ID, if any.
    +   */
    +  def remove(accId: Long): Unit = synchronized {
    +    originals.remove(accId)
       }
     
    -  // Add values to the original accumulators with some given IDs
    -  def add(values: Map[Long, Any]): Unit = synchronized {
    -    for ((id, value) <- values) {
    -      if (originals.contains(id)) {
    -        // Since we are now storing weak references, we must check whether 
the underlying data
    -        // is valid.
    -        originals(id).get match {
    -          case Some(accum) => accum.asInstanceOf[Accumulable[Any, Any]] 
++= value
    -          case None =>
    -            throw new IllegalAccessError("Attempted to access garbage 
collected Accumulator.")
    -        }
    -      } else {
    -        logWarning(s"Ignoring accumulator update for unknown accumulator 
id $id")
    +  /**
    +   * Return the [[Accumulable]] registered with the given ID, if any.
    +   */
    +  def get(id: Long): Option[Accumulable[_, _]] = synchronized {
    +    originals.get(id).map { weakRef =>
    +      // Since we are storing weak references, we must check whether the 
underlying data is valid.
    +      weakRef.get match {
    --- End diff --
    
    This can be written as a `getOrElse(throw ...)`.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes to enable it, or if the feature is enabled but not working,
please contact infrastructure at [email protected] or file a JIRA
ticket with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to