/*
RDD.scala
*/
/**
 * Abbreviated sketch of Spark's core RDD abstraction, listing its principal fields.
 *
 * NOTE(review): several members below have elided bodies (a bare `=` or `{}`),
 * so this file does not compile as-is; the comments describe only what is visible.
 *
 * @param _sc  the SparkContext that created this RDD; @transient so it is not
 *             serialized when the RDD is shipped to executors
 * @param deps dependencies on parent RDDs; also @transient (not shipped with tasks)
 * @tparam T   element type; the ClassTag context bound presumably supports
 *             runtime array/class operations — TODO confirm against full source
 */
abstract class RDD[T: ClassTag](
@transient private var _sc: SparkContext,
@transient private var deps: Seq[Dependency[_]]
) extends Serializable with Logging {
// NOTE(review): declared to return SparkContext but the body is `{}` (Unit) —
// looks like an elided accessor; presumably returns _sc after a liveness check. TODO confirm.
private def sc: SparkContext = {}
// Spark configuration, delegated to the owning context.
private[spark] def conf = sc.conf
// Unique id assigned by the SparkContext at construction time.
val id: Int = sc.newRddId()
// Optional user-settable friendly name; transient, so not serialized.
@transient var name: String = _
/* state synchronization */
// Lock object guarding mutable state below; built from `Serializable` so a
// closure capturing it can still be serialized.
private val stateLock = new Serializable {}
/* partition */
// How elements are key-partitioned; None by default (subclasses may override).
@transient val partitioner: Option[Partitioner] = None
// Cached partitions array; @volatile for safe cross-thread publication,
// presumably computed lazily elsewhere in the full class — TODO confirm.
@volatile @transient private var partitions_ : Array[Partition] = _
/* dependencies */
// Cached dependency list; same lazy-cache pattern as partitions_ above.
@volatile private var dependencies_ : Seq[Dependency[_]] = _
/* storagelevel */
// Persistence level; stays NONE until persist()/cache() is called.
private var storageLevel: StorageLevel = StorageLevel.NONE
/* creation related */
// User call site that created this RDD (debugging / web UI).
@transient private[spark] val creationSite = sc.getCallSite()
// NOTE(review): initializer elided in this sketch.
@transient private[spark] val scope: Option[RDDOperationScope] =
/* checkpoint related */
// Checkpoint bookkeeping; set once checkpointing is requested.
private[spark] var checkpointData: Option[RDDCheckpointData[T]] = None
// NOTE(review): initializer elided in this sketch.
private val checkpointAllMarkedAncestors =
// Guards against running the doCheckpoint logic more than once.
@transient private var doCheckpointCalled = false
// NOTE(review): initializer elided in this sketch.
@transient protected lazy val isBarrier_ : Boolean =
// NOTE(review): initializer elided in this sketch.
private[spark] final lazy val outputDeterministicLevel: DeterministicLevel.Value =
}
// Note: real implementations are not necessarily exactly like the above; this is just a common shape.
// TODO: I don't yet understand how this is actually implemented.