Further step by step build
parent 21685191e7
commit 297a2d4969
@ -18,6 +18,7 @@ package com.ionspin.kotlin.crypto.keyderivation
import com.ionspin.kotlin.bignum.integer.toBigInteger
import com.ionspin.kotlin.crypto.hash.blake2b.Blake2b
import com.ionspin.kotlin.crypto.util.*

/**
*
* Further resources and examples of implementation:
@ -63,15 +64,14 @@ class Argon2 internal constructor(
}



@ExperimentalStdlibApi
companion object {

fun Array<UByte>.xor(target : Array<UByte>, other : Array<UByte>) {
fun Array<UByte>.xor(target: Array<UByte>, other: Array<UByte>) {
if (this.size != other.size || this.size != target.size) {
throw RuntimeException("Invalid array sizes, this ${this.size}, other ${other.size}")
}
target.mapIndexed { index, _ -> this[index] xor other[index]}
target.mapIndexed { index, _ -> this[index] xor other[index] }
}


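For reference, a minimal standalone sketch of the element-wise XOR these helpers implement (the function name below is illustrative, not part of the commit); note that mapIndexed in the version above only returns a new list and does not write into target:

// Illustrative sketch: element-wise XOR of two equally sized UByte arrays.
fun xorBlocks(first: Array<UByte>, second: Array<UByte>): Array<UByte> {
    require(first.size == second.size) { "Operands must have the same size" }
    return Array(first.size) { i -> first[i] xor second[i] }
}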
@ -99,6 +99,7 @@ class Argon2 internal constructor(

fun compressionFunctionG(x: Array<UByte>, y: Array<UByte>): Array<UByte> {
val r = x xor y
// Xor works in first pass!
// val r = Array<UByte>(1024) { 0U } // view as 8x8 matrix of 16 byte registers
// x.forEachIndexed { index, it -> r[index] = it xor y[index] } // R = X xor Y
val q = Array<UByte>(1024) { 0U }
@ -111,7 +112,7 @@ class Argon2 internal constructor(
.map { it.toLittleEndianUByteArray() }
.flatMap { it.asIterable() }
.toTypedArray()
.copyInto(q, startOfRow, endOfRow)
.copyInto(q, startOfRow)
}
// Do the argon/blake2b mixing on columns
for (i in 0..7) {
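The switch from copyInto(q, startOfRow, endOfRow) to copyInto(q, startOfRow) matters because of the Kotlin stdlib signature copyInto(destination, destinationOffset, startIndex, endIndex): in the old call, endOfRow was interpreted as a source startIndex rather than the end of a destination range. A minimal sketch of the intended step, assuming mixedRow holds the freshly mixed row bytes (the helper name is illustrative):

// Illustrative sketch: write one mixed row back into the 1024-byte working block q.
fun writeRow(q: Array<UByte>, mixedRow: Array<UByte>, startOfRow: Int) {
    // Copies all of mixedRow into q, starting at the row's offset.
    mixedRow.copyInto(q, destinationOffset = startOfRow)
}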
@ -179,7 +180,7 @@ class Argon2 internal constructor(

private fun computeIndexes(
indexContext: IndexContext,
matrix : Array<Array<Array<UByte>>>
matrix: Array<Array<Array<UByte>>>
): Pair<Int, Int> {
val block = indexContext.indexMatrix
val parallelism = indexContext.parallelism
@ -258,11 +259,20 @@ class Argon2 internal constructor(
val laneCounter: Int
)

private fun computeIndexNew(matrix : Array<Array<Array<UByte>>>, lane: Int, column: Int, columnCount: Int, parallelism: Int, iteration : Int, slice : Int, argonType: ArgonType) : Pair<Int, Int> {
private fun computeIndexNew(
matrix: Array<Array<Array<UByte>>>,
lane: Int,
column: Int,
columnCount: Int,
parallelism: Int,
iteration: Int,
slice: Int,
argonType: ArgonType
): Pair<Int, Int> {
val (j1, j2) = when (argonType) {
ArgonType.Argon2d -> {
val previousBlock = if (column == 0) {
matrix[lane - 1][columnCount - 1]
matrix[lane][columnCount - 1] //Get last block in the SAME lane
} else {
matrix[lane][column - 1]
}
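For context, in Argon2d the pseudo-random values J1 and J2 are data dependent and come from the previous block itself, as its first two little-endian 32-bit words (RFC 9106). A hedged sketch of that extraction, with an illustrative helper name:

// Illustrative sketch: read J1 and J2 as the first two little-endian 32-bit words of the previous block.
fun extractJ1J2(previousBlock: Array<UByte>): Pair<UInt, UInt> {
    fun wordAt(offset: Int): UInt =
        previousBlock[offset].toUInt() or
            (previousBlock[offset + 1].toUInt() shl 8) or
            (previousBlock[offset + 2].toUInt() shl 16) or
            (previousBlock[offset + 3].toUInt() shl 24)
    return Pair(wordAt(0), wordAt(4))
}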
@ -296,24 +306,78 @@ class Argon2 internal constructor(
// blocks in the last SL - 1 = 3 segments computed and finished in
// lane l. If B[i][j] is the first block of a segment, then the
// very last index from W is excluded.
if (iteration == 0) {
val referenceAreaSize = if (iteration == 0) {
if (slice == 0) {
//All indices except the previous
val from0Until = column - 1
column - 1
} else {
if (lane == l) {
//Same lane
val from0Until = slice * (columnCount / 4) + column - 1
column - 1
} else {
val from0Until = slice * (columnCount / 4) + if(column == 0) { -1 } else { 0 }
slice * (columnCount / 4) + if (column % (columnCount / 4) == 0) { // Check if column is first block of the SEGMENT
-1
} else {
0
}
}
}
} else {
if (lane == l) {
columnCount - (columnCount / 4) + column - 1
} else {
columnCount - (columnCount / 4) + if (column == 0) {
-1
} else {
0
}
}
}

val availableIndicesSet =
val x = (j1.toULong() * j1) shr 32
val y = (referenceAreaSize.toULong() * x) shr 32
val z = referenceAreaSize.toULong() - 1U - y

return Pair(l, j2.toInt())
val startPosition = if (iteration == 0) {
0
} else {
if (slice == 3) {
0
} else {
(slice + 1) * (columnCount / 4) //TODO replace all of these with segment length when consolidating variables
}
}

val absolutePosition = (startPosition + z.toInt()) % columnCount

return Pair(l, absolutePosition)
}
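The x/y/z arithmetic above is the spec's mapping of a 32-bit J1 onto the reference area of size |R|: x = J1^2 / 2^32, y = (|R| * x) / 2^32, z = |R| - 1 - y, a non-uniform distribution that favours more recently written blocks. As a standalone sketch (assuming j1 is a full 32-bit word and referenceAreaSize is positive; the function name is illustrative):

// Maps a 32-bit J1 onto [0, referenceAreaSize) with the Argon2 non-uniform distribution.
fun mapJ1ToRelativePosition(j1: UInt, referenceAreaSize: Int): Int {
    val x = (j1.toULong() * j1.toULong()) shr 32
    val y = (referenceAreaSize.toULong() * x) shr 32
    val z = referenceAreaSize.toULong() - 1UL - y
    return z.toInt()
}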

data class ArgonContext(
val password: Array<UByte>,
val salt: Array<UByte>,
val parallelism: UInt,
val tagLength: UInt,
val memorySize: UInt,
val numberOfIterations: UInt,
val versionNumber: UInt,
val key: Array<UByte>,
val associatedData: Array<UByte>,
val type: ArgonType
)

data class ArgonInternalContext(
val matrix: Array<Array<Array<UByte>>>,
val blockCount : UInt,
val columnCount : Int,
val segmentLength: Int
)

data class SegmentPosition(
val iteration: Int,
val lane: Int,
val slice: Int
)

internal fun derive(
password: Array<UByte>,
@ -327,10 +391,24 @@ class Argon2 internal constructor(
associatedData: Array<UByte>,
type: ArgonType
): Array<UByte> {
val argonContext = ArgonContext(
password = password,
salt = salt,
parallelism = parallelism,
tagLength = tagLength,
memorySize = memorySize,
numberOfIterations = numberOfIterations,
versionNumber = versionNumber,
key = key,
associatedData = associatedData,
type = type
)

println("H0 Input")
val toDigest = parallelism.toLittleEndianUByteArray() + tagLength.toLittleEndianUByteArray() + memorySize.toLittleEndianUByteArray() +
numberOfIterations.toLittleEndianUByteArray() + versionNumber.toLittleEndianUByteArray() + type.typeId.toUInt().toLittleEndianUByteArray() +
val toDigest =
parallelism.toLittleEndianUByteArray() + tagLength.toLittleEndianUByteArray() + memorySize.toLittleEndianUByteArray() +
numberOfIterations.toLittleEndianUByteArray() + versionNumber.toLittleEndianUByteArray() + type.typeId.toUInt()
.toLittleEndianUByteArray() +
password.size.toUInt().toLittleEndianUByteArray() + password +
salt.size.toUInt().toLittleEndianUByteArray() + salt +
key.size.toUInt().toLittleEndianUByteArray() + key +
@ -339,7 +417,8 @@ class Argon2 internal constructor(
println("Marker H0 Input end")
val h0 = Blake2b.digest(
parallelism.toLittleEndianUByteArray() + tagLength.toLittleEndianUByteArray() + memorySize.toLittleEndianUByteArray() +
numberOfIterations.toLittleEndianUByteArray() + versionNumber.toLittleEndianUByteArray() + type.typeId.toUInt().toLittleEndianUByteArray()+
numberOfIterations.toLittleEndianUByteArray() + versionNumber.toLittleEndianUByteArray() + type.typeId.toUInt()
.toLittleEndianUByteArray() +
password.size.toUInt().toLittleEndianUByteArray() + password +
salt.size.toUInt().toLittleEndianUByteArray() + salt +
key.size.toUInt().toLittleEndianUByteArray() + key +
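For orientation, the bytes concatenated here follow the Argon2 pre-hashing digest H0 = Blake2b-512( LE32(p) || LE32(T) || LE32(m) || LE32(t) || LE32(v) || LE32(y) || LE32(|P|) || P || LE32(|S|) || S || LE32(|K|) || K || LE32(|X|) || X ), where p is the parallelism, T the tag length, m the memory size, t the iteration count, v the version, y the type id, P the password, S the salt, K the key and X the associated data.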
@ -370,6 +449,7 @@ class Argon2 internal constructor(
h0 + 0.toUInt().toLittleEndianUByteArray() + i.toUInt().toLittleEndianUByteArray(),
1024U
)
println("Start, matrix [$i][0]")
matrix[i][0].hexColumsPrint(16)
println("Marker, matrix [$i][0]")
}
@ -381,69 +461,144 @@ class Argon2 internal constructor(
h0 + 1.toUInt().toLittleEndianUByteArray() + i.toUInt().toLittleEndianUByteArray(),
1024U
)
println("Start, matrix [$i][1]")
matrix[i][1].hexColumsPrint(16)
println("Marker, matrix [$i][1]")
}
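These two loops correspond to the spec's initial blocks B[i][0] = H'(H0 || LE32(0) || LE32(i)) and B[i][1] = H'(H0 || LE32(1) || LE32(i)), where H' is the variable-length hash built on Blake2b; the 1024U argument is presumably this implementation's way of requesting a full 1024-byte block from its Blake2b wrapper.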

// ---- Good until here at least ----
val argonInternalContext = ArgonInternalContext(
matrix, blockCount, columnCount, segmentLength
)
singleThreaded(argonContext, argonInternalContext)

return emptyArray()
}

fun singleThreaded(argonContext: ArgonContext, argonInternalContext: ArgonInternalContext ) {
for (iteration in 0 until argonContext.numberOfIterations.toInt()) {
for (slice in 0 until 4) {
for (lane in 0 until argonContext.parallelism.toInt()) {
println("Processing segment I: $iteration, S: $slice, L: $lane")
val segmentPosition = SegmentPosition(iteration, lane, slice)
processSegment(argonContext, argonInternalContext, segmentPosition)
}
}
}
}
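The schedule here follows the spec's constraints: passes and the four slices within a pass are sequential, while the lanes inside one slice are independent of each other. That independence is what a later multithreaded variant could exploit; a hypothetical sketch using kotlinx.coroutines (not part of this commit, names reused from the code above):

import kotlinx.coroutines.coroutineScope
import kotlinx.coroutines.joinAll
import kotlinx.coroutines.launch

// Hypothetical parallel schedule: slices stay sequential, lanes within a slice run concurrently.
suspend fun multiThreaded(argonContext: ArgonContext, argonInternalContext: ArgonInternalContext) {
    for (iteration in 0 until argonContext.numberOfIterations.toInt()) {
        for (slice in 0 until 4) {
            coroutineScope {
                (0 until argonContext.parallelism.toInt()).map { lane ->
                    launch {
                        processSegment(argonContext, argonInternalContext, SegmentPosition(iteration, lane, slice))
                    }
                }.joinAll()
            }
        }
    }
}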

fun processSegment(argonContext: ArgonContext, argonInternalContext: ArgonInternalContext, segmentPosition: SegmentPosition) {
val password = argonContext.password
val salt = argonContext.salt
val parallelism = argonContext.parallelism
val tagLength = argonContext.tagLength
val memorySize = argonContext.memorySize
val numberOfIterations = argonContext.numberOfIterations
val versionNumber = argonContext.versionNumber
val key = argonContext.key
val associatedData = argonContext.associatedData
val type = argonContext.type

val matrix = argonInternalContext.matrix
val blockCount = argonInternalContext.blockCount
val columnCount = argonInternalContext.columnCount
val segmentLength = argonInternalContext.segmentLength

val iteration = segmentPosition.iteration
val lane = segmentPosition.lane
val slice = segmentPosition.slice


if (iteration == 0) {
//Compute B[i][j]
//Using B[i][j] = G(B[i][j], B[l][z]) where l and z are provided bu computeIndexes
for (i in 0 until parallelism.toInt()) {
for (j in 2..columnCount) {
val (l, z) = computeIndexNew(matrix, i, j, columnCount, parallelism.toInt(), 0, 0, type)
matrix[i][j] = compressionFunctionG(matrix[i][j], matrix[l][z])
//Because this is iteration 0 we have B[i][0] and B[i][1] already filled, so whenever we
//are processing first segment we skip these two blocks
if (slice == 0) {
for (column in 2..(slice * segmentLength)) {
val (l, z) = computeIndexNew(matrix, lane, column, columnCount, parallelism.toInt(), 0, 0, type)
println("Calling compress for I: $iteration S: $slice Lane: $lane Column: $column with l: $l z: $z")
matrix[lane][column] = compressionFunctionG(matrix[lane][column - 1], matrix[l][z])
}
} else {
for (column in (slice * segmentLength)..((slice + 1) * segmentLength)) {
val (l, z) = computeIndexNew(matrix, lane, column, columnCount, parallelism.toInt(), iteration, slice, type)
println("Calling compress for I: $iteration S: $slice Lane: $lane Column: $column with l: $l z: $z")
matrix[lane][column] = compressionFunctionG(matrix[lane][column - 1], matrix[l][z])
}
}
//Remaining iteration
val remainingIterations = (1..numberOfIterations.toInt()).map { iteration ->
} else {
val (l, z) = computeIndexNew(matrix, lane, 0, columnCount, parallelism.toInt(), 0, 0, type)
matrix[lane][0] = compressionFunctionG(matrix[lane][columnCount - 1], matrix[l][z])
for (column in 1..(slice * segmentLength)) {
val (l, z) = computeIndexNew(matrix, lane, column, columnCount, parallelism.toInt(), 0, 0, type)
println("Calling compress for I: $iteration S: $slice Lane: $lane Column: $column with l: $l z: $z")
matrix[lane][column] = compressionFunctionG(matrix[lane][column - 1], matrix[l][z])
}

for (i in 0 until parallelism.toInt()) {
for (j in 0 until columnCount) {
// val indexContext = IndexContext(
// indexMatrix = emptyArray(),
// parallelism = parallelism,
// pass = pass,
// lane = i,
// column = j,
// blockCount = blockCount,
// iterationCount = numberOfIterations,
// type = type,
// laneCounter = 0
}




// //Remaining iteration
// val remainingIterations = (1..numberOfIterations.toInt()).map { iteration ->
//
// for (i in 0 until parallelism.toInt()) {
// for (j in 0 until columnCount) {
// val (l, z) = computeIndexNew(
// matrix,
// i,
// j,
// columnCount,
// parallelism.toInt(),
// iteration,
// iteration / segmentLength,
// type
// )
// if (j == 0) {
// matrix[i][j] = compressionFunctionG(matrix[i][columnCount - 1], matrix[l][z])
// } else {
// matrix[i][j] = compressionFunctionG(matrix[i][j - 1], matrix[l][z])
// }
//
// }
// }
//
//
// val result = matrix.foldIndexed(emptyArray<UByte>()) { lane, acc, laneArray ->
// return if (acc.size == 0) {
// acc + laneArray[columnCount - 1] // add last element in first lane to the accumulator
// } else {
// // For each element in our accumulator, xor it with an appropriate element from the last column in current lane (from 1 to `parallelism`)
// acc.mapIndexed { index, it -> it xor laneArray[columnCount - 1][index] }
// .toTypedArray()
// }
// }
// result
// }

val (l,z) = computeIndexNew(matrix, i, j, columnCount, parallelism.toInt(), iteration, iteration / segmentLength, type)
if (j == 0) {
matrix[i][j] = compressionFunctionG(matrix[i][columnCount - 1], matrix[l][z])
} else {
matrix[i][j] = compressionFunctionG(matrix[i][j - 1], matrix[l][z])


// return remainingIterations.foldRight(emptyArray()) { arrayOfUBytes, acc -> acc xor arrayOfUBytes } //TODO placeholder
}

}
}



val result = matrix.foldIndexed(emptyArray<UByte>()) { lane, acc, laneArray ->
return if (acc.size == 0) {
acc + laneArray[columnCount - 1] // add last element in first lane to the accumulator
} else {
// For each element in our accumulator, xor it with an appropriate element from the last column in current lane (from 1 to `parallelism`)
acc.mapIndexed { index, it -> it xor laneArray[columnCount - 1][index] }
.toTypedArray()
}
}
result
}
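The fold above is heading toward the spec's finalization step: the final block is C = B[0][q-1] xor B[1][q-1] xor ... xor B[p-1][q-1], and the tag is then H' of C truncated or extended to tagLength. A minimal sketch of that last-column XOR (function name illustrative; note that the non-local return inside the fold's lambda above returns from the enclosing function rather than just the fold, since foldIndexed is inline):

// Illustrative sketch: XOR the last column of every lane into a single 1024-byte block.
fun finalBlock(matrix: Array<Array<Array<UByte>>>, columnCount: Int): Array<UByte> {
    var acc = matrix[0][columnCount - 1].copyOf()
    for (lane in 1 until matrix.size) {
        val lastInLane = matrix[lane][columnCount - 1]
        acc = Array(acc.size) { i -> acc[i] xor lastInLane[i] }
    }
    return acc
}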



return remainingIterations.foldRight(emptyArray()) { arrayOfUBytes, acc -> acc xor arrayOfUBytes } //TODO placeholder
}

}

fun calculate(): Array<UByte> {
return derive(
password, salt, parallelism, tagLength, memorySize, numberOfIterations, versionNumber, key, associatedData, type
password,
salt,
parallelism,
tagLength,
memorySize,
numberOfIterations,
versionNumber,
key,
associatedData,
type
)
}

@ -70,7 +70,7 @@ infix fun Array<UByte>.xor(other : Array<UByte>) : Array<UByte> {
if (this.size != other.size) {
throw RuntimeException("Operands of different sizes are not supported yet")
}
return Array(this.size) { this[it] xor other [it]}
return Array(this.size) { this[it] xor other[it] }
}

@ExperimentalUnsignedTypes