Skip to content

Commit dfaabdb

Browse files
authored
Explicitly qualify Base constructors. (#2883)
1 parent 578d95d commit dfaabdb

File tree

5 files changed

+27
-27
lines changed

5 files changed

+27
-27
lines changed

lib/cusolver/linalg.jl

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -143,10 +143,10 @@ using LinearAlgebra: Factorization, AbstractQ, QRCompactWY, QRCompactWYQ, QRPack
 LinearAlgebra.qr!(A::CuMatrix{T}) where T = QR(geqrf!(A::CuMatrix{T})...)

 # conversions
-CuMatrix(F::Union{QR,QRCompactWY}) = CuArray(AbstractArray(F))
-CuArray(F::Union{QR,QRCompactWY}) = CuMatrix(F)
-CuMatrix(F::QRPivoted) = CuArray(AbstractArray(F))
-CuArray(F::QRPivoted) = CuMatrix(F)
+CUDA.CuMatrix(F::Union{QR,QRCompactWY}) = CuArray(AbstractArray(F))
+CUDA.CuArray(F::Union{QR,QRCompactWY}) = CuMatrix(F)
+CUDA.CuMatrix(F::QRPivoted) = CuArray(AbstractArray(F))
+CUDA.CuArray(F::QRPivoted) = CuMatrix(F)

 function LinearAlgebra.ldiv!(_qr::QR, b::CuVector)
     m,n = size(_qr)
@@ -174,16 +174,16 @@ end
 # AbstractQ's `size` is the size of the full matrix,
 # while `Matrix(Q)` only gives the compact Q.
 # See JuliaLang/julia#26591 and JuliaGPU/CUDA.jl#969.
-CuArray(Q::AbstractQ) = CuMatrix(Q)
-CuArray{T}(Q::AbstractQ) where {T} = CuMatrix{T}(Q)
-CuMatrix(Q::AbstractQ{T}) where {T} = CuMatrix{T}(Q)
-CuMatrix{T}(Q::QRPackedQ{S}) where {T,S} =
+CUDA.CuArray(Q::AbstractQ) = CuMatrix(Q)
+CUDA.CuArray{T}(Q::AbstractQ) where {T} = CuMatrix{T}(Q)
+CUDA.CuMatrix(Q::AbstractQ{T}) where {T} = CuMatrix{T}(Q)
+CUDA.CuMatrix{T}(Q::QRPackedQ{S}) where {T,S} =
     CuMatrix{T}(lmul!(Q, CuMatrix{S}(I, size(Q, 1), min(size(Q.factors)...))))
-CuMatrix{T, B}(Q::QRPackedQ{S}) where {T, B, S} = CuMatrix{T}(Q)
-CuMatrix{T}(Q::QRCompactWYQ) where {T} = error("QRCompactWY format is not supported")
+CUDA.CuMatrix{T, B}(Q::QRPackedQ{S}) where {T, B, S} = CuMatrix{T}(Q)
+CUDA.CuMatrix{T}(Q::QRCompactWYQ) where {T} = error("QRCompactWY format is not supported")
 # avoid the CPU array in the above mul!
-Matrix{T}(Q::QRPackedQ{S,<:CuArray,<:CuArray}) where {T,S} = Array(CuMatrix{T}(Q))
-Matrix{T}(Q::QRCompactWYQ{S,<:CuArray,<:CuArray}) where {T,S} = Array(CuMatrix{T}(Q))
+Base.Matrix{T}(Q::QRPackedQ{S,<:CuArray,<:CuArray}) where {T,S} = Array(CuMatrix{T}(Q))
+Base.Matrix{T}(Q::QRCompactWYQ{S,<:CuArray,<:CuArray}) where {T,S} = Array(CuMatrix{T}(Q))

 function Base.getindex(Q::QRPackedQ{<:Any, <:CuArray}, ::Colon, j::Int)
     y = CUDA.zeros(eltype(Q), size(Q, 2))

lib/cusparse/array.jl

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -570,11 +570,11 @@ CuSparseMatrixCSC(x::Adjoint{T,<:Union{CuSparseMatrixCSC, CuSparseMatrixCSR, CuS
 CuSparseMatrixCOO(x::Adjoint{T,<:Union{CuSparseMatrixCSC, CuSparseMatrixCSR, CuSparseMatrixCOO}}) where {T} = CuSparseMatrixCOO(_spadjoint(parent(x)))

 # gpu to cpu
-SparseVector(x::CuSparseVector) = SparseVector(length(x), Array(nonzeroinds(x)), Array(nonzeros(x)))
-SparseMatrixCSC(x::CuSparseMatrixCSC) = SparseMatrixCSC(size(x)..., Array(x.colPtr), Array(rowvals(x)), Array(nonzeros(x)))
-SparseMatrixCSC(x::CuSparseMatrixCSR) = SparseMatrixCSC(CuSparseMatrixCSC(x)) # no direct conversion (gpu_CSR -> gpu_CSC -> cpu_CSC)
-SparseMatrixCSC(x::CuSparseMatrixBSR) = SparseMatrixCSC(CuSparseMatrixCSR(x)) # no direct conversion (gpu_BSR -> gpu_CSR -> gpu_CSC -> cpu_CSC)
-SparseMatrixCSC(x::CuSparseMatrixCOO) = SparseMatrixCSC(CuSparseMatrixCSC(x)) # no direct conversion (gpu_COO -> gpu_CSC -> cpu_CSC)
+SparseArrays.SparseVector(x::CuSparseVector) = SparseVector(length(x), Array(nonzeroinds(x)), Array(nonzeros(x)))
+SparseArrays.SparseMatrixCSC(x::CuSparseMatrixCSC) = SparseMatrixCSC(size(x)..., Array(x.colPtr), Array(rowvals(x)), Array(nonzeros(x)))
+SparseArrays.SparseMatrixCSC(x::CuSparseMatrixCSR) = SparseMatrixCSC(CuSparseMatrixCSC(x)) # no direct conversion (gpu_CSR -> gpu_CSC -> cpu_CSC)
+SparseArrays.SparseMatrixCSC(x::CuSparseMatrixBSR) = SparseMatrixCSC(CuSparseMatrixCSR(x)) # no direct conversion (gpu_BSR -> gpu_CSR -> gpu_CSC -> cpu_CSC)
+SparseArrays.SparseMatrixCSC(x::CuSparseMatrixCOO) = SparseMatrixCSC(CuSparseMatrixCSC(x)) # no direct conversion (gpu_COO -> gpu_CSC -> cpu_CSC)

 # collect to Array
 Base.collect(x::CuSparseVector) = collect(SparseVector(x))

lib/cusparse/conversions.jl

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -472,9 +472,9 @@ for (elty, felty) in ((:Int16, :Float16),
 end

 ## CuSparseVector to CuVector
-CuVector(x::CuSparseVector{T}) where {T} = CuVector{T}(x)
+CUDA.CuVector(x::CuSparseVector{T}) where {T} = CuVector{T}(x)

-function CuVector{T}(sv::CuSparseVector{T}) where {T}
+function CUDA.CuVector{T}(sv::CuSparseVector{T}) where {T}
     n = length(sv)
     dv = CUDA.zeros(T, n)
     scatter!(dv, sv, 'O')

src/broadcast.jl

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,18 +6,18 @@ struct CuArrayStyle{N,B} <: AbstractGPUArrayStyle{N} end
 CuArrayStyle{M,B}(::Val{N}) where {N,M,B} = CuArrayStyle{N,B}()

 # identify the broadcast style of a (wrapped) CuArray
-BroadcastStyle(::Type{<:CuArray{T,N,B}}) where {T,N,B} = CuArrayStyle{N,B}()
-BroadcastStyle(W::Type{<:AnyCuArray{T,N}}) where {T,N} =
+Broadcast.BroadcastStyle(::Type{<:CuArray{T,N,B}}) where {T,N,B} = CuArrayStyle{N,B}()
+Broadcast.BroadcastStyle(W::Type{<:AnyCuArray{T,N}}) where {T,N} =
     CuArrayStyle{N, memory_type(Adapt.unwrap_type(W))}()

 # when we are dealing with different memory types, we cannot know
 # which one is better, so use unified memory
-BroadcastStyle(::CUDA.CuArrayStyle{N1, B1},
+Broadcast.BroadcastStyle(::CUDA.CuArrayStyle{N1, B1},
                ::CUDA.CuArrayStyle{N2, B2}) where {N1,N2,B1,B2} =
     CuArrayStyle{max(N1,N2), UnifiedMemory}()

 # resolve ambiguity: different N, same memory type
-BroadcastStyle(::CUDA.CuArrayStyle{N1, B},
+Broadcast.BroadcastStyle(::CUDA.CuArrayStyle{N1, B},
               ::CUDA.CuArrayStyle{N2, B}) where {N1,N2,B} =
     CuArrayStyle{max(N1,N2), B}()
src/pointer.jl

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,8 @@ Base.eltype(::Type{<:CuPtr{T}}) where {T} = T
 Base.convert(::Type{T}, x::CuPtr) where {T<:Integer} = T(UInt(x))
 ## integer to pointer
 Base.convert(::Type{CuPtr{T}}, x::Union{Int,UInt}) where {T} = CuPtr{T}(x)
-Int(x::CuPtr) = Base.bitcast(Int, x)
-UInt(x::CuPtr) = Base.bitcast(UInt, x)
+Base.Int(x::CuPtr) = Base.bitcast(Int, x)
+Base.UInt(x::CuPtr) = Base.bitcast(UInt, x)

 # between regular and CUDA pointers
 Base.convert(::Type{<:Ptr}, p::CuPtr) =
@@ -167,8 +167,8 @@ Base.eltype(::Type{<:CuArrayPtr{T}}) where {T} = T
 Base.convert(::Type{T}, x::CuArrayPtr) where {T<:Integer} = T(UInt(x))
 ## integer to pointer
 Base.convert(::Type{CuArrayPtr{T}}, x::Union{Int,UInt}) where {T} = CuArrayPtr{T}(x)
-Int(x::CuArrayPtr) = Base.bitcast(Int, x)
-UInt(x::CuArrayPtr) = Base.bitcast(UInt, x)
+Base.Int(x::CuArrayPtr) = Base.bitcast(Int, x)
+Base.UInt(x::CuArrayPtr) = Base.bitcast(UInt, x)

 # between regular and CUDA pointers
 Base.convert(::Type{<:Ptr}, p::CuArrayPtr) =

0 commit comments

Comments
 (0)