Skip to content

Commit 3f5c0e6

Browse files
committed
fix lint
1 parent ffff36f commit 3f5c0e6

File tree

14 files changed

+67
-25
lines changed

14 files changed

+67
-25
lines changed

ccsrc/include/math/tensor/ops_cpu/memory_operator.h

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -53,10 +53,10 @@ void destroy(Tensor* t);
5353
// -----------------------------------------------------------------------------
5454

5555
template <TDtype src, TDtype des>
56-
Tensor cast_to(void* data, size_t len) {
56+
Tensor cast_to(const void* data, size_t len) {
5757
using d_src = to_device_t<src>;
5858
using d_des = to_device_t<des>;
59-
auto c_data = reinterpret_cast<d_src*>(data);
59+
auto c_data = reinterpret_cast<const d_src*>(data);
6060
auto out = cpu::init<des>(len);
6161
auto c_out = reinterpret_cast<d_des*>(out.data);
6262
auto caster = cast_value<to_device_t<src>, to_device_t<des>>();
@@ -71,13 +71,13 @@ Tensor cast_to(const Tensor& t, TDtype des);
7171
// -----------------------------------------------------------------------------
7272

7373
template <TDtype dtype>
74-
std::string to_string(void* data, size_t dim, bool simplify = false) {
74+
std::string to_string(const void* data, size_t dim, bool simplify = false) {
7575
std::string out = "";
7676
if (!simplify) {
7777
out = "array(dtype: " + dtype_to_string(dtype) + ", device: " + device_to_string(TDevice::CPU) + ", data: [";
7878
}
7979
using calc_t = to_device_t<dtype>;
80-
calc_t* data_ = reinterpret_cast<calc_t*>(data);
80+
const calc_t* data_ = reinterpret_cast<const calc_t*>(data);
8181
for (size_t i = 0; i < dim; i++) {
8282
if constexpr (is_complex_v<calc_t>) {
8383
out += "(" + std::to_string(data_[i].real()) + ", " + std::to_string(data_[i].imag()) + ")";
@@ -118,7 +118,7 @@ Tensor init_with_vector(const std::vector<T>& a) {
118118
// -----------------------------------------------------------------------------
119119

120120
template <TDtype dtype>
121-
Tensor copy(void* data, size_t len) {
121+
Tensor copy(const void* data, size_t len) {
122122
using calc_t = to_device_t<dtype>;
123123
auto out = init<dtype>(len);
124124
mindquantum::safe_copy(out.data, sizeof(calc_t) * len, data, sizeof(calc_t) * len);
@@ -128,7 +128,7 @@ Tensor copy(void* data, size_t len) {
128128
Tensor copy(const Tensor& t);
129129

130130
template <TDtype dtype>
131-
void* copy_mem(void* data, size_t len) {
131+
void* copy_mem(const void* data, size_t len) {
132132
using calc_t = to_device_t<dtype>;
133133
auto res = reinterpret_cast<void*>(malloc(sizeof(calc_t) * len));
134134
if (res == nullptr) {
@@ -137,7 +137,7 @@ void* copy_mem(void* data, size_t len) {
137137
mindquantum::safe_copy(res, sizeof(calc_t) * len, data, sizeof(calc_t) * len);
138138
return res;
139139
}
140-
void* copy_mem(void* data, TDtype dtype, size_t len);
140+
void* copy_mem(const void* data, TDtype dtype, size_t len);
141141

142142
// -----------------------------------------------------------------------------
143143
template <typename src, typename T>
@@ -185,8 +185,8 @@ Tensor get(const Tensor& t, size_t idx);
185185
// -----------------------------------------------------------------------------
186186

187187
template <TDtype src_dtype>
188-
std::vector<to_device_t<src_dtype>> to_vector(void* data, size_t len) {
189-
auto c_data = reinterpret_cast<to_device_t<src_dtype>*>(data);
188+
std::vector<to_device_t<src_dtype>> to_vector(const void* data, size_t len) {
189+
auto c_data = reinterpret_cast<const to_device_t<src_dtype>*>(data);
190190
std::vector<to_device_t<src_dtype>> out;
191191
for (size_t i = 0; i < len; i++) {
192192
out.push_back(c_data[i]);
@@ -204,8 +204,8 @@ std::vector<T> to_vector(const Tensor& ori) {
204204
}
205205

206206
template <TDtype src_dtype>
207-
std::vector<std::vector<to_device_t<src_dtype>>> to_vector(void* data, size_t n_row, size_t n_col) {
208-
auto c_data = reinterpret_cast<to_device_t<src_dtype>*>(data);
207+
std::vector<std::vector<to_device_t<src_dtype>>> to_vector(const void* data, size_t n_row, size_t n_col) {
208+
auto c_data = reinterpret_cast<const to_device_t<src_dtype>*>(data);
209209
std::vector<std::vector<to_device_t<src_dtype>>> out;
210210
for (size_t i = 0; i < n_row; i++) {
211211
std::vector<to_device_t<src_dtype>> tmp;

ccsrc/lib/math/tensor/ops_cpu/memory_operator.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,7 @@ Tensor copy(const Tensor& t) {
111111
return Tensor();
112112
}
113113

114-
void* copy_mem(void* data, TDtype dtype, size_t len) {
114+
void* copy_mem(const void* data, TDtype dtype, size_t len) {
115115
switch (dtype) {
116116
case (TDtype::Float32):
117117
return copy_mem<TDtype::Float32>(data, len);

ccsrc/lib/simulator/vector/detail/runtime/cmd.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
#include "simulator/vector/runtime/rt_gate.h"
3131
#include "simulator/vector/runtime/utils.h"
3232
#include "simulator/vector/vector_state.h"
33-
#define MAX_SHOTS 100000
33+
constexpr int MAX_SHOTS = 100000;
3434

3535
namespace mindquantum::sim::rt {
3636
int cmd(const std::vector<std::string> &args) {
@@ -216,7 +216,6 @@ int cmd_file(const char *filename) {
216216
file.open(filename);
217217
if (!file.is_open()) {
218218
throw std::runtime_error(fmt::format("Cannot open file {}", filename));
219-
return 0;
220219
}
221220
std::vector<std::string> cmds = {"", "cmd"};
222221
std::string current_cmd = "";

mindquantum/algorithm/compiler/decompose/utils.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,10 @@ def kron_factor_4x4_to_2x2s(mat: np.ndarray):
6868
f2 /= np.sqrt(np.linalg.det(f2)) or 1
6969

7070
# Determine global phase.
71-
g = mat[a, b] / (f1[a >> 1, b >> 1] * f2[a & 1, b & 1])
71+
denominator = f1[a >> 1, b >> 1] * f2[a & 1, b & 1]
72+
if denominator == 0:
73+
raise ZeroDivisionError("denominator cannot be zero.")
74+
g = mat[a, b] / denominator
7275
if np.real(g) < 0:
7376
f1 *= -1
7477
g = -g
@@ -222,6 +225,8 @@ def glob_phase(mat: np.ndarray) -> float:
222225
Global phase rad, in range of (-pi, pi].
223226
"""
224227
d = mat.shape[0]
228+
if d == 0:
229+
raise ZeroDivisionError("Dimension of mat can not be zero.")
225230
exp_alpha = linalg.det(mat) ** (1 / d)
226231
return np.angle(exp_alpha)
227232

mindquantum/algorithm/error_mitigation/mitigation.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,10 @@ def zne(
5959
product = 1
6060
for i in range(0, len(y)):
6161
if k != i:
62-
product = product * (scaling[i] / (scaling[i] - scaling[k]))
62+
try:
63+
product = product * (scaling[i] / (scaling[i] - scaling[k]))
64+
except ZeroDivisionError as exc:
65+
raise ZeroDivisionError(f"Error scaling: {scaling}") from exc
6366
mitigated = mitigated + y_k * product
6467
return mitigated
6568
if order is None:

mindquantum/algorithm/library/amplitude_encoder.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414
# ============================================================================
15-
1615
"""Amplitude encoder for quantum machine learning."""
1716

1817
import math
@@ -41,6 +40,7 @@ def controlled_gate(circuit, gate, t_qubit, c_qubits, zero_qubit):
4140
circuit += X.on(abs(control))
4241

4342

43+
# pylint: disable=too-many-locals
4444
def amplitude_encoder(x, n_qubits):
4545
"""
4646
Quantum circuit for amplitude encoding.
@@ -109,7 +109,10 @@ def amplitude_encoder(x, n_qubits):
109109
controls.append(tmp_j * j)
110110
theta = 0
111111
if tree[(i - 1) // 2] > 1e-10:
112-
amp_0 = tree[i] / tree[(i - 1) // 2]
112+
try:
113+
amp_0 = tree[i] / tree[(i - 1) // 2]
114+
except ZeroDivisionError as exc:
115+
raise ZeroDivisionError("Failed to set amplitude encoding.") from exc
113116
theta = 2 * math.acos(amp_0)
114117
if tree[i + 1] < 0 < math.sin(theta / 2):
115118
theta = -theta

mindquantum/algorithm/nisq/barren_plateau.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,10 @@ def run(grad_ops, n_sampling):
9999
half_l = len(gradients) // 2
100100
ori_var = np.var(gradients[:half_l])
101101
this_var = np.var(gradients[half_l:])
102-
var_i = np.abs(ori_var - this_var) / ori_var
102+
try:
103+
var_i = np.abs(ori_var - this_var) / ori_var
104+
except ZeroDivisionError as exc:
105+
raise ZeroDivisionError("ori_val cannot be zero.") from exc
103106
step += 1
104107

105108
return np.var(gradients)

mindquantum/core/operators/fermion_operator.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -225,10 +225,14 @@ def __rmul__(self, other: typing.Union["FermionOperator", PRConvertible]) -> "Fe
225225

226226
def __truediv__(self, other: PRConvertible) -> "FermionOperator":
227227
"""Divide a number."""
228+
if other == 0.0:
229+
raise ZeroDivisionError("other cannot be zero.")
228230
return self * (1.0 / other)
229231

230232
def __itruediv__(self, other: PRConvertible) -> "FermionOperator":
231233
"""Divide a number."""
234+
if other == 0.0:
235+
raise ZeroDivisionError("other cannot be zero.")
232236
self.__imul__(1.0 / other)
233237
return self
234238

mindquantum/core/operators/qubit_operator.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -157,10 +157,14 @@ def __rmul__(self, other: typing.Union["QubitOperator", PRConvertible]) -> "Qubi
157157

158158
def __truediv__(self, other: PRConvertible) -> "QubitOperator":
159159
"""Divide a number."""
160+
if other == 0.0:
161+
raise ZeroDivisionError("other cannot be zero.")
160162
return self * (1.0 / other)
161163

162164
def __itruediv__(self, other: PRConvertible) -> "QubitOperator":
163165
"""Divide a number."""
166+
if other == 0.0:
167+
raise ZeroDivisionError("other cannot be zero.")
164168
self.__imul__(1.0 / other)
165169
return self
166170

mindquantum/io/display/bloch_plt_drawer.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -345,8 +345,10 @@ def state_to_cor(amp: np.ndarray):
345345
_check_input_type('amp', np.ndarray, amp)
346346
if amp.shape != (2,):
347347
raise ValueError(f"amp requires shape (2, ), but get {amp.shape}")
348-
if np.sqrt(np.vdot(amp, amp)) != 0:
348+
try:
349349
amp = amp / np.sqrt(np.vdot(amp, amp))
350+
except ZeroDivisionError as exc:
351+
raise ZeroDivisionError("Mode of amp is zero.") from exc
350352
global_phase = np.angle(amp[0])
351353
amp = amp / np.exp(1j * global_phase)
352354
theta = 2 * np.arccos(np.real(amp[0]))

0 commit comments

Comments (0)